1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * This file is part of wlcore 4 * 5 * Copyright (C) 2008-2010 Nokia Corporation 6 * Copyright (C) 2011-2013 Texas Instruments Inc. 7 */ 8 9 #include <linux/module.h> 10 #include <linux/firmware.h> 11 #include <linux/etherdevice.h> 12 #include <linux/vmalloc.h> 13 #include <linux/interrupt.h> 14 #include <linux/irq.h> 15 #include <linux/pm_runtime.h> 16 #include <linux/pm_wakeirq.h> 17 18 #include "wlcore.h" 19 #include "debug.h" 20 #include "wl12xx_80211.h" 21 #include "io.h" 22 #include "tx.h" 23 #include "ps.h" 24 #include "init.h" 25 #include "debugfs.h" 26 #include "testmode.h" 27 #include "vendor_cmd.h" 28 #include "scan.h" 29 #include "hw_ops.h" 30 #include "sysfs.h" 31 32 #define WL1271_BOOT_RETRIES 3 33 #define WL1271_WAKEUP_TIMEOUT 500 34 35 static char *fwlog_param; 36 static int fwlog_mem_blocks = -1; 37 static int bug_on_recovery = -1; 38 static int no_recovery = -1; 39 40 static void __wl1271_op_remove_interface(struct wl1271 *wl, 41 struct ieee80211_vif *vif, 42 bool reset_tx_queues); 43 static void wlcore_op_stop_locked(struct wl1271 *wl); 44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif); 45 46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif) 47 { 48 int ret; 49 50 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS)) 51 return -EINVAL; 52 53 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 54 return 0; 55 56 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags)) 57 return 0; 58 59 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid); 60 if (ret < 0) 61 return ret; 62 63 wl1271_info("Association completed."); 64 return 0; 65 } 66 67 static void wl1271_reg_notify(struct wiphy *wiphy, 68 struct regulatory_request *request) 69 { 70 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 71 struct wl1271 *wl = hw->priv; 72 73 /* copy the current dfs region */ 74 if (request) 75 wl->dfs_region = request->dfs_region; 76 77 wlcore_regdomain_config(wl); 78 } 79 80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, 81 bool enable) 82 { 83 int ret = 0; 84 85 /* we should hold wl->mutex */ 86 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable); 87 if (ret < 0) 88 goto out; 89 90 if (enable) 91 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); 92 else 93 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags); 94 out: 95 return ret; 96 } 97 98 /* 99 * this function is being called when the rx_streaming interval 100 * has beed changed or rx_streaming should be disabled 101 */ 102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif) 103 { 104 int ret = 0; 105 int period = wl->conf.rx_streaming.interval; 106 107 /* don't reconfigure if rx_streaming is disabled */ 108 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) 109 goto out; 110 111 /* reconfigure/disable according to new streaming_period */ 112 if (period && 113 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && 114 (wl->conf.rx_streaming.always || 115 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 116 ret = wl1271_set_rx_streaming(wl, wlvif, true); 117 else { 118 ret = wl1271_set_rx_streaming(wl, wlvif, false); 119 /* don't cancel_work_sync since we might deadlock */ 120 timer_delete_sync(&wlvif->rx_streaming_timer); 121 } 122 out: 123 return ret; 124 } 125 126 static void wl1271_rx_streaming_enable_work(struct work_struct *work) 127 { 128 int ret; 129 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, 130 rx_streaming_enable_work); 131 struct wl1271 *wl = wlvif->wl; 132 133 mutex_lock(&wl->mutex); 134 135 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) || 136 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) || 137 (!wl->conf.rx_streaming.always && 138 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) 139 goto out; 140 141 if (!wl->conf.rx_streaming.interval) 142 goto out; 143 144 ret = pm_runtime_resume_and_get(wl->dev); 145 if (ret < 0) 146 goto out; 147 148 ret = wl1271_set_rx_streaming(wl, wlvif, true); 149 if (ret < 0) 150 goto out_sleep; 151 152 /* stop it after some time of inactivity */ 153 mod_timer(&wlvif->rx_streaming_timer, 154 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration)); 155 156 out_sleep: 157 pm_runtime_put_autosuspend(wl->dev); 158 out: 159 mutex_unlock(&wl->mutex); 160 } 161 162 static void wl1271_rx_streaming_disable_work(struct work_struct *work) 163 { 164 int ret; 165 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, 166 rx_streaming_disable_work); 167 struct wl1271 *wl = wlvif->wl; 168 169 mutex_lock(&wl->mutex); 170 171 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags)) 172 goto out; 173 174 ret = pm_runtime_resume_and_get(wl->dev); 175 if (ret < 0) 176 goto out; 177 178 ret = wl1271_set_rx_streaming(wl, wlvif, false); 179 if (ret) 180 goto out_sleep; 181 182 out_sleep: 183 pm_runtime_put_autosuspend(wl->dev); 184 out: 185 mutex_unlock(&wl->mutex); 186 } 187 188 static void wl1271_rx_streaming_timer(struct timer_list *t) 189 { 190 struct wl12xx_vif *wlvif = timer_container_of(wlvif, t, 191 rx_streaming_timer); 192 struct wl1271 *wl = wlvif->wl; 193 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work); 194 } 195 196 /* wl->mutex must be taken */ 197 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl) 198 { 199 /* if the watchdog is not armed, don't do anything */ 200 if (wl->tx_allocated_blocks == 0) 201 return; 202 203 cancel_delayed_work(&wl->tx_watchdog_work); 204 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work, 205 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout)); 206 } 207 208 static void wlcore_rc_update_work(struct work_struct *work) 209 { 210 int ret; 211 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif, 212 rc_update_work); 213 struct wl1271 *wl = wlvif->wl; 214 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 215 216 mutex_lock(&wl->mutex); 217 218 if (unlikely(wl->state != WLCORE_STATE_ON)) 219 goto out; 220 221 ret = pm_runtime_resume_and_get(wl->dev); 222 if (ret < 0) 223 goto out; 224 225 if (ieee80211_vif_is_mesh(vif)) { 226 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap, 227 true, wlvif->sta.hlid); 228 if (ret < 0) 229 goto out_sleep; 230 } else { 231 wlcore_hw_sta_rc_update(wl, wlvif); 232 } 233 234 out_sleep: 235 pm_runtime_put_autosuspend(wl->dev); 236 out: 237 mutex_unlock(&wl->mutex); 238 } 239 240 static void wl12xx_tx_watchdog_work(struct work_struct *work) 241 { 242 struct delayed_work *dwork; 243 struct wl1271 *wl; 244 245 dwork = to_delayed_work(work); 246 wl = container_of(dwork, struct wl1271, tx_watchdog_work); 247 248 mutex_lock(&wl->mutex); 249 250 if (unlikely(wl->state != WLCORE_STATE_ON)) 251 goto out; 252 253 /* Tx went out in the meantime - everything is ok */ 254 if (unlikely(wl->tx_allocated_blocks == 0)) 255 goto out; 256 257 /* 258 * if a ROC is in progress, we might not have any Tx for a long 259 * time (e.g. pending Tx on the non-ROC channels) 260 */ 261 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { 262 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC", 263 wl->conf.tx.tx_watchdog_timeout); 264 wl12xx_rearm_tx_watchdog_locked(wl); 265 goto out; 266 } 267 268 /* 269 * if a scan is in progress, we might not have any Tx for a long 270 * time 271 */ 272 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) { 273 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan", 274 wl->conf.tx.tx_watchdog_timeout); 275 wl12xx_rearm_tx_watchdog_locked(wl); 276 goto out; 277 } 278 279 /* 280 * AP might cache a frame for a long time for a sleeping station, 281 * so rearm the timer if there's an AP interface with stations. If 282 * Tx is genuinely stuck we will most hopefully discover it when all 283 * stations are removed due to inactivity. 284 */ 285 if (wl->active_sta_count) { 286 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has " 287 " %d stations", 288 wl->conf.tx.tx_watchdog_timeout, 289 wl->active_sta_count); 290 wl12xx_rearm_tx_watchdog_locked(wl); 291 goto out; 292 } 293 294 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery", 295 wl->conf.tx.tx_watchdog_timeout); 296 wl12xx_queue_recovery_work(wl); 297 298 out: 299 mutex_unlock(&wl->mutex); 300 } 301 302 static void wlcore_adjust_conf(struct wl1271 *wl) 303 { 304 305 if (fwlog_param) { 306 if (!strcmp(fwlog_param, "continuous")) { 307 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; 308 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST; 309 } else if (!strcmp(fwlog_param, "dbgpins")) { 310 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; 311 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; 312 } else if (!strcmp(fwlog_param, "disable")) { 313 wl->conf.fwlog.mem_blocks = 0; 314 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE; 315 } else { 316 wl1271_error("Unknown fwlog parameter %s", fwlog_param); 317 } 318 } 319 320 if (bug_on_recovery != -1) 321 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery; 322 323 if (no_recovery != -1) 324 wl->conf.recovery.no_recovery = (u8) no_recovery; 325 } 326 327 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, 328 struct wl12xx_vif *wlvif, 329 u8 hlid, u8 tx_pkts) 330 { 331 bool fw_ps; 332 333 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map); 334 335 /* 336 * Wake up from high level PS if the STA is asleep with too little 337 * packets in FW or if the STA is awake. 338 */ 339 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS) 340 wl12xx_ps_link_end(wl, wlvif, hlid); 341 342 /* 343 * Start high-level PS if the STA is asleep with enough blocks in FW. 344 * Make an exception if this is the only connected link. In this 345 * case FW-memory congestion is less of a problem. 346 * Note that a single connected STA means 2*ap_count + 1 active links, 347 * since we must account for the global and broadcast AP links 348 * for each AP. The "fw_ps" check assures us the other link is a STA 349 * connected to the AP. Otherwise the FW would not set the PSM bit. 350 */ 351 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps && 352 tx_pkts >= WL1271_PS_STA_MAX_PACKETS) 353 wl12xx_ps_link_start(wl, wlvif, hlid, true); 354 } 355 356 static void wl12xx_irq_update_links_status(struct wl1271 *wl, 357 struct wl12xx_vif *wlvif, 358 struct wl_fw_status *status) 359 { 360 unsigned long cur_fw_ps_map; 361 u8 hlid; 362 363 cur_fw_ps_map = status->link_ps_bitmap; 364 if (wl->ap_fw_ps_map != cur_fw_ps_map) { 365 wl1271_debug(DEBUG_PSM, 366 "link ps prev 0x%lx cur 0x%lx changed 0x%lx", 367 wl->ap_fw_ps_map, cur_fw_ps_map, 368 wl->ap_fw_ps_map ^ cur_fw_ps_map); 369 370 wl->ap_fw_ps_map = cur_fw_ps_map; 371 } 372 373 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links) 374 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, 375 wl->links[hlid].allocated_pkts); 376 } 377 378 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) 379 { 380 struct wl12xx_vif *wlvifsta; 381 struct wl12xx_vif *wlvifap; 382 struct wl12xx_vif *wlvif; 383 u32 old_tx_blk_count = wl->tx_blocks_available; 384 int avail, freed_blocks; 385 int i; 386 int ret; 387 struct wl1271_link *lnk; 388 389 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, 390 wl->raw_fw_status, 391 wl->fw_status_len, false); 392 if (ret < 0) 393 return ret; 394 395 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status); 396 397 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 398 "drv_rx_counter = %d, tx_results_counter = %d)", 399 status->intr, 400 status->fw_rx_counter, 401 status->drv_rx_counter, 402 status->tx_results_counter); 403 404 for (i = 0; i < NUM_TX_QUEUES; i++) { 405 /* prevent wrap-around in freed-packets counter */ 406 wl->tx_allocated_pkts[i] -= 407 (status->counters.tx_released_pkts[i] - 408 wl->tx_pkts_freed[i]) & 0xff; 409 410 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i]; 411 } 412 413 /* Find an authorized STA vif */ 414 wlvifsta = NULL; 415 wl12xx_for_each_wlvif_sta(wl, wlvif) { 416 if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID && 417 test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) { 418 wlvifsta = wlvif; 419 break; 420 } 421 } 422 423 /* Find a started AP vif */ 424 wlvifap = NULL; 425 wl12xx_for_each_wlvif(wl, wlvif) { 426 if (wlvif->bss_type == BSS_TYPE_AP_BSS && 427 wlvif->inconn_count == 0 && 428 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 429 wlvifap = wlvif; 430 break; 431 } 432 } 433 434 for_each_set_bit(i, wl->links_map, wl->num_links) { 435 u16 diff16, sec_pn16; 436 u8 diff, tx_lnk_free_pkts; 437 438 lnk = &wl->links[i]; 439 440 /* prevent wrap-around in freed-packets counter */ 441 tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i]; 442 diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff; 443 444 if (diff) { 445 lnk->allocated_pkts -= diff; 446 lnk->prev_freed_pkts = tx_lnk_free_pkts; 447 } 448 449 /* Get the current sec_pn16 value if present */ 450 if (status->counters.tx_lnk_sec_pn16) 451 sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]); 452 else 453 sec_pn16 = 0; 454 /* prevent wrap-around in pn16 counter */ 455 diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff; 456 457 /* FIXME: since free_pkts is a 8-bit counter of packets that 458 * rolls over, it can become zero. If it is zero, then we 459 * omit processing below. Is that really correct? 460 */ 461 if (tx_lnk_free_pkts <= 0) 462 continue; 463 464 /* For a station that has an authorized link: */ 465 if (wlvifsta && wlvifsta->sta.hlid == i) { 466 if (wlvifsta->encryption_type == KEY_TKIP || 467 wlvifsta->encryption_type == KEY_AES) { 468 if (diff16) { 469 lnk->prev_sec_pn16 = sec_pn16; 470 /* accumulate the prev_freed_pkts 471 * counter according to the PN from 472 * firmware 473 */ 474 lnk->total_freed_pkts += diff16; 475 } 476 } else { 477 if (diff) 478 /* accumulate the prev_freed_pkts 479 * counter according to the free packets 480 * count from firmware 481 */ 482 lnk->total_freed_pkts += diff; 483 } 484 } 485 486 /* For an AP that has been started */ 487 if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) { 488 if (wlvifap->encryption_type == KEY_TKIP || 489 wlvifap->encryption_type == KEY_AES) { 490 if (diff16) { 491 lnk->prev_sec_pn16 = sec_pn16; 492 /* accumulate the prev_freed_pkts 493 * counter according to the PN from 494 * firmware 495 */ 496 lnk->total_freed_pkts += diff16; 497 } 498 } else { 499 if (diff) 500 /* accumulate the prev_freed_pkts 501 * counter according to the free packets 502 * count from firmware 503 */ 504 lnk->total_freed_pkts += diff; 505 } 506 } 507 } 508 509 /* prevent wrap-around in total blocks counter */ 510 if (likely(wl->tx_blocks_freed <= status->total_released_blks)) 511 freed_blocks = status->total_released_blks - 512 wl->tx_blocks_freed; 513 else 514 freed_blocks = 0x100000000LL - wl->tx_blocks_freed + 515 status->total_released_blks; 516 517 wl->tx_blocks_freed = status->total_released_blks; 518 519 wl->tx_allocated_blocks -= freed_blocks; 520 521 /* 522 * If the FW freed some blocks: 523 * If we still have allocated blocks - re-arm the timer, Tx is 524 * not stuck. Otherwise, cancel the timer (no Tx currently). 525 */ 526 if (freed_blocks) { 527 if (wl->tx_allocated_blocks) 528 wl12xx_rearm_tx_watchdog_locked(wl); 529 else 530 cancel_delayed_work(&wl->tx_watchdog_work); 531 } 532 533 avail = status->tx_total - wl->tx_allocated_blocks; 534 535 /* 536 * The FW might change the total number of TX memblocks before 537 * we get a notification about blocks being released. Thus, the 538 * available blocks calculation might yield a temporary result 539 * which is lower than the actual available blocks. Keeping in 540 * mind that only blocks that were allocated can be moved from 541 * TX to RX, tx_blocks_available should never decrease here. 542 */ 543 wl->tx_blocks_available = max((int)wl->tx_blocks_available, 544 avail); 545 546 /* if more blocks are available now, tx work can be scheduled */ 547 if (wl->tx_blocks_available > old_tx_blk_count) 548 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 549 550 /* for AP update num of allocated TX blocks per link and ps status */ 551 wl12xx_for_each_wlvif_ap(wl, wlvif) { 552 wl12xx_irq_update_links_status(wl, wlvif, status); 553 } 554 555 /* update the host-chipset time offset */ 556 wl->time_offset = (ktime_get_boottime_ns() >> 10) - 557 (s64)(status->fw_localtime); 558 559 wl->fw_fast_lnk_map = status->link_fast_bitmap; 560 561 return 0; 562 } 563 564 static void wl1271_flush_deferred_work(struct wl1271 *wl) 565 { 566 struct sk_buff *skb; 567 568 /* Pass all received frames to the network stack */ 569 while ((skb = skb_dequeue(&wl->deferred_rx_queue))) 570 ieee80211_rx_ni(wl->hw, skb); 571 572 /* Return sent skbs to the network stack */ 573 while ((skb = skb_dequeue(&wl->deferred_tx_queue))) 574 ieee80211_tx_status_ni(wl->hw, skb); 575 } 576 577 static void wl1271_netstack_work(struct work_struct *work) 578 { 579 struct wl1271 *wl = 580 container_of(work, struct wl1271, netstack_work); 581 582 do { 583 wl1271_flush_deferred_work(wl); 584 } while (skb_queue_len(&wl->deferred_rx_queue)); 585 } 586 587 #define WL1271_IRQ_MAX_LOOPS 256 588 589 static int wlcore_irq_locked(struct wl1271 *wl) 590 { 591 int ret = 0; 592 u32 intr; 593 int loopcount = WL1271_IRQ_MAX_LOOPS; 594 bool run_tx_queue = true; 595 bool done = false; 596 unsigned int defer_count; 597 unsigned long flags; 598 599 /* 600 * In case edge triggered interrupt must be used, we cannot iterate 601 * more than once without introducing race conditions with the hardirq. 602 */ 603 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) 604 loopcount = 1; 605 606 wl1271_debug(DEBUG_IRQ, "IRQ work"); 607 608 if (unlikely(wl->state != WLCORE_STATE_ON)) 609 goto out; 610 611 ret = pm_runtime_resume_and_get(wl->dev); 612 if (ret < 0) 613 goto out; 614 615 while (!done && loopcount--) { 616 smp_mb__after_atomic(); 617 618 ret = wlcore_fw_status(wl, wl->fw_status); 619 if (ret < 0) 620 goto err_ret; 621 622 wlcore_hw_tx_immediate_compl(wl); 623 624 intr = wl->fw_status->intr; 625 intr &= WLCORE_ALL_INTR_MASK; 626 if (!intr) { 627 done = true; 628 continue; 629 } 630 631 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 632 wl1271_error("HW watchdog interrupt received! starting recovery."); 633 wl->watchdog_recovery = true; 634 ret = -EIO; 635 636 /* restarting the chip. ignore any other interrupt. */ 637 goto err_ret; 638 } 639 640 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) { 641 wl1271_error("SW watchdog interrupt received! " 642 "starting recovery."); 643 wl->watchdog_recovery = true; 644 ret = -EIO; 645 646 /* restarting the chip. ignore any other interrupt. */ 647 goto err_ret; 648 } 649 650 if (likely(intr & WL1271_ACX_INTR_DATA)) { 651 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 652 653 ret = wlcore_rx(wl, wl->fw_status); 654 if (ret < 0) 655 goto err_ret; 656 657 /* Check if any tx blocks were freed */ 658 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) { 659 if (spin_trylock_irqsave(&wl->wl_lock, flags)) { 660 if (!wl1271_tx_total_queue_count(wl)) 661 run_tx_queue = false; 662 spin_unlock_irqrestore(&wl->wl_lock, flags); 663 } 664 665 /* 666 * In order to avoid starvation of the TX path, 667 * call the work function directly. 668 */ 669 if (run_tx_queue) { 670 ret = wlcore_tx_work_locked(wl); 671 if (ret < 0) 672 goto err_ret; 673 } 674 } 675 676 /* check for tx results */ 677 ret = wlcore_hw_tx_delayed_compl(wl); 678 if (ret < 0) 679 goto err_ret; 680 681 /* Make sure the deferred queues don't get too long */ 682 defer_count = skb_queue_len(&wl->deferred_tx_queue) + 683 skb_queue_len(&wl->deferred_rx_queue); 684 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT) 685 wl1271_flush_deferred_work(wl); 686 } 687 688 if (intr & WL1271_ACX_INTR_EVENT_A) { 689 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 690 ret = wl1271_event_handle(wl, 0); 691 if (ret < 0) 692 goto err_ret; 693 } 694 695 if (intr & WL1271_ACX_INTR_EVENT_B) { 696 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 697 ret = wl1271_event_handle(wl, 1); 698 if (ret < 0) 699 goto err_ret; 700 } 701 702 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 703 wl1271_debug(DEBUG_IRQ, 704 "WL1271_ACX_INTR_INIT_COMPLETE"); 705 706 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 707 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 708 } 709 710 err_ret: 711 pm_runtime_put_autosuspend(wl->dev); 712 713 out: 714 return ret; 715 } 716 717 static irqreturn_t wlcore_irq(int irq, void *cookie) 718 { 719 int ret; 720 unsigned long flags; 721 struct wl1271 *wl = cookie; 722 bool queue_tx_work = true; 723 724 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 725 726 /* complete the ELP completion */ 727 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) { 728 spin_lock_irqsave(&wl->wl_lock, flags); 729 if (wl->elp_compl) 730 complete(wl->elp_compl); 731 spin_unlock_irqrestore(&wl->wl_lock, flags); 732 } 733 734 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) { 735 /* don't enqueue a work right now. mark it as pending */ 736 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags); 737 wl1271_debug(DEBUG_IRQ, "should not enqueue work"); 738 spin_lock_irqsave(&wl->wl_lock, flags); 739 disable_irq_nosync(wl->irq); 740 pm_wakeup_event(wl->dev, 0); 741 spin_unlock_irqrestore(&wl->wl_lock, flags); 742 goto out_handled; 743 } 744 745 /* TX might be handled here, avoid redundant work */ 746 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 747 cancel_work_sync(&wl->tx_work); 748 749 mutex_lock(&wl->mutex); 750 751 ret = wlcore_irq_locked(wl); 752 if (ret) 753 wl12xx_queue_recovery_work(wl); 754 755 /* In case TX was not handled in wlcore_irq_locked(), queue TX work */ 756 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 757 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) { 758 if (spin_trylock_irqsave(&wl->wl_lock, flags)) { 759 if (!wl1271_tx_total_queue_count(wl)) 760 queue_tx_work = false; 761 spin_unlock_irqrestore(&wl->wl_lock, flags); 762 } 763 if (queue_tx_work) 764 ieee80211_queue_work(wl->hw, &wl->tx_work); 765 } 766 767 mutex_unlock(&wl->mutex); 768 769 out_handled: 770 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 771 772 return IRQ_HANDLED; 773 } 774 775 struct vif_counter_data { 776 u8 counter; 777 778 struct ieee80211_vif *cur_vif; 779 bool cur_vif_running; 780 }; 781 782 static void wl12xx_vif_count_iter(void *data, u8 *mac, 783 struct ieee80211_vif *vif) 784 { 785 struct vif_counter_data *counter = data; 786 787 counter->counter++; 788 if (counter->cur_vif == vif) 789 counter->cur_vif_running = true; 790 } 791 792 /* caller must not hold wl->mutex, as it might deadlock */ 793 static void wl12xx_get_vif_count(struct ieee80211_hw *hw, 794 struct ieee80211_vif *cur_vif, 795 struct vif_counter_data *data) 796 { 797 memset(data, 0, sizeof(*data)); 798 data->cur_vif = cur_vif; 799 800 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL, 801 wl12xx_vif_count_iter, data); 802 } 803 804 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt) 805 { 806 const struct firmware *fw; 807 const char *fw_name; 808 enum wl12xx_fw_type fw_type; 809 int ret; 810 811 if (plt) { 812 fw_type = WL12XX_FW_TYPE_PLT; 813 fw_name = wl->plt_fw_name; 814 } else { 815 /* 816 * we can't call wl12xx_get_vif_count() here because 817 * wl->mutex is taken, so use the cached last_vif_count value 818 */ 819 if (wl->last_vif_count > 1 && wl->mr_fw_name) { 820 fw_type = WL12XX_FW_TYPE_MULTI; 821 fw_name = wl->mr_fw_name; 822 } else { 823 fw_type = WL12XX_FW_TYPE_NORMAL; 824 fw_name = wl->sr_fw_name; 825 } 826 } 827 828 if (wl->fw_type == fw_type) 829 return 0; 830 831 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); 832 833 ret = request_firmware(&fw, fw_name, wl->dev); 834 835 if (ret < 0) { 836 wl1271_error("could not get firmware %s: %d", fw_name, ret); 837 return ret; 838 } 839 840 if (fw->size % 4) { 841 wl1271_error("firmware size is not multiple of 32 bits: %zu", 842 fw->size); 843 ret = -EILSEQ; 844 goto out; 845 } 846 847 vfree(wl->fw); 848 wl->fw_type = WL12XX_FW_TYPE_NONE; 849 wl->fw_len = fw->size; 850 wl->fw = vmalloc(wl->fw_len); 851 852 if (!wl->fw) { 853 wl1271_error("could not allocate memory for the firmware"); 854 ret = -ENOMEM; 855 goto out; 856 } 857 858 memcpy(wl->fw, fw->data, wl->fw_len); 859 ret = 0; 860 wl->fw_type = fw_type; 861 out: 862 release_firmware(fw); 863 864 return ret; 865 } 866 867 void wl12xx_queue_recovery_work(struct wl1271 *wl) 868 { 869 /* Avoid a recursive recovery */ 870 if (wl->state == WLCORE_STATE_ON) { 871 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, 872 &wl->flags)); 873 874 wl->state = WLCORE_STATE_RESTARTING; 875 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 876 ieee80211_queue_work(wl->hw, &wl->recovery_work); 877 } 878 } 879 880 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen) 881 { 882 size_t len; 883 884 /* Make sure we have enough room */ 885 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size); 886 887 /* Fill the FW log file, consumed by the sysfs fwlog entry */ 888 memcpy(wl->fwlog + wl->fwlog_size, memblock, len); 889 wl->fwlog_size += len; 890 891 return len; 892 } 893 894 static void wl12xx_read_fwlog_panic(struct wl1271 *wl) 895 { 896 u32 end_of_log = 0; 897 int error; 898 899 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) 900 return; 901 902 wl1271_info("Reading FW panic log"); 903 904 /* 905 * Make sure the chip is awake and the logger isn't active. 906 * Do not send a stop fwlog command if the fw is hanged or if 907 * dbgpins are used (due to some fw bug). 908 */ 909 error = pm_runtime_resume_and_get(wl->dev); 910 if (error < 0) 911 return; 912 if (!wl->watchdog_recovery && 913 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS) 914 wl12xx_cmd_stop_fwlog(wl); 915 916 /* Traverse the memory blocks linked list */ 917 do { 918 end_of_log = wlcore_event_fw_logger(wl); 919 if (end_of_log == 0) { 920 msleep(100); 921 end_of_log = wlcore_event_fw_logger(wl); 922 } 923 } while (end_of_log != 0); 924 } 925 926 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif, 927 u8 hlid, struct ieee80211_sta *sta) 928 { 929 struct wl1271_station *wl_sta; 930 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING; 931 932 wl_sta = (void *)sta->drv_priv; 933 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts; 934 935 /* 936 * increment the initial seq number on recovery to account for 937 * transmitted packets that we haven't yet got in the FW status 938 */ 939 if (wlvif->encryption_type == KEY_GEM) 940 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM; 941 942 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 943 wl_sta->total_freed_pkts += sqn_recovery_padding; 944 } 945 946 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl, 947 struct wl12xx_vif *wlvif, 948 u8 hlid, const u8 *addr) 949 { 950 struct ieee80211_sta *sta; 951 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 952 953 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID || 954 is_zero_ether_addr(addr))) 955 return; 956 957 rcu_read_lock(); 958 sta = ieee80211_find_sta(vif, addr); 959 if (sta) 960 wlcore_save_freed_pkts(wl, wlvif, hlid, sta); 961 rcu_read_unlock(); 962 } 963 964 static void wlcore_print_recovery(struct wl1271 *wl) 965 { 966 u32 pc = 0; 967 u32 hint_sts = 0; 968 int ret; 969 970 wl1271_info("Hardware recovery in progress. FW ver: %s", 971 wl->chip.fw_ver_str); 972 973 /* change partitions momentarily so we can read the FW pc */ 974 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 975 if (ret < 0) 976 return; 977 978 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc); 979 if (ret < 0) 980 return; 981 982 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts); 983 if (ret < 0) 984 return; 985 986 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d", 987 pc, hint_sts, ++wl->recovery_count); 988 989 wlcore_set_partition(wl, &wl->ptable[PART_WORK]); 990 } 991 992 993 static void wl1271_recovery_work(struct work_struct *work) 994 { 995 struct wl1271 *wl = 996 container_of(work, struct wl1271, recovery_work); 997 struct wl12xx_vif *wlvif; 998 struct ieee80211_vif *vif; 999 int error; 1000 1001 mutex_lock(&wl->mutex); 1002 1003 if (wl->state == WLCORE_STATE_OFF || wl->plt) 1004 goto out_unlock; 1005 1006 error = pm_runtime_resume_and_get(wl->dev); 1007 if (error < 0) 1008 wl1271_warning("Enable for recovery failed"); 1009 wlcore_disable_interrupts_nosync(wl); 1010 1011 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) { 1012 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST) 1013 wl12xx_read_fwlog_panic(wl); 1014 wlcore_print_recovery(wl); 1015 } 1016 1017 BUG_ON(wl->conf.recovery.bug_on_recovery && 1018 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 1019 1020 clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 1021 1022 if (wl->conf.recovery.no_recovery) { 1023 wl1271_info("No recovery (chosen on module load). Fw will remain stuck."); 1024 goto out_unlock; 1025 } 1026 1027 /* Prevent spurious TX during FW restart */ 1028 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); 1029 1030 /* reboot the chipset */ 1031 while (!list_empty(&wl->wlvif_list)) { 1032 wlvif = list_first_entry(&wl->wlvif_list, 1033 struct wl12xx_vif, list); 1034 vif = wl12xx_wlvif_to_vif(wlvif); 1035 1036 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 1037 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { 1038 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid, 1039 vif->bss_conf.bssid); 1040 } 1041 1042 __wl1271_op_remove_interface(wl, vif, false); 1043 } 1044 1045 wlcore_op_stop_locked(wl); 1046 pm_runtime_put_autosuspend(wl->dev); 1047 1048 ieee80211_restart_hw(wl->hw); 1049 1050 /* 1051 * Its safe to enable TX now - the queues are stopped after a request 1052 * to restart the HW. 1053 */ 1054 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART); 1055 1056 out_unlock: 1057 wl->watchdog_recovery = false; 1058 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags); 1059 mutex_unlock(&wl->mutex); 1060 } 1061 1062 static int wlcore_fw_wakeup(struct wl1271 *wl) 1063 { 1064 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); 1065 } 1066 1067 static int wl1271_setup(struct wl1271 *wl) 1068 { 1069 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL); 1070 if (!wl->raw_fw_status) 1071 goto err; 1072 1073 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL); 1074 if (!wl->fw_status) 1075 goto err; 1076 1077 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); 1078 if (!wl->tx_res_if) 1079 goto err; 1080 1081 return 0; 1082 err: 1083 kfree(wl->fw_status); 1084 kfree(wl->raw_fw_status); 1085 return -ENOMEM; 1086 } 1087 1088 static int wl12xx_set_power_on(struct wl1271 *wl) 1089 { 1090 int ret; 1091 1092 msleep(WL1271_PRE_POWER_ON_SLEEP); 1093 ret = wl1271_power_on(wl); 1094 if (ret < 0) 1095 goto out; 1096 msleep(WL1271_POWER_ON_SLEEP); 1097 wl1271_io_reset(wl); 1098 wl1271_io_init(wl); 1099 1100 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]); 1101 if (ret < 0) 1102 goto fail; 1103 1104 /* ELP module wake up */ 1105 ret = wlcore_fw_wakeup(wl); 1106 if (ret < 0) 1107 goto fail; 1108 1109 out: 1110 return ret; 1111 1112 fail: 1113 wl1271_power_off(wl); 1114 return ret; 1115 } 1116 1117 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt) 1118 { 1119 int ret = 0; 1120 1121 ret = wl12xx_set_power_on(wl); 1122 if (ret < 0) 1123 goto out; 1124 1125 /* 1126 * For wl127x based devices we could use the default block 1127 * size (512 bytes), but due to a bug in the sdio driver, we 1128 * need to set it explicitly after the chip is powered on. To 1129 * simplify the code and since the performance impact is 1130 * negligible, we use the same block size for all different 1131 * chip types. 1132 * 1133 * Check if the bus supports blocksize alignment and, if it 1134 * doesn't, make sure we don't have the quirk. 1135 */ 1136 if (!wl1271_set_block_size(wl)) 1137 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN; 1138 1139 /* TODO: make sure the lower driver has set things up correctly */ 1140 1141 ret = wl1271_setup(wl); 1142 if (ret < 0) 1143 goto out; 1144 1145 ret = wl12xx_fetch_firmware(wl, plt); 1146 if (ret < 0) { 1147 kfree(wl->fw_status); 1148 kfree(wl->raw_fw_status); 1149 kfree(wl->tx_res_if); 1150 } 1151 1152 out: 1153 return ret; 1154 } 1155 1156 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode) 1157 { 1158 int retries = WL1271_BOOT_RETRIES; 1159 struct wiphy *wiphy = wl->hw->wiphy; 1160 1161 static const char* const PLT_MODE[] = { 1162 "PLT_OFF", 1163 "PLT_ON", 1164 "PLT_FEM_DETECT", 1165 "PLT_CHIP_AWAKE" 1166 }; 1167 1168 int ret; 1169 1170 mutex_lock(&wl->mutex); 1171 1172 wl1271_notice("power up"); 1173 1174 if (wl->state != WLCORE_STATE_OFF) { 1175 wl1271_error("cannot go into PLT state because not " 1176 "in off state: %d", wl->state); 1177 ret = -EBUSY; 1178 goto out; 1179 } 1180 1181 /* Indicate to lower levels that we are now in PLT mode */ 1182 wl->plt = true; 1183 wl->plt_mode = plt_mode; 1184 1185 while (retries) { 1186 retries--; 1187 ret = wl12xx_chip_wakeup(wl, true); 1188 if (ret < 0) 1189 goto power_off; 1190 1191 if (plt_mode != PLT_CHIP_AWAKE) { 1192 ret = wl->ops->plt_init(wl); 1193 if (ret < 0) 1194 goto power_off; 1195 } 1196 1197 wl->state = WLCORE_STATE_ON; 1198 wl1271_notice("firmware booted in PLT mode %s (%s)", 1199 PLT_MODE[plt_mode], 1200 wl->chip.fw_ver_str); 1201 1202 /* update hw/fw version info in wiphy struct */ 1203 wiphy->hw_version = wl->chip.id; 1204 strscpy(wiphy->fw_version, wl->chip.fw_ver_str, 1205 sizeof(wiphy->fw_version)); 1206 1207 goto out; 1208 1209 power_off: 1210 wl1271_power_off(wl); 1211 } 1212 1213 wl->plt = false; 1214 wl->plt_mode = PLT_OFF; 1215 1216 wl1271_error("firmware boot in PLT mode failed despite %d retries", 1217 WL1271_BOOT_RETRIES); 1218 out: 1219 mutex_unlock(&wl->mutex); 1220 1221 return ret; 1222 } 1223 1224 int wl1271_plt_stop(struct wl1271 *wl) 1225 { 1226 int ret = 0; 1227 1228 wl1271_notice("power down"); 1229 1230 /* 1231 * Interrupts must be disabled before setting the state to OFF. 1232 * Otherwise, the interrupt handler might be called and exit without 1233 * reading the interrupt status. 1234 */ 1235 wlcore_disable_interrupts(wl); 1236 mutex_lock(&wl->mutex); 1237 if (!wl->plt) { 1238 mutex_unlock(&wl->mutex); 1239 1240 /* 1241 * This will not necessarily enable interrupts as interrupts 1242 * may have been disabled when op_stop was called. It will, 1243 * however, balance the above call to disable_interrupts(). 1244 */ 1245 wlcore_enable_interrupts(wl); 1246 1247 wl1271_error("cannot power down because not in PLT " 1248 "state: %d", wl->state); 1249 ret = -EBUSY; 1250 goto out; 1251 } 1252 1253 mutex_unlock(&wl->mutex); 1254 1255 wl1271_flush_deferred_work(wl); 1256 cancel_work_sync(&wl->netstack_work); 1257 cancel_work_sync(&wl->recovery_work); 1258 cancel_delayed_work_sync(&wl->tx_watchdog_work); 1259 1260 mutex_lock(&wl->mutex); 1261 wl1271_power_off(wl); 1262 wl->flags = 0; 1263 wl->sleep_auth = WL1271_PSM_ILLEGAL; 1264 wl->state = WLCORE_STATE_OFF; 1265 wl->plt = false; 1266 wl->plt_mode = PLT_OFF; 1267 wl->rx_counter = 0; 1268 mutex_unlock(&wl->mutex); 1269 1270 out: 1271 return ret; 1272 } 1273 1274 static void wl1271_op_tx(struct ieee80211_hw *hw, 1275 struct ieee80211_tx_control *control, 1276 struct sk_buff *skb) 1277 { 1278 struct wl1271 *wl = hw->priv; 1279 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1280 struct ieee80211_vif *vif = info->control.vif; 1281 struct wl12xx_vif *wlvif = NULL; 1282 unsigned long flags; 1283 int q, mapping; 1284 u8 hlid; 1285 1286 if (!vif) { 1287 wl1271_debug(DEBUG_TX, "DROP skb with no vif"); 1288 ieee80211_free_txskb(hw, skb); 1289 return; 1290 } 1291 1292 wlvif = wl12xx_vif_to_data(vif); 1293 mapping = skb_get_queue_mapping(skb); 1294 q = wl1271_tx_get_queue(mapping); 1295 1296 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta); 1297 1298 spin_lock_irqsave(&wl->wl_lock, flags); 1299 1300 /* 1301 * drop the packet if the link is invalid or the queue is stopped 1302 * for any reason but watermark. Watermark is a "soft"-stop so we 1303 * allow these packets through. 1304 */ 1305 if (hlid == WL12XX_INVALID_LINK_ID || 1306 (!test_bit(hlid, wlvif->links_map)) || 1307 (wlcore_is_queue_stopped_locked(wl, wlvif, q) && 1308 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, 1309 WLCORE_QUEUE_STOP_REASON_WATERMARK))) { 1310 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); 1311 ieee80211_free_txskb(hw, skb); 1312 goto out; 1313 } 1314 1315 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d", 1316 hlid, q, skb->len); 1317 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); 1318 1319 wl->tx_queue_count[q]++; 1320 wlvif->tx_queue_count[q]++; 1321 1322 /* 1323 * The workqueue is slow to process the tx_queue and we need stop 1324 * the queue here, otherwise the queue will get too long. 1325 */ 1326 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK && 1327 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, 1328 WLCORE_QUEUE_STOP_REASON_WATERMARK)) { 1329 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); 1330 wlcore_stop_queue_locked(wl, wlvif, q, 1331 WLCORE_QUEUE_STOP_REASON_WATERMARK); 1332 } 1333 1334 /* 1335 * The chip specific setup must run before the first TX packet - 1336 * before that, the tx_work will not be initialized! 1337 */ 1338 1339 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 1340 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) 1341 ieee80211_queue_work(wl->hw, &wl->tx_work); 1342 1343 out: 1344 spin_unlock_irqrestore(&wl->wl_lock, flags); 1345 } 1346 1347 int wl1271_tx_dummy_packet(struct wl1271 *wl) 1348 { 1349 unsigned long flags; 1350 int q; 1351 1352 /* no need to queue a new dummy packet if one is already pending */ 1353 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) 1354 return 0; 1355 1356 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet)); 1357 1358 spin_lock_irqsave(&wl->wl_lock, flags); 1359 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags); 1360 wl->tx_queue_count[q]++; 1361 spin_unlock_irqrestore(&wl->wl_lock, flags); 1362 1363 /* The FW is low on RX memory blocks, so send the dummy packet asap */ 1364 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1365 return wlcore_tx_work_locked(wl); 1366 1367 /* 1368 * If the FW TX is busy, TX work will be scheduled by the threaded 1369 * interrupt handler function 1370 */ 1371 return 0; 1372 } 1373 1374 /* 1375 * The size of the dummy packet should be at least 1400 bytes. However, in 1376 * order to minimize the number of bus transactions, aligning it to 512 bytes 1377 * boundaries could be beneficial, performance wise 1378 */ 1379 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512)) 1380 1381 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) 1382 { 1383 struct sk_buff *skb; 1384 struct ieee80211_hdr_3addr *hdr; 1385 unsigned int dummy_packet_size; 1386 1387 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE - 1388 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr); 1389 1390 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE); 1391 if (!skb) { 1392 wl1271_warning("Failed to allocate a dummy packet skb"); 1393 return NULL; 1394 } 1395 1396 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr)); 1397 1398 hdr = skb_put_zero(skb, sizeof(*hdr)); 1399 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 1400 IEEE80211_STYPE_NULLFUNC | 1401 IEEE80211_FCTL_TODS); 1402 1403 skb_put_zero(skb, dummy_packet_size); 1404 1405 /* Dummy packets require the TID to be management */ 1406 skb->priority = WL1271_TID_MGMT; 1407 1408 /* Initialize all fields that might be used */ 1409 skb_set_queue_mapping(skb, 0); 1410 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info)); 1411 1412 return skb; 1413 } 1414 1415 1416 static int 1417 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p) 1418 { 1419 int num_fields = 0, in_field = 0, fields_size = 0; 1420 int i, pattern_len = 0; 1421 1422 if (!p->mask) { 1423 wl1271_warning("No mask in WoWLAN pattern"); 1424 return -EINVAL; 1425 } 1426 1427 /* 1428 * The pattern is broken up into segments of bytes at different offsets 1429 * that need to be checked by the FW filter. Each segment is called 1430 * a field in the FW API. We verify that the total number of fields 1431 * required for this pattern won't exceed FW limits (8) 1432 * as well as the total fields buffer won't exceed the FW limit. 1433 * Note that if there's a pattern which crosses Ethernet/IP header 1434 * boundary a new field is required. 1435 */ 1436 for (i = 0; i < p->pattern_len; i++) { 1437 if (test_bit(i, (unsigned long *)p->mask)) { 1438 if (!in_field) { 1439 in_field = 1; 1440 pattern_len = 1; 1441 } else { 1442 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) { 1443 num_fields++; 1444 fields_size += pattern_len + 1445 RX_FILTER_FIELD_OVERHEAD; 1446 pattern_len = 1; 1447 } else 1448 pattern_len++; 1449 } 1450 } else { 1451 if (in_field) { 1452 in_field = 0; 1453 fields_size += pattern_len + 1454 RX_FILTER_FIELD_OVERHEAD; 1455 num_fields++; 1456 } 1457 } 1458 } 1459 1460 if (in_field) { 1461 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD; 1462 num_fields++; 1463 } 1464 1465 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) { 1466 wl1271_warning("RX Filter too complex. Too many segments"); 1467 return -EINVAL; 1468 } 1469 1470 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) { 1471 wl1271_warning("RX filter pattern is too big"); 1472 return -E2BIG; 1473 } 1474 1475 return 0; 1476 } 1477 1478 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void) 1479 { 1480 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL); 1481 } 1482 1483 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter) 1484 { 1485 int i; 1486 1487 if (filter == NULL) 1488 return; 1489 1490 for (i = 0; i < filter->num_fields; i++) 1491 kfree(filter->fields[i].pattern); 1492 1493 kfree(filter); 1494 } 1495 1496 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter, 1497 u16 offset, u8 flags, 1498 const u8 *pattern, u8 len) 1499 { 1500 struct wl12xx_rx_filter_field *field; 1501 1502 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) { 1503 wl1271_warning("Max fields per RX filter. can't alloc another"); 1504 return -EINVAL; 1505 } 1506 1507 field = &filter->fields[filter->num_fields]; 1508 1509 field->pattern = kmemdup(pattern, len, GFP_KERNEL); 1510 if (!field->pattern) { 1511 wl1271_warning("Failed to allocate RX filter pattern"); 1512 return -ENOMEM; 1513 } 1514 1515 filter->num_fields++; 1516 1517 field->offset = cpu_to_le16(offset); 1518 field->flags = flags; 1519 field->len = len; 1520 1521 return 0; 1522 } 1523 1524 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter) 1525 { 1526 int i, fields_size = 0; 1527 1528 for (i = 0; i < filter->num_fields; i++) 1529 fields_size += filter->fields[i].len + 1530 sizeof(struct wl12xx_rx_filter_field) - 1531 sizeof(u8 *); 1532 1533 return fields_size; 1534 } 1535 1536 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter, 1537 u8 *buf) 1538 { 1539 int i; 1540 struct wl12xx_rx_filter_field *field; 1541 1542 for (i = 0; i < filter->num_fields; i++) { 1543 field = (struct wl12xx_rx_filter_field *)buf; 1544 1545 field->offset = filter->fields[i].offset; 1546 field->flags = filter->fields[i].flags; 1547 field->len = filter->fields[i].len; 1548 1549 memcpy(&field->pattern, filter->fields[i].pattern, field->len); 1550 buf += sizeof(struct wl12xx_rx_filter_field) - 1551 sizeof(u8 *) + field->len; 1552 } 1553 } 1554 1555 /* 1556 * Allocates an RX filter returned through f 1557 * which needs to be freed using rx_filter_free() 1558 */ 1559 static int 1560 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p, 1561 struct wl12xx_rx_filter **f) 1562 { 1563 int i, j, ret = 0; 1564 struct wl12xx_rx_filter *filter; 1565 u16 offset; 1566 u8 flags, len; 1567 1568 filter = wl1271_rx_filter_alloc(); 1569 if (!filter) { 1570 wl1271_warning("Failed to alloc rx filter"); 1571 ret = -ENOMEM; 1572 goto err; 1573 } 1574 1575 i = 0; 1576 while (i < p->pattern_len) { 1577 if (!test_bit(i, (unsigned long *)p->mask)) { 1578 i++; 1579 continue; 1580 } 1581 1582 for (j = i; j < p->pattern_len; j++) { 1583 if (!test_bit(j, (unsigned long *)p->mask)) 1584 break; 1585 1586 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE && 1587 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE) 1588 break; 1589 } 1590 1591 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) { 1592 offset = i; 1593 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER; 1594 } else { 1595 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE; 1596 flags = WL1271_RX_FILTER_FLAG_IP_HEADER; 1597 } 1598 1599 len = j - i; 1600 1601 ret = wl1271_rx_filter_alloc_field(filter, 1602 offset, 1603 flags, 1604 &p->pattern[i], len); 1605 if (ret) 1606 goto err; 1607 1608 i = j; 1609 } 1610 1611 filter->action = FILTER_SIGNAL; 1612 1613 *f = filter; 1614 return 0; 1615 1616 err: 1617 wl1271_rx_filter_free(filter); 1618 *f = NULL; 1619 1620 return ret; 1621 } 1622 1623 static int wl1271_configure_wowlan(struct wl1271 *wl, 1624 struct cfg80211_wowlan *wow) 1625 { 1626 int i, ret; 1627 1628 if (!wow || wow->any || !wow->n_patterns) { 1629 ret = wl1271_acx_default_rx_filter_enable(wl, 0, 1630 FILTER_SIGNAL); 1631 if (ret) 1632 goto out; 1633 1634 ret = wl1271_rx_filter_clear_all(wl); 1635 if (ret) 1636 goto out; 1637 1638 return 0; 1639 } 1640 1641 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS)) 1642 return -EINVAL; 1643 1644 /* Validate all incoming patterns before clearing current FW state */ 1645 for (i = 0; i < wow->n_patterns; i++) { 1646 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]); 1647 if (ret) { 1648 wl1271_warning("Bad wowlan pattern %d", i); 1649 return ret; 1650 } 1651 } 1652 1653 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL); 1654 if (ret) 1655 goto out; 1656 1657 ret = wl1271_rx_filter_clear_all(wl); 1658 if (ret) 1659 goto out; 1660 1661 /* Translate WoWLAN patterns into filters */ 1662 for (i = 0; i < wow->n_patterns; i++) { 1663 struct cfg80211_pkt_pattern *p; 1664 struct wl12xx_rx_filter *filter = NULL; 1665 1666 p = &wow->patterns[i]; 1667 1668 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter); 1669 if (ret) { 1670 wl1271_warning("Failed to create an RX filter from " 1671 "wowlan pattern %d", i); 1672 goto out; 1673 } 1674 1675 ret = wl1271_rx_filter_enable(wl, i, 1, filter); 1676 1677 wl1271_rx_filter_free(filter); 1678 if (ret) 1679 goto out; 1680 } 1681 1682 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP); 1683 1684 out: 1685 return ret; 1686 } 1687 1688 static int wl1271_configure_suspend_sta(struct wl1271 *wl, 1689 struct wl12xx_vif *wlvif, 1690 struct cfg80211_wowlan *wow) 1691 { 1692 int ret = 0; 1693 1694 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1695 goto out; 1696 1697 ret = wl1271_configure_wowlan(wl, wow); 1698 if (ret < 0) 1699 goto out; 1700 1701 if ((wl->conf.conn.suspend_wake_up_event == 1702 wl->conf.conn.wake_up_event) && 1703 (wl->conf.conn.suspend_listen_interval == 1704 wl->conf.conn.listen_interval)) 1705 goto out; 1706 1707 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1708 wl->conf.conn.suspend_wake_up_event, 1709 wl->conf.conn.suspend_listen_interval); 1710 1711 if (ret < 0) 1712 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1713 out: 1714 return ret; 1715 1716 } 1717 1718 static int wl1271_configure_suspend_ap(struct wl1271 *wl, 1719 struct wl12xx_vif *wlvif, 1720 struct cfg80211_wowlan *wow) 1721 { 1722 int ret = 0; 1723 1724 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) 1725 goto out; 1726 1727 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 1728 if (ret < 0) 1729 goto out; 1730 1731 ret = wl1271_configure_wowlan(wl, wow); 1732 if (ret < 0) 1733 goto out; 1734 1735 out: 1736 return ret; 1737 1738 } 1739 1740 static int wl1271_configure_suspend(struct wl1271 *wl, 1741 struct wl12xx_vif *wlvif, 1742 struct cfg80211_wowlan *wow) 1743 { 1744 if (wlvif->bss_type == BSS_TYPE_STA_BSS) 1745 return wl1271_configure_suspend_sta(wl, wlvif, wow); 1746 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 1747 return wl1271_configure_suspend_ap(wl, wlvif, wow); 1748 return 0; 1749 } 1750 1751 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif) 1752 { 1753 int ret = 0; 1754 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; 1755 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; 1756 1757 if ((!is_ap) && (!is_sta)) 1758 return; 1759 1760 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) || 1761 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))) 1762 return; 1763 1764 wl1271_configure_wowlan(wl, NULL); 1765 1766 if (is_sta) { 1767 if ((wl->conf.conn.suspend_wake_up_event == 1768 wl->conf.conn.wake_up_event) && 1769 (wl->conf.conn.suspend_listen_interval == 1770 wl->conf.conn.listen_interval)) 1771 return; 1772 1773 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1774 wl->conf.conn.wake_up_event, 1775 wl->conf.conn.listen_interval); 1776 1777 if (ret < 0) 1778 wl1271_error("resume: wake up conditions failed: %d", 1779 ret); 1780 1781 } else if (is_ap) { 1782 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); 1783 } 1784 } 1785 1786 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw, 1787 struct cfg80211_wowlan *wow) 1788 { 1789 struct wl1271 *wl = hw->priv; 1790 struct wl12xx_vif *wlvif; 1791 unsigned long flags; 1792 int ret; 1793 1794 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow); 1795 WARN_ON(!wow); 1796 1797 /* we want to perform the recovery before suspending */ 1798 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { 1799 wl1271_warning("postponing suspend to perform recovery"); 1800 return -EBUSY; 1801 } 1802 1803 wl1271_tx_flush(wl); 1804 1805 mutex_lock(&wl->mutex); 1806 1807 ret = pm_runtime_resume_and_get(wl->dev); 1808 if (ret < 0) { 1809 mutex_unlock(&wl->mutex); 1810 return ret; 1811 } 1812 1813 wl->wow_enabled = true; 1814 wl12xx_for_each_wlvif(wl, wlvif) { 1815 if (wlcore_is_p2p_mgmt(wlvif)) 1816 continue; 1817 1818 ret = wl1271_configure_suspend(wl, wlvif, wow); 1819 if (ret < 0) { 1820 goto out_sleep; 1821 } 1822 } 1823 1824 /* disable fast link flow control notifications from FW */ 1825 ret = wlcore_hw_interrupt_notify(wl, false); 1826 if (ret < 0) 1827 goto out_sleep; 1828 1829 /* if filtering is enabled, configure the FW to drop all RX BA frames */ 1830 ret = wlcore_hw_rx_ba_filter(wl, 1831 !!wl->conf.conn.suspend_rx_ba_activity); 1832 if (ret < 0) 1833 goto out_sleep; 1834 1835 out_sleep: 1836 pm_runtime_put_noidle(wl->dev); 1837 mutex_unlock(&wl->mutex); 1838 1839 if (ret < 0) { 1840 wl1271_warning("couldn't prepare device to suspend"); 1841 return ret; 1842 } 1843 1844 /* flush any remaining work */ 1845 wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1846 1847 flush_work(&wl->tx_work); 1848 1849 /* 1850 * Cancel the watchdog even if above tx_flush failed. We will detect 1851 * it on resume anyway. 1852 */ 1853 cancel_delayed_work(&wl->tx_watchdog_work); 1854 1855 /* 1856 * set suspended flag to avoid triggering a new threaded_irq 1857 * work. 1858 */ 1859 spin_lock_irqsave(&wl->wl_lock, flags); 1860 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1861 spin_unlock_irqrestore(&wl->wl_lock, flags); 1862 1863 return pm_runtime_force_suspend(wl->dev); 1864 } 1865 1866 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw) 1867 { 1868 struct wl1271 *wl = hw->priv; 1869 struct wl12xx_vif *wlvif; 1870 unsigned long flags; 1871 bool run_irq_work = false, pending_recovery; 1872 int ret; 1873 1874 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d", 1875 wl->wow_enabled); 1876 WARN_ON(!wl->wow_enabled); 1877 1878 ret = pm_runtime_force_resume(wl->dev); 1879 if (ret < 0) { 1880 wl1271_error("ELP wakeup failure!"); 1881 goto out_sleep; 1882 } 1883 1884 /* 1885 * re-enable irq_work enqueuing, and call irq_work directly if 1886 * there is a pending work. 1887 */ 1888 spin_lock_irqsave(&wl->wl_lock, flags); 1889 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1890 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags)) 1891 run_irq_work = true; 1892 spin_unlock_irqrestore(&wl->wl_lock, flags); 1893 1894 mutex_lock(&wl->mutex); 1895 1896 /* test the recovery flag before calling any SDIO functions */ 1897 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, 1898 &wl->flags); 1899 1900 if (run_irq_work) { 1901 wl1271_debug(DEBUG_MAC80211, 1902 "run postponed irq_work directly"); 1903 1904 /* don't talk to the HW if recovery is pending */ 1905 if (!pending_recovery) { 1906 ret = wlcore_irq_locked(wl); 1907 if (ret) 1908 wl12xx_queue_recovery_work(wl); 1909 } 1910 1911 wlcore_enable_interrupts(wl); 1912 } 1913 1914 if (pending_recovery) { 1915 wl1271_warning("queuing forgotten recovery on resume"); 1916 ieee80211_queue_work(wl->hw, &wl->recovery_work); 1917 goto out_sleep; 1918 } 1919 1920 ret = pm_runtime_resume_and_get(wl->dev); 1921 if (ret < 0) 1922 goto out; 1923 1924 wl12xx_for_each_wlvif(wl, wlvif) { 1925 if (wlcore_is_p2p_mgmt(wlvif)) 1926 continue; 1927 1928 wl1271_configure_resume(wl, wlvif); 1929 } 1930 1931 ret = wlcore_hw_interrupt_notify(wl, true); 1932 if (ret < 0) 1933 goto out_sleep; 1934 1935 /* if filtering is enabled, configure the FW to drop all RX BA frames */ 1936 ret = wlcore_hw_rx_ba_filter(wl, false); 1937 if (ret < 0) 1938 goto out_sleep; 1939 1940 out_sleep: 1941 pm_runtime_put_autosuspend(wl->dev); 1942 1943 out: 1944 wl->wow_enabled = false; 1945 1946 /* 1947 * Set a flag to re-init the watchdog on the first Tx after resume. 1948 * That way we avoid possible conditions where Tx-complete interrupts 1949 * fail to arrive and we perform a spurious recovery. 1950 */ 1951 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags); 1952 mutex_unlock(&wl->mutex); 1953 1954 return 0; 1955 } 1956 1957 static int wl1271_op_start(struct ieee80211_hw *hw) 1958 { 1959 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 1960 1961 /* 1962 * We have to delay the booting of the hardware because 1963 * we need to know the local MAC address before downloading and 1964 * initializing the firmware. The MAC address cannot be changed 1965 * after boot, and without the proper MAC address, the firmware 1966 * will not function properly. 1967 * 1968 * The MAC address is first known when the corresponding interface 1969 * is added. That is where we will initialize the hardware. 1970 */ 1971 1972 return 0; 1973 } 1974 1975 static void wlcore_op_stop_locked(struct wl1271 *wl) 1976 { 1977 int i; 1978 1979 if (wl->state == WLCORE_STATE_OFF) { 1980 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, 1981 &wl->flags)) 1982 wlcore_enable_interrupts(wl); 1983 1984 return; 1985 } 1986 1987 /* 1988 * this must be before the cancel_work calls below, so that the work 1989 * functions don't perform further work. 1990 */ 1991 wl->state = WLCORE_STATE_OFF; 1992 1993 /* 1994 * Use the nosync variant to disable interrupts, so the mutex could be 1995 * held while doing so without deadlocking. 1996 */ 1997 wlcore_disable_interrupts_nosync(wl); 1998 1999 mutex_unlock(&wl->mutex); 2000 2001 wlcore_synchronize_interrupts(wl); 2002 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 2003 cancel_work_sync(&wl->recovery_work); 2004 wl1271_flush_deferred_work(wl); 2005 cancel_delayed_work_sync(&wl->scan_complete_work); 2006 cancel_work_sync(&wl->netstack_work); 2007 cancel_work_sync(&wl->tx_work); 2008 cancel_delayed_work_sync(&wl->tx_watchdog_work); 2009 2010 /* let's notify MAC80211 about the remaining pending TX frames */ 2011 mutex_lock(&wl->mutex); 2012 wl12xx_tx_reset(wl); 2013 2014 wl1271_power_off(wl); 2015 /* 2016 * In case a recovery was scheduled, interrupts were disabled to avoid 2017 * an interrupt storm. Now that the power is down, it is safe to 2018 * re-enable interrupts to balance the disable depth 2019 */ 2020 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) 2021 wlcore_enable_interrupts(wl); 2022 2023 wl->band = NL80211_BAND_2GHZ; 2024 2025 wl->rx_counter = 0; 2026 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 2027 wl->channel_type = NL80211_CHAN_NO_HT; 2028 wl->tx_blocks_available = 0; 2029 wl->tx_allocated_blocks = 0; 2030 wl->tx_results_count = 0; 2031 wl->tx_packets_count = 0; 2032 wl->time_offset = 0; 2033 wl->ap_fw_ps_map = 0; 2034 wl->ap_ps_map = 0; 2035 wl->sleep_auth = WL1271_PSM_ILLEGAL; 2036 memset(wl->roles_map, 0, sizeof(wl->roles_map)); 2037 memset(wl->links_map, 0, sizeof(wl->links_map)); 2038 memset(wl->roc_map, 0, sizeof(wl->roc_map)); 2039 memset(wl->session_ids, 0, sizeof(wl->session_ids)); 2040 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled)); 2041 wl->active_sta_count = 0; 2042 wl->active_link_count = 0; 2043 2044 /* The system link is always allocated */ 2045 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0; 2046 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0; 2047 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); 2048 2049 /* 2050 * this is performed after the cancel_work calls and the associated 2051 * mutex_lock, so that wl1271_op_add_interface does not accidentally 2052 * get executed before all these vars have been reset. 2053 */ 2054 wl->flags = 0; 2055 2056 wl->tx_blocks_freed = 0; 2057 2058 for (i = 0; i < NUM_TX_QUEUES; i++) { 2059 wl->tx_pkts_freed[i] = 0; 2060 wl->tx_allocated_pkts[i] = 0; 2061 } 2062 2063 wl1271_debugfs_reset(wl); 2064 2065 kfree(wl->raw_fw_status); 2066 wl->raw_fw_status = NULL; 2067 kfree(wl->fw_status); 2068 wl->fw_status = NULL; 2069 kfree(wl->tx_res_if); 2070 wl->tx_res_if = NULL; 2071 kfree(wl->target_mem_map); 2072 wl->target_mem_map = NULL; 2073 2074 /* 2075 * FW channels must be re-calibrated after recovery, 2076 * save current Reg-Domain channel configuration and clear it. 2077 */ 2078 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last, 2079 sizeof(wl->reg_ch_conf_pending)); 2080 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last)); 2081 } 2082 2083 static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend) 2084 { 2085 struct wl1271 *wl = hw->priv; 2086 2087 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 2088 2089 mutex_lock(&wl->mutex); 2090 2091 wlcore_op_stop_locked(wl); 2092 2093 mutex_unlock(&wl->mutex); 2094 } 2095 2096 static void wlcore_channel_switch_work(struct work_struct *work) 2097 { 2098 struct delayed_work *dwork; 2099 struct wl1271 *wl; 2100 struct ieee80211_vif *vif; 2101 struct wl12xx_vif *wlvif; 2102 int ret; 2103 2104 dwork = to_delayed_work(work); 2105 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work); 2106 wl = wlvif->wl; 2107 2108 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id); 2109 2110 mutex_lock(&wl->mutex); 2111 2112 if (unlikely(wl->state != WLCORE_STATE_ON)) 2113 goto out; 2114 2115 /* check the channel switch is still ongoing */ 2116 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) 2117 goto out; 2118 2119 vif = wl12xx_wlvif_to_vif(wlvif); 2120 ieee80211_chswitch_done(vif, false, 0); 2121 2122 ret = pm_runtime_resume_and_get(wl->dev); 2123 if (ret < 0) 2124 goto out; 2125 2126 wl12xx_cmd_stop_channel_switch(wl, wlvif); 2127 2128 pm_runtime_put_autosuspend(wl->dev); 2129 out: 2130 mutex_unlock(&wl->mutex); 2131 } 2132 2133 static void wlcore_connection_loss_work(struct work_struct *work) 2134 { 2135 struct delayed_work *dwork; 2136 struct wl1271 *wl; 2137 struct ieee80211_vif *vif; 2138 struct wl12xx_vif *wlvif; 2139 2140 dwork = to_delayed_work(work); 2141 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work); 2142 wl = wlvif->wl; 2143 2144 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id); 2145 2146 mutex_lock(&wl->mutex); 2147 2148 if (unlikely(wl->state != WLCORE_STATE_ON)) 2149 goto out; 2150 2151 /* Call mac80211 connection loss */ 2152 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 2153 goto out; 2154 2155 vif = wl12xx_wlvif_to_vif(wlvif); 2156 ieee80211_connection_loss(vif); 2157 out: 2158 mutex_unlock(&wl->mutex); 2159 } 2160 2161 static void wlcore_pending_auth_complete_work(struct work_struct *work) 2162 { 2163 struct delayed_work *dwork; 2164 struct wl1271 *wl; 2165 struct wl12xx_vif *wlvif; 2166 unsigned long time_spare; 2167 int ret; 2168 2169 dwork = to_delayed_work(work); 2170 wlvif = container_of(dwork, struct wl12xx_vif, 2171 pending_auth_complete_work); 2172 wl = wlvif->wl; 2173 2174 mutex_lock(&wl->mutex); 2175 2176 if (unlikely(wl->state != WLCORE_STATE_ON)) 2177 goto out; 2178 2179 /* 2180 * Make sure a second really passed since the last auth reply. Maybe 2181 * a second auth reply arrived while we were stuck on the mutex. 2182 * Check for a little less than the timeout to protect from scheduler 2183 * irregularities. 2184 */ 2185 time_spare = jiffies + 2186 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50); 2187 if (!time_after(time_spare, wlvif->pending_auth_reply_time)) 2188 goto out; 2189 2190 ret = pm_runtime_resume_and_get(wl->dev); 2191 if (ret < 0) 2192 goto out; 2193 2194 /* cancel the ROC if active */ 2195 wlcore_update_inconn_sta(wl, wlvif, NULL, false); 2196 2197 pm_runtime_put_autosuspend(wl->dev); 2198 out: 2199 mutex_unlock(&wl->mutex); 2200 } 2201 2202 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx) 2203 { 2204 u8 policy = find_first_zero_bit(wl->rate_policies_map, 2205 WL12XX_MAX_RATE_POLICIES); 2206 if (policy >= WL12XX_MAX_RATE_POLICIES) 2207 return -EBUSY; 2208 2209 __set_bit(policy, wl->rate_policies_map); 2210 *idx = policy; 2211 return 0; 2212 } 2213 2214 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx) 2215 { 2216 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES)) 2217 return; 2218 2219 __clear_bit(*idx, wl->rate_policies_map); 2220 *idx = WL12XX_MAX_RATE_POLICIES; 2221 } 2222 2223 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx) 2224 { 2225 u8 policy = find_first_zero_bit(wl->klv_templates_map, 2226 WLCORE_MAX_KLV_TEMPLATES); 2227 if (policy >= WLCORE_MAX_KLV_TEMPLATES) 2228 return -EBUSY; 2229 2230 __set_bit(policy, wl->klv_templates_map); 2231 *idx = policy; 2232 return 0; 2233 } 2234 2235 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx) 2236 { 2237 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES)) 2238 return; 2239 2240 __clear_bit(*idx, wl->klv_templates_map); 2241 *idx = WLCORE_MAX_KLV_TEMPLATES; 2242 } 2243 2244 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2245 { 2246 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 2247 2248 switch (wlvif->bss_type) { 2249 case BSS_TYPE_AP_BSS: 2250 if (wlvif->p2p) 2251 return WL1271_ROLE_P2P_GO; 2252 else if (ieee80211_vif_is_mesh(vif)) 2253 return WL1271_ROLE_MESH_POINT; 2254 else 2255 return WL1271_ROLE_AP; 2256 2257 case BSS_TYPE_STA_BSS: 2258 if (wlvif->p2p) 2259 return WL1271_ROLE_P2P_CL; 2260 else 2261 return WL1271_ROLE_STA; 2262 2263 case BSS_TYPE_IBSS: 2264 return WL1271_ROLE_IBSS; 2265 2266 default: 2267 wl1271_error("invalid bss_type: %d", wlvif->bss_type); 2268 } 2269 return WL12XX_INVALID_ROLE_TYPE; 2270 } 2271 2272 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif) 2273 { 2274 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2275 int i; 2276 2277 /* clear everything but the persistent data */ 2278 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent)); 2279 2280 switch (ieee80211_vif_type_p2p(vif)) { 2281 case NL80211_IFTYPE_P2P_CLIENT: 2282 wlvif->p2p = 1; 2283 fallthrough; 2284 case NL80211_IFTYPE_STATION: 2285 case NL80211_IFTYPE_P2P_DEVICE: 2286 wlvif->bss_type = BSS_TYPE_STA_BSS; 2287 break; 2288 case NL80211_IFTYPE_ADHOC: 2289 wlvif->bss_type = BSS_TYPE_IBSS; 2290 break; 2291 case NL80211_IFTYPE_P2P_GO: 2292 wlvif->p2p = 1; 2293 fallthrough; 2294 case NL80211_IFTYPE_AP: 2295 case NL80211_IFTYPE_MESH_POINT: 2296 wlvif->bss_type = BSS_TYPE_AP_BSS; 2297 break; 2298 default: 2299 wlvif->bss_type = MAX_BSS_TYPE; 2300 return -EOPNOTSUPP; 2301 } 2302 2303 wlvif->role_id = WL12XX_INVALID_ROLE_ID; 2304 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; 2305 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; 2306 2307 if (wlvif->bss_type == BSS_TYPE_STA_BSS || 2308 wlvif->bss_type == BSS_TYPE_IBSS) { 2309 /* init sta/ibss data */ 2310 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; 2311 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2312 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2313 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2314 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id); 2315 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 2316 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC; 2317 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC; 2318 } else { 2319 /* init ap data */ 2320 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2321 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; 2322 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); 2323 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx); 2324 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 2325 wl12xx_allocate_rate_policy(wl, 2326 &wlvif->ap.ucast_rate_idx[i]); 2327 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES; 2328 /* 2329 * TODO: check if basic_rate shouldn't be 2330 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 2331 * instead (the same thing for STA above). 2332 */ 2333 wlvif->basic_rate = CONF_TX_ENABLED_RATES; 2334 /* TODO: this seems to be used only for STA, check it */ 2335 wlvif->rate_set = CONF_TX_ENABLED_RATES; 2336 } 2337 2338 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate; 2339 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5; 2340 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT; 2341 2342 /* 2343 * mac80211 configures some values globally, while we treat them 2344 * per-interface. thus, on init, we have to copy them from wl 2345 */ 2346 wlvif->band = wl->band; 2347 wlvif->channel = wl->channel; 2348 wlvif->power_level = wl->power_level; 2349 wlvif->channel_type = wl->channel_type; 2350 2351 INIT_WORK(&wlvif->rx_streaming_enable_work, 2352 wl1271_rx_streaming_enable_work); 2353 INIT_WORK(&wlvif->rx_streaming_disable_work, 2354 wl1271_rx_streaming_disable_work); 2355 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work); 2356 INIT_DELAYED_WORK(&wlvif->channel_switch_work, 2357 wlcore_channel_switch_work); 2358 INIT_DELAYED_WORK(&wlvif->connection_loss_work, 2359 wlcore_connection_loss_work); 2360 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work, 2361 wlcore_pending_auth_complete_work); 2362 INIT_LIST_HEAD(&wlvif->list); 2363 2364 timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0); 2365 return 0; 2366 } 2367 2368 static int wl12xx_init_fw(struct wl1271 *wl) 2369 { 2370 int retries = WL1271_BOOT_RETRIES; 2371 bool booted = false; 2372 struct wiphy *wiphy = wl->hw->wiphy; 2373 int ret; 2374 2375 while (retries) { 2376 retries--; 2377 ret = wl12xx_chip_wakeup(wl, false); 2378 if (ret < 0) 2379 goto power_off; 2380 2381 ret = wl->ops->boot(wl); 2382 if (ret < 0) 2383 goto power_off; 2384 2385 ret = wl1271_hw_init(wl); 2386 if (ret < 0) 2387 goto irq_disable; 2388 2389 booted = true; 2390 break; 2391 2392 irq_disable: 2393 mutex_unlock(&wl->mutex); 2394 /* Unlocking the mutex in the middle of handling is 2395 inherently unsafe. In this case we deem it safe to do, 2396 because we need to let any possibly pending IRQ out of 2397 the system (and while we are WLCORE_STATE_OFF the IRQ 2398 work function will not do anything.) Also, any other 2399 possible concurrent operations will fail due to the 2400 current state, hence the wl1271 struct should be safe. */ 2401 wlcore_disable_interrupts(wl); 2402 wl1271_flush_deferred_work(wl); 2403 cancel_work_sync(&wl->netstack_work); 2404 mutex_lock(&wl->mutex); 2405 power_off: 2406 wl1271_power_off(wl); 2407 } 2408 2409 if (!booted) { 2410 wl1271_error("firmware boot failed despite %d retries", 2411 WL1271_BOOT_RETRIES); 2412 goto out; 2413 } 2414 2415 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str); 2416 2417 /* update hw/fw version info in wiphy struct */ 2418 wiphy->hw_version = wl->chip.id; 2419 strscpy(wiphy->fw_version, wl->chip.fw_ver_str, 2420 sizeof(wiphy->fw_version)); 2421 2422 /* 2423 * Now we know if 11a is supported (info from the NVS), so disable 2424 * 11a channels if not supported 2425 */ 2426 if (!wl->enable_11a) 2427 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0; 2428 2429 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported", 2430 wl->enable_11a ? "" : "not "); 2431 2432 wl->state = WLCORE_STATE_ON; 2433 out: 2434 return ret; 2435 } 2436 2437 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif) 2438 { 2439 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID; 2440 } 2441 2442 /* 2443 * Check whether a fw switch (i.e. moving from one loaded 2444 * fw to another) is needed. This function is also responsible 2445 * for updating wl->last_vif_count, so it must be called before 2446 * loading a non-plt fw (so the correct fw (single-role/multi-role) 2447 * will be used). 2448 */ 2449 static bool wl12xx_need_fw_change(struct wl1271 *wl, 2450 struct vif_counter_data vif_counter_data, 2451 bool add) 2452 { 2453 enum wl12xx_fw_type current_fw = wl->fw_type; 2454 u8 vif_count = vif_counter_data.counter; 2455 2456 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags)) 2457 return false; 2458 2459 /* increase the vif count if this is a new vif */ 2460 if (add && !vif_counter_data.cur_vif_running) 2461 vif_count++; 2462 2463 wl->last_vif_count = vif_count; 2464 2465 /* no need for fw change if the device is OFF */ 2466 if (wl->state == WLCORE_STATE_OFF) 2467 return false; 2468 2469 /* no need for fw change if a single fw is used */ 2470 if (!wl->mr_fw_name) 2471 return false; 2472 2473 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL) 2474 return true; 2475 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI) 2476 return true; 2477 2478 return false; 2479 } 2480 2481 /* 2482 * Enter "forced psm". Make sure the sta is in psm against the ap, 2483 * to make the fw switch a bit more disconnection-persistent. 2484 */ 2485 static void wl12xx_force_active_psm(struct wl1271 *wl) 2486 { 2487 struct wl12xx_vif *wlvif; 2488 2489 wl12xx_for_each_wlvif_sta(wl, wlvif) { 2490 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE); 2491 } 2492 } 2493 2494 struct wlcore_hw_queue_iter_data { 2495 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)]; 2496 /* current vif */ 2497 struct ieee80211_vif *vif; 2498 /* is the current vif among those iterated */ 2499 bool cur_running; 2500 }; 2501 2502 static void wlcore_hw_queue_iter(void *data, u8 *mac, 2503 struct ieee80211_vif *vif) 2504 { 2505 struct wlcore_hw_queue_iter_data *iter_data = data; 2506 2507 if (vif->type == NL80211_IFTYPE_P2P_DEVICE || 2508 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE)) 2509 return; 2510 2511 if (iter_data->cur_running || vif == iter_data->vif) { 2512 iter_data->cur_running = true; 2513 return; 2514 } 2515 2516 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map); 2517 } 2518 2519 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl, 2520 struct wl12xx_vif *wlvif) 2521 { 2522 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 2523 struct wlcore_hw_queue_iter_data iter_data = {}; 2524 int i, q_base; 2525 2526 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 2527 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; 2528 return 0; 2529 } 2530 2531 iter_data.vif = vif; 2532 2533 /* mark all bits taken by active interfaces */ 2534 ieee80211_iterate_active_interfaces_atomic(wl->hw, 2535 IEEE80211_IFACE_ITER_RESUME_ALL, 2536 wlcore_hw_queue_iter, &iter_data); 2537 2538 /* the current vif is already running in mac80211 (resume/recovery) */ 2539 if (iter_data.cur_running) { 2540 wlvif->hw_queue_base = vif->hw_queue[0]; 2541 wl1271_debug(DEBUG_MAC80211, 2542 "using pre-allocated hw queue base %d", 2543 wlvif->hw_queue_base); 2544 2545 /* interface type might have changed type */ 2546 goto adjust_cab_queue; 2547 } 2548 2549 q_base = find_first_zero_bit(iter_data.hw_queue_map, 2550 WLCORE_NUM_MAC_ADDRESSES); 2551 if (q_base >= WLCORE_NUM_MAC_ADDRESSES) 2552 return -EBUSY; 2553 2554 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES; 2555 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d", 2556 wlvif->hw_queue_base); 2557 2558 for (i = 0; i < NUM_TX_QUEUES; i++) { 2559 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0; 2560 /* register hw queues in mac80211 */ 2561 vif->hw_queue[i] = wlvif->hw_queue_base + i; 2562 } 2563 2564 adjust_cab_queue: 2565 /* the last places are reserved for cab queues per interface */ 2566 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 2567 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES + 2568 wlvif->hw_queue_base / NUM_TX_QUEUES; 2569 else 2570 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; 2571 2572 return 0; 2573 } 2574 2575 static int wl1271_op_add_interface(struct ieee80211_hw *hw, 2576 struct ieee80211_vif *vif) 2577 { 2578 struct wl1271 *wl = hw->priv; 2579 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2580 struct vif_counter_data vif_count; 2581 int ret = 0; 2582 u8 role_type; 2583 2584 if (wl->plt) { 2585 wl1271_error("Adding Interface not allowed while in PLT mode"); 2586 return -EBUSY; 2587 } 2588 2589 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 2590 IEEE80211_VIF_SUPPORTS_UAPSD | 2591 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 2592 2593 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 2594 ieee80211_vif_type_p2p(vif), vif->addr); 2595 2596 wl12xx_get_vif_count(hw, vif, &vif_count); 2597 2598 mutex_lock(&wl->mutex); 2599 2600 /* 2601 * in some very corner case HW recovery scenarios its possible to 2602 * get here before __wl1271_op_remove_interface is complete, so 2603 * opt out if that is the case. 2604 */ 2605 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || 2606 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { 2607 ret = -EBUSY; 2608 goto out_unlock; 2609 } 2610 2611 2612 ret = wl12xx_init_vif_data(wl, vif); 2613 if (ret < 0) 2614 goto out_unlock; 2615 2616 wlvif->wl = wl; 2617 role_type = wl12xx_get_role_type(wl, wlvif); 2618 if (role_type == WL12XX_INVALID_ROLE_TYPE) { 2619 ret = -EINVAL; 2620 goto out_unlock; 2621 } 2622 2623 ret = wlcore_allocate_hw_queue_base(wl, wlvif); 2624 if (ret < 0) 2625 goto out_unlock; 2626 2627 /* 2628 * TODO: after the nvs issue will be solved, move this block 2629 * to start(), and make sure here the driver is ON. 2630 */ 2631 if (wl->state == WLCORE_STATE_OFF) { 2632 /* 2633 * we still need this in order to configure the fw 2634 * while uploading the nvs 2635 */ 2636 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN); 2637 2638 ret = wl12xx_init_fw(wl); 2639 if (ret < 0) 2640 goto out_unlock; 2641 } 2642 2643 /* 2644 * Call runtime PM only after possible wl12xx_init_fw() above 2645 * is done. Otherwise we do not have interrupts enabled. 2646 */ 2647 ret = pm_runtime_resume_and_get(wl->dev); 2648 if (ret < 0) 2649 goto out_unlock; 2650 2651 if (wl12xx_need_fw_change(wl, vif_count, true)) { 2652 wl12xx_force_active_psm(wl); 2653 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 2654 mutex_unlock(&wl->mutex); 2655 wl1271_recovery_work(&wl->recovery_work); 2656 return 0; 2657 } 2658 2659 if (!wlcore_is_p2p_mgmt(wlvif)) { 2660 ret = wl12xx_cmd_role_enable(wl, vif->addr, 2661 role_type, &wlvif->role_id); 2662 if (ret < 0) 2663 goto out; 2664 2665 ret = wl1271_init_vif_specific(wl, vif); 2666 if (ret < 0) 2667 goto out; 2668 2669 } else { 2670 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE, 2671 &wlvif->dev_role_id); 2672 if (ret < 0) 2673 goto out; 2674 2675 /* needed mainly for configuring rate policies */ 2676 ret = wl1271_sta_hw_init(wl, wlvif); 2677 if (ret < 0) 2678 goto out; 2679 } 2680 2681 list_add(&wlvif->list, &wl->wlvif_list); 2682 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags); 2683 2684 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 2685 wl->ap_count++; 2686 else 2687 wl->sta_count++; 2688 out: 2689 pm_runtime_put_autosuspend(wl->dev); 2690 out_unlock: 2691 mutex_unlock(&wl->mutex); 2692 2693 return ret; 2694 } 2695 2696 static void __wl1271_op_remove_interface(struct wl1271 *wl, 2697 struct ieee80211_vif *vif, 2698 bool reset_tx_queues) 2699 { 2700 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2701 int i, ret; 2702 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2703 2704 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 2705 2706 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 2707 return; 2708 2709 /* because of hardware recovery, we may get here twice */ 2710 if (wl->state == WLCORE_STATE_OFF) 2711 return; 2712 2713 wl1271_info("down"); 2714 2715 if (wl->scan.state != WL1271_SCAN_STATE_IDLE && 2716 wl->scan_wlvif == wlvif) { 2717 struct cfg80211_scan_info info = { 2718 .aborted = true, 2719 }; 2720 2721 /* 2722 * Rearm the tx watchdog just before idling scan. This 2723 * prevents just-finished scans from triggering the watchdog 2724 */ 2725 wl12xx_rearm_tx_watchdog_locked(wl); 2726 2727 wl->scan.state = WL1271_SCAN_STATE_IDLE; 2728 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 2729 wl->scan_wlvif = NULL; 2730 wl->scan.req = NULL; 2731 ieee80211_scan_completed(wl->hw, &info); 2732 } 2733 2734 if (wl->sched_vif == wlvif) 2735 wl->sched_vif = NULL; 2736 2737 if (wl->roc_vif == vif) { 2738 wl->roc_vif = NULL; 2739 ieee80211_remain_on_channel_expired(wl->hw); 2740 } 2741 2742 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) { 2743 /* disable active roles */ 2744 ret = pm_runtime_resume_and_get(wl->dev); 2745 if (ret < 0) 2746 goto deinit; 2747 2748 if (wlvif->bss_type == BSS_TYPE_STA_BSS || 2749 wlvif->bss_type == BSS_TYPE_IBSS) { 2750 if (wl12xx_dev_role_started(wlvif)) 2751 wl12xx_stop_dev(wl, wlvif); 2752 } 2753 2754 if (!wlcore_is_p2p_mgmt(wlvif)) { 2755 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id); 2756 if (ret < 0) { 2757 pm_runtime_put_noidle(wl->dev); 2758 goto deinit; 2759 } 2760 } else { 2761 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id); 2762 if (ret < 0) { 2763 pm_runtime_put_noidle(wl->dev); 2764 goto deinit; 2765 } 2766 } 2767 2768 pm_runtime_put_autosuspend(wl->dev); 2769 } 2770 deinit: 2771 wl12xx_tx_reset_wlvif(wl, wlvif); 2772 2773 /* clear all hlids (except system_hlid) */ 2774 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID; 2775 2776 if (wlvif->bss_type == BSS_TYPE_STA_BSS || 2777 wlvif->bss_type == BSS_TYPE_IBSS) { 2778 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID; 2779 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx); 2780 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx); 2781 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx); 2782 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id); 2783 } else { 2784 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID; 2785 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID; 2786 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx); 2787 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx); 2788 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 2789 wl12xx_free_rate_policy(wl, 2790 &wlvif->ap.ucast_rate_idx[i]); 2791 wl1271_free_ap_keys(wl, wlvif); 2792 } 2793 2794 dev_kfree_skb(wlvif->probereq); 2795 wlvif->probereq = NULL; 2796 if (wl->last_wlvif == wlvif) 2797 wl->last_wlvif = NULL; 2798 list_del(&wlvif->list); 2799 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map)); 2800 wlvif->role_id = WL12XX_INVALID_ROLE_ID; 2801 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID; 2802 2803 if (is_ap) 2804 wl->ap_count--; 2805 else 2806 wl->sta_count--; 2807 2808 /* 2809 * Last AP, have more stations. Configure sleep auth according to STA. 2810 * Don't do thin on unintended recovery. 2811 */ 2812 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) && 2813 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) 2814 goto unlock; 2815 2816 if (wl->ap_count == 0 && is_ap) { 2817 /* mask ap events */ 2818 wl->event_mask &= ~wl->ap_event_mask; 2819 wl1271_event_unmask(wl); 2820 } 2821 2822 if (wl->ap_count == 0 && is_ap && wl->sta_count) { 2823 u8 sta_auth = wl->conf.conn.sta_sleep_auth; 2824 /* Configure for power according to debugfs */ 2825 if (sta_auth != WL1271_PSM_ILLEGAL) 2826 wl1271_acx_sleep_auth(wl, sta_auth); 2827 /* Configure for ELP power saving */ 2828 else 2829 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); 2830 } 2831 2832 unlock: 2833 mutex_unlock(&wl->mutex); 2834 2835 timer_delete_sync(&wlvif->rx_streaming_timer); 2836 cancel_work_sync(&wlvif->rx_streaming_enable_work); 2837 cancel_work_sync(&wlvif->rx_streaming_disable_work); 2838 cancel_work_sync(&wlvif->rc_update_work); 2839 cancel_delayed_work_sync(&wlvif->connection_loss_work); 2840 cancel_delayed_work_sync(&wlvif->channel_switch_work); 2841 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work); 2842 2843 mutex_lock(&wl->mutex); 2844 } 2845 2846 static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 2847 struct ieee80211_vif *vif) 2848 { 2849 struct wl1271 *wl = hw->priv; 2850 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 2851 struct wl12xx_vif *iter; 2852 struct vif_counter_data vif_count; 2853 2854 wl12xx_get_vif_count(hw, vif, &vif_count); 2855 mutex_lock(&wl->mutex); 2856 2857 if (wl->state == WLCORE_STATE_OFF || 2858 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 2859 goto out; 2860 2861 /* 2862 * wl->vif can be null here if someone shuts down the interface 2863 * just when hardware recovery has been started. 2864 */ 2865 wl12xx_for_each_wlvif(wl, iter) { 2866 if (iter != wlvif) 2867 continue; 2868 2869 __wl1271_op_remove_interface(wl, vif, true); 2870 break; 2871 } 2872 WARN_ON(iter != wlvif); 2873 if (wl12xx_need_fw_change(wl, vif_count, false)) { 2874 wl12xx_force_active_psm(wl); 2875 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 2876 wl12xx_queue_recovery_work(wl); 2877 } 2878 out: 2879 mutex_unlock(&wl->mutex); 2880 } 2881 2882 static int wl12xx_op_change_interface(struct ieee80211_hw *hw, 2883 struct ieee80211_vif *vif, 2884 enum nl80211_iftype new_type, bool p2p) 2885 { 2886 struct wl1271 *wl = hw->priv; 2887 int ret; 2888 2889 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); 2890 wl1271_op_remove_interface(hw, vif); 2891 2892 vif->type = new_type; 2893 vif->p2p = p2p; 2894 ret = wl1271_op_add_interface(hw, vif); 2895 2896 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags); 2897 return ret; 2898 } 2899 2900 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2901 { 2902 int ret; 2903 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); 2904 2905 /* 2906 * One of the side effects of the JOIN command is that is clears 2907 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated 2908 * to a WPA/WPA2 access point will therefore kill the data-path. 2909 * Currently the only valid scenario for JOIN during association 2910 * is on roaming, in which case we will also be given new keys. 2911 * Keep the below message for now, unless it starts bothering 2912 * users who really like to roam a lot :) 2913 */ 2914 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 2915 wl1271_info("JOIN while associated."); 2916 2917 /* clear encryption type */ 2918 wlvif->encryption_type = KEY_NONE; 2919 2920 if (is_ibss) 2921 ret = wl12xx_cmd_role_start_ibss(wl, wlvif); 2922 else 2923 ret = wl12xx_cmd_role_start_sta(wl, wlvif); 2924 2925 return ret; 2926 } 2927 2928 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb, 2929 int offset) 2930 { 2931 u8 ssid_len; 2932 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, 2933 skb->len - offset); 2934 2935 if (!ptr) { 2936 wl1271_error("No SSID in IEs!"); 2937 return -ENOENT; 2938 } 2939 2940 ssid_len = ptr[1]; 2941 if (ssid_len > IEEE80211_MAX_SSID_LEN) { 2942 wl1271_error("SSID is too long!"); 2943 return -EINVAL; 2944 } 2945 2946 wlvif->ssid_len = ssid_len; 2947 memcpy(wlvif->ssid, ptr+2, ssid_len); 2948 return 0; 2949 } 2950 2951 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif) 2952 { 2953 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 2954 struct sk_buff *skb; 2955 int ieoffset; 2956 2957 /* we currently only support setting the ssid from the ap probe req */ 2958 if (wlvif->bss_type != BSS_TYPE_STA_BSS) 2959 return -EINVAL; 2960 2961 skb = ieee80211_ap_probereq_get(wl->hw, vif); 2962 if (!skb) 2963 return -EINVAL; 2964 2965 ieoffset = offsetof(struct ieee80211_mgmt, 2966 u.probe_req.variable); 2967 wl1271_ssid_set(wlvif, skb, ieoffset); 2968 dev_kfree_skb(skb); 2969 2970 return 0; 2971 } 2972 2973 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif, 2974 struct ieee80211_bss_conf *bss_conf, 2975 u32 sta_rate_set) 2976 { 2977 struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif, 2978 bss_conf); 2979 int ieoffset; 2980 int ret; 2981 2982 wlvif->aid = vif->cfg.aid; 2983 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chanreq.oper); 2984 wlvif->beacon_int = bss_conf->beacon_int; 2985 wlvif->wmm_enabled = bss_conf->qos; 2986 2987 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags); 2988 2989 /* 2990 * with wl1271, we don't need to update the 2991 * beacon_int and dtim_period, because the firmware 2992 * updates it by itself when the first beacon is 2993 * received after a join. 2994 */ 2995 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid); 2996 if (ret < 0) 2997 return ret; 2998 2999 /* 3000 * Get a template for hardware connection maintenance 3001 */ 3002 dev_kfree_skb(wlvif->probereq); 3003 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl, 3004 wlvif, 3005 NULL); 3006 ieoffset = offsetof(struct ieee80211_mgmt, 3007 u.probe_req.variable); 3008 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset); 3009 3010 /* enable the connection monitoring feature */ 3011 ret = wl1271_acx_conn_monit_params(wl, wlvif, true); 3012 if (ret < 0) 3013 return ret; 3014 3015 /* 3016 * The join command disable the keep-alive mode, shut down its process, 3017 * and also clear the template config, so we need to reset it all after 3018 * the join. The acx_aid starts the keep-alive process, and the order 3019 * of the commands below is relevant. 3020 */ 3021 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true); 3022 if (ret < 0) 3023 return ret; 3024 3025 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid); 3026 if (ret < 0) 3027 return ret; 3028 3029 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif); 3030 if (ret < 0) 3031 return ret; 3032 3033 ret = wl1271_acx_keep_alive_config(wl, wlvif, 3034 wlvif->sta.klv_template_id, 3035 ACX_KEEP_ALIVE_TPL_VALID); 3036 if (ret < 0) 3037 return ret; 3038 3039 /* 3040 * The default fw psm configuration is AUTO, while mac80211 default 3041 * setting is off (ACTIVE), so sync the fw with the correct value. 3042 */ 3043 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE); 3044 if (ret < 0) 3045 return ret; 3046 3047 if (sta_rate_set) { 3048 wlvif->rate_set = 3049 wl1271_tx_enabled_rates_get(wl, 3050 sta_rate_set, 3051 wlvif->band); 3052 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 3053 if (ret < 0) 3054 return ret; 3055 } 3056 3057 return ret; 3058 } 3059 3060 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif) 3061 { 3062 int ret; 3063 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS; 3064 3065 /* make sure we are connected (sta) joined */ 3066 if (sta && 3067 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 3068 return false; 3069 3070 /* make sure we are joined (ibss) */ 3071 if (!sta && 3072 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) 3073 return false; 3074 3075 if (sta) { 3076 /* use defaults when not associated */ 3077 wlvif->aid = 0; 3078 3079 /* free probe-request template */ 3080 dev_kfree_skb(wlvif->probereq); 3081 wlvif->probereq = NULL; 3082 3083 /* disable connection monitor features */ 3084 ret = wl1271_acx_conn_monit_params(wl, wlvif, false); 3085 if (ret < 0) 3086 return ret; 3087 3088 /* Disable the keep-alive feature */ 3089 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false); 3090 if (ret < 0) 3091 return ret; 3092 3093 /* disable beacon filtering */ 3094 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false); 3095 if (ret < 0) 3096 return ret; 3097 } 3098 3099 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) { 3100 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 3101 3102 wl12xx_cmd_stop_channel_switch(wl, wlvif); 3103 ieee80211_chswitch_done(vif, false, 0); 3104 cancel_delayed_work(&wlvif->channel_switch_work); 3105 } 3106 3107 /* invalidate keep-alive template */ 3108 wl1271_acx_keep_alive_config(wl, wlvif, 3109 wlvif->sta.klv_template_id, 3110 ACX_KEEP_ALIVE_TPL_INVALID); 3111 3112 return 0; 3113 } 3114 3115 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif) 3116 { 3117 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band]; 3118 wlvif->rate_set = wlvif->basic_rate_set; 3119 } 3120 3121 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif, 3122 bool idle) 3123 { 3124 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags); 3125 3126 if (idle == cur_idle) 3127 return; 3128 3129 if (idle) { 3130 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags); 3131 } else { 3132 /* The current firmware only supports sched_scan in idle */ 3133 if (wl->sched_vif == wlvif) 3134 wl->ops->sched_scan_stop(wl, wlvif); 3135 3136 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags); 3137 } 3138 } 3139 3140 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif, 3141 struct ieee80211_conf *conf, u32 changed) 3142 { 3143 int ret; 3144 3145 if (wlcore_is_p2p_mgmt(wlvif)) 3146 return 0; 3147 3148 if (conf->power_level != wlvif->power_level) { 3149 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level); 3150 if (ret < 0) 3151 return ret; 3152 3153 wlvif->power_level = conf->power_level; 3154 } 3155 3156 return 0; 3157 } 3158 3159 static int wl1271_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed) 3160 { 3161 struct wl1271 *wl = hw->priv; 3162 struct wl12xx_vif *wlvif; 3163 struct ieee80211_conf *conf = &hw->conf; 3164 int ret = 0; 3165 3166 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s" 3167 " changed 0x%x", 3168 conf->flags & IEEE80211_CONF_PS ? "on" : "off", 3169 conf->power_level, 3170 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use", 3171 changed); 3172 3173 mutex_lock(&wl->mutex); 3174 3175 if (changed & IEEE80211_CONF_CHANGE_POWER) 3176 wl->power_level = conf->power_level; 3177 3178 if (unlikely(wl->state != WLCORE_STATE_ON)) 3179 goto out; 3180 3181 ret = pm_runtime_resume_and_get(wl->dev); 3182 if (ret < 0) 3183 goto out; 3184 3185 /* configure each interface */ 3186 wl12xx_for_each_wlvif(wl, wlvif) { 3187 ret = wl12xx_config_vif(wl, wlvif, conf, changed); 3188 if (ret < 0) 3189 goto out_sleep; 3190 } 3191 3192 out_sleep: 3193 pm_runtime_put_autosuspend(wl->dev); 3194 3195 out: 3196 mutex_unlock(&wl->mutex); 3197 3198 return ret; 3199 } 3200 3201 struct wl1271_filter_params { 3202 bool enabled; 3203 int mc_list_length; 3204 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; 3205 }; 3206 3207 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, 3208 struct netdev_hw_addr_list *mc_list) 3209 { 3210 struct wl1271_filter_params *fp; 3211 struct netdev_hw_addr *ha; 3212 3213 fp = kzalloc(sizeof(*fp), GFP_ATOMIC); 3214 if (!fp) { 3215 wl1271_error("Out of memory setting filters."); 3216 return 0; 3217 } 3218 3219 /* update multicast filtering parameters */ 3220 fp->mc_list_length = 0; 3221 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) { 3222 fp->enabled = false; 3223 } else { 3224 fp->enabled = true; 3225 netdev_hw_addr_list_for_each(ha, mc_list) { 3226 memcpy(fp->mc_list[fp->mc_list_length], 3227 ha->addr, ETH_ALEN); 3228 fp->mc_list_length++; 3229 } 3230 } 3231 3232 return (u64)(unsigned long)fp; 3233 } 3234 3235 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \ 3236 FIF_FCSFAIL | \ 3237 FIF_BCN_PRBRESP_PROMISC | \ 3238 FIF_CONTROL | \ 3239 FIF_OTHER_BSS) 3240 3241 static void wl1271_op_configure_filter(struct ieee80211_hw *hw, 3242 unsigned int changed, 3243 unsigned int *total, u64 multicast) 3244 { 3245 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; 3246 struct wl1271 *wl = hw->priv; 3247 struct wl12xx_vif *wlvif; 3248 3249 int ret; 3250 3251 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" 3252 " total %x", changed, *total); 3253 3254 mutex_lock(&wl->mutex); 3255 3256 *total &= WL1271_SUPPORTED_FILTERS; 3257 changed &= WL1271_SUPPORTED_FILTERS; 3258 3259 if (unlikely(wl->state != WLCORE_STATE_ON)) 3260 goto out; 3261 3262 ret = pm_runtime_resume_and_get(wl->dev); 3263 if (ret < 0) 3264 goto out; 3265 3266 wl12xx_for_each_wlvif(wl, wlvif) { 3267 if (wlcore_is_p2p_mgmt(wlvif)) 3268 continue; 3269 3270 if (wlvif->bss_type != BSS_TYPE_AP_BSS) { 3271 if (*total & FIF_ALLMULTI) 3272 ret = wl1271_acx_group_address_tbl(wl, wlvif, 3273 false, 3274 NULL, 0); 3275 else if (fp) 3276 ret = wl1271_acx_group_address_tbl(wl, wlvif, 3277 fp->enabled, 3278 fp->mc_list, 3279 fp->mc_list_length); 3280 if (ret < 0) 3281 goto out_sleep; 3282 } 3283 3284 /* 3285 * If interface in AP mode and created with allmulticast then disable 3286 * the firmware filters so that all multicast packets are passed 3287 * This is mandatory for MDNS based discovery protocols 3288 */ 3289 if (wlvif->bss_type == BSS_TYPE_AP_BSS) { 3290 if (*total & FIF_ALLMULTI) { 3291 ret = wl1271_acx_group_address_tbl(wl, wlvif, 3292 false, 3293 NULL, 0); 3294 if (ret < 0) 3295 goto out_sleep; 3296 } 3297 } 3298 } 3299 3300 /* 3301 * the fw doesn't provide an api to configure the filters. instead, 3302 * the filters configuration is based on the active roles / ROC 3303 * state. 3304 */ 3305 3306 out_sleep: 3307 pm_runtime_put_autosuspend(wl->dev); 3308 3309 out: 3310 mutex_unlock(&wl->mutex); 3311 kfree(fp); 3312 } 3313 3314 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, 3315 u8 id, u8 key_type, u8 key_size, 3316 const u8 *key, u8 hlid, u32 tx_seq_32, 3317 u16 tx_seq_16, bool is_pairwise) 3318 { 3319 struct wl1271_ap_key *ap_key; 3320 int i; 3321 3322 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id); 3323 3324 if (key_size > MAX_KEY_SIZE) 3325 return -EINVAL; 3326 3327 /* 3328 * Find next free entry in ap_keys. Also check we are not replacing 3329 * an existing key. 3330 */ 3331 for (i = 0; i < MAX_NUM_KEYS; i++) { 3332 if (wlvif->ap.recorded_keys[i] == NULL) 3333 break; 3334 3335 if (wlvif->ap.recorded_keys[i]->id == id) { 3336 wl1271_warning("trying to record key replacement"); 3337 return -EINVAL; 3338 } 3339 } 3340 3341 if (i == MAX_NUM_KEYS) 3342 return -EBUSY; 3343 3344 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL); 3345 if (!ap_key) 3346 return -ENOMEM; 3347 3348 ap_key->id = id; 3349 ap_key->key_type = key_type; 3350 ap_key->key_size = key_size; 3351 memcpy(ap_key->key, key, key_size); 3352 ap_key->hlid = hlid; 3353 ap_key->tx_seq_32 = tx_seq_32; 3354 ap_key->tx_seq_16 = tx_seq_16; 3355 ap_key->is_pairwise = is_pairwise; 3356 3357 wlvif->ap.recorded_keys[i] = ap_key; 3358 return 0; 3359 } 3360 3361 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif) 3362 { 3363 int i; 3364 3365 for (i = 0; i < MAX_NUM_KEYS; i++) { 3366 kfree(wlvif->ap.recorded_keys[i]); 3367 wlvif->ap.recorded_keys[i] = NULL; 3368 } 3369 } 3370 3371 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif) 3372 { 3373 int i, ret = 0; 3374 struct wl1271_ap_key *key; 3375 bool wep_key_added = false; 3376 3377 for (i = 0; i < MAX_NUM_KEYS; i++) { 3378 u8 hlid; 3379 if (wlvif->ap.recorded_keys[i] == NULL) 3380 break; 3381 3382 key = wlvif->ap.recorded_keys[i]; 3383 hlid = key->hlid; 3384 if (hlid == WL12XX_INVALID_LINK_ID) 3385 hlid = wlvif->ap.bcast_hlid; 3386 3387 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE, 3388 key->id, key->key_type, 3389 key->key_size, key->key, 3390 hlid, key->tx_seq_32, 3391 key->tx_seq_16, key->is_pairwise); 3392 if (ret < 0) 3393 goto out; 3394 3395 if (key->key_type == KEY_WEP) 3396 wep_key_added = true; 3397 } 3398 3399 if (wep_key_added) { 3400 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key, 3401 wlvif->ap.bcast_hlid); 3402 if (ret < 0) 3403 goto out; 3404 } 3405 3406 out: 3407 wl1271_free_ap_keys(wl, wlvif); 3408 return ret; 3409 } 3410 3411 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif, 3412 u16 action, u8 id, u8 key_type, 3413 u8 key_size, const u8 *key, u32 tx_seq_32, 3414 u16 tx_seq_16, struct ieee80211_sta *sta, 3415 bool is_pairwise) 3416 { 3417 int ret; 3418 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 3419 3420 if (is_ap) { 3421 struct wl1271_station *wl_sta; 3422 u8 hlid; 3423 3424 if (sta) { 3425 wl_sta = (struct wl1271_station *)sta->drv_priv; 3426 hlid = wl_sta->hlid; 3427 } else { 3428 hlid = wlvif->ap.bcast_hlid; 3429 } 3430 3431 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 3432 /* 3433 * We do not support removing keys after AP shutdown. 3434 * Pretend we do to make mac80211 happy. 3435 */ 3436 if (action != KEY_ADD_OR_REPLACE) 3437 return 0; 3438 3439 ret = wl1271_record_ap_key(wl, wlvif, id, 3440 key_type, key_size, 3441 key, hlid, tx_seq_32, 3442 tx_seq_16, is_pairwise); 3443 } else { 3444 ret = wl1271_cmd_set_ap_key(wl, wlvif, action, 3445 id, key_type, key_size, 3446 key, hlid, tx_seq_32, 3447 tx_seq_16, is_pairwise); 3448 } 3449 3450 if (ret < 0) 3451 return ret; 3452 } else { 3453 const u8 *addr; 3454 static const u8 bcast_addr[ETH_ALEN] = { 3455 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 3456 }; 3457 3458 addr = sta ? sta->addr : bcast_addr; 3459 3460 if (is_zero_ether_addr(addr)) { 3461 /* We dont support TX only encryption */ 3462 return -EOPNOTSUPP; 3463 } 3464 3465 /* The wl1271 does not allow to remove unicast keys - they 3466 will be cleared automatically on next CMD_JOIN. Ignore the 3467 request silently, as we dont want the mac80211 to emit 3468 an error message. */ 3469 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr)) 3470 return 0; 3471 3472 /* don't remove key if hlid was already deleted */ 3473 if (action == KEY_REMOVE && 3474 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) 3475 return 0; 3476 3477 ret = wl1271_cmd_set_sta_key(wl, wlvif, action, 3478 id, key_type, key_size, 3479 key, addr, tx_seq_32, 3480 tx_seq_16); 3481 if (ret < 0) 3482 return ret; 3483 3484 } 3485 3486 return 0; 3487 } 3488 3489 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3490 struct ieee80211_vif *vif, 3491 struct ieee80211_sta *sta, 3492 struct ieee80211_key_conf *key_conf) 3493 { 3494 struct wl1271 *wl = hw->priv; 3495 int ret; 3496 bool might_change_spare = 3497 key_conf->cipher == WL1271_CIPHER_SUITE_GEM || 3498 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP; 3499 3500 if (might_change_spare) { 3501 /* 3502 * stop the queues and flush to ensure the next packets are 3503 * in sync with FW spare block accounting 3504 */ 3505 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); 3506 wl1271_tx_flush(wl); 3507 } 3508 3509 mutex_lock(&wl->mutex); 3510 3511 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3512 ret = -EAGAIN; 3513 goto out_wake_queues; 3514 } 3515 3516 ret = pm_runtime_resume_and_get(wl->dev); 3517 if (ret < 0) 3518 goto out_wake_queues; 3519 3520 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf); 3521 3522 pm_runtime_put_autosuspend(wl->dev); 3523 3524 out_wake_queues: 3525 if (might_change_spare) 3526 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK); 3527 3528 mutex_unlock(&wl->mutex); 3529 3530 return ret; 3531 } 3532 3533 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, 3534 struct ieee80211_vif *vif, 3535 struct ieee80211_sta *sta, 3536 struct ieee80211_key_conf *key_conf) 3537 { 3538 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3539 int ret; 3540 u32 tx_seq_32 = 0; 3541 u16 tx_seq_16 = 0; 3542 u8 key_type; 3543 u8 hlid; 3544 bool is_pairwise; 3545 3546 wl1271_debug(DEBUG_MAC80211, "mac80211 set key"); 3547 3548 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta); 3549 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x", 3550 key_conf->cipher, key_conf->keyidx, 3551 key_conf->keylen, key_conf->flags); 3552 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen); 3553 3554 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 3555 if (sta) { 3556 struct wl1271_station *wl_sta = (void *)sta->drv_priv; 3557 hlid = wl_sta->hlid; 3558 } else { 3559 hlid = wlvif->ap.bcast_hlid; 3560 } 3561 else 3562 hlid = wlvif->sta.hlid; 3563 3564 if (hlid != WL12XX_INVALID_LINK_ID) { 3565 u64 tx_seq = wl->links[hlid].total_freed_pkts; 3566 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq); 3567 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq); 3568 } 3569 3570 switch (key_conf->cipher) { 3571 case WLAN_CIPHER_SUITE_WEP40: 3572 case WLAN_CIPHER_SUITE_WEP104: 3573 key_type = KEY_WEP; 3574 3575 key_conf->hw_key_idx = key_conf->keyidx; 3576 break; 3577 case WLAN_CIPHER_SUITE_TKIP: 3578 key_type = KEY_TKIP; 3579 key_conf->hw_key_idx = key_conf->keyidx; 3580 break; 3581 case WLAN_CIPHER_SUITE_CCMP: 3582 key_type = KEY_AES; 3583 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3584 break; 3585 case WL1271_CIPHER_SUITE_GEM: 3586 key_type = KEY_GEM; 3587 break; 3588 default: 3589 wl1271_error("Unknown key algo 0x%x", key_conf->cipher); 3590 3591 return -EOPNOTSUPP; 3592 } 3593 3594 is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE; 3595 3596 switch (cmd) { 3597 case SET_KEY: 3598 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE, 3599 key_conf->keyidx, key_type, 3600 key_conf->keylen, key_conf->key, 3601 tx_seq_32, tx_seq_16, sta, is_pairwise); 3602 if (ret < 0) { 3603 wl1271_error("Could not add or replace key"); 3604 return ret; 3605 } 3606 3607 /* Store AP encryption key type */ 3608 if (wlvif->bss_type == BSS_TYPE_AP_BSS) 3609 wlvif->encryption_type = key_type; 3610 3611 /* 3612 * reconfiguring arp response if the unicast (or common) 3613 * encryption key type was changed 3614 */ 3615 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 3616 (sta || key_type == KEY_WEP) && 3617 wlvif->encryption_type != key_type) { 3618 wlvif->encryption_type = key_type; 3619 ret = wl1271_cmd_build_arp_rsp(wl, wlvif); 3620 if (ret < 0) { 3621 wl1271_warning("build arp rsp failed: %d", ret); 3622 return ret; 3623 } 3624 } 3625 break; 3626 3627 case DISABLE_KEY: 3628 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE, 3629 key_conf->keyidx, key_type, 3630 key_conf->keylen, key_conf->key, 3631 0, 0, sta, is_pairwise); 3632 if (ret < 0) { 3633 wl1271_error("Could not remove key"); 3634 return ret; 3635 } 3636 break; 3637 3638 default: 3639 wl1271_error("Unsupported key cmd 0x%x", cmd); 3640 return -EOPNOTSUPP; 3641 } 3642 3643 return ret; 3644 } 3645 EXPORT_SYMBOL_GPL(wlcore_set_key); 3646 3647 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw, 3648 struct ieee80211_vif *vif, 3649 int key_idx) 3650 { 3651 struct wl1271 *wl = hw->priv; 3652 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3653 int ret; 3654 3655 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d", 3656 key_idx); 3657 3658 /* we don't handle unsetting of default key */ 3659 if (key_idx == -1) 3660 return; 3661 3662 mutex_lock(&wl->mutex); 3663 3664 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3665 ret = -EAGAIN; 3666 goto out_unlock; 3667 } 3668 3669 ret = pm_runtime_resume_and_get(wl->dev); 3670 if (ret < 0) 3671 goto out_unlock; 3672 3673 wlvif->default_key = key_idx; 3674 3675 /* the default WEP key needs to be configured at least once */ 3676 if (wlvif->encryption_type == KEY_WEP) { 3677 ret = wl12xx_cmd_set_default_wep_key(wl, 3678 key_idx, 3679 wlvif->sta.hlid); 3680 if (ret < 0) 3681 goto out_sleep; 3682 } 3683 3684 out_sleep: 3685 pm_runtime_put_autosuspend(wl->dev); 3686 3687 out_unlock: 3688 mutex_unlock(&wl->mutex); 3689 } 3690 3691 void wlcore_regdomain_config(struct wl1271 *wl) 3692 { 3693 int ret; 3694 3695 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF)) 3696 return; 3697 3698 mutex_lock(&wl->mutex); 3699 3700 if (unlikely(wl->state != WLCORE_STATE_ON)) 3701 goto out; 3702 3703 ret = pm_runtime_resume_and_get(wl->dev); 3704 if (ret < 0) 3705 goto out; 3706 3707 ret = wlcore_cmd_regdomain_config_locked(wl); 3708 if (ret < 0) { 3709 wl12xx_queue_recovery_work(wl); 3710 goto out; 3711 } 3712 3713 pm_runtime_put_autosuspend(wl->dev); 3714 out: 3715 mutex_unlock(&wl->mutex); 3716 } 3717 3718 static int wl1271_op_hw_scan(struct ieee80211_hw *hw, 3719 struct ieee80211_vif *vif, 3720 struct ieee80211_scan_request *hw_req) 3721 { 3722 struct cfg80211_scan_request *req = &hw_req->req; 3723 struct wl1271 *wl = hw->priv; 3724 int ret; 3725 u8 *ssid = NULL; 3726 size_t len = 0; 3727 3728 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); 3729 3730 if (req->n_ssids) { 3731 ssid = req->ssids[0].ssid; 3732 len = req->ssids[0].ssid_len; 3733 } 3734 3735 mutex_lock(&wl->mutex); 3736 3737 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3738 /* 3739 * We cannot return -EBUSY here because cfg80211 will expect 3740 * a call to ieee80211_scan_completed if we do - in this case 3741 * there won't be any call. 3742 */ 3743 ret = -EAGAIN; 3744 goto out; 3745 } 3746 3747 ret = pm_runtime_resume_and_get(wl->dev); 3748 if (ret < 0) 3749 goto out; 3750 3751 /* fail if there is any role in ROC */ 3752 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) { 3753 /* don't allow scanning right now */ 3754 ret = -EBUSY; 3755 goto out_sleep; 3756 } 3757 3758 ret = wlcore_scan(hw->priv, vif, ssid, len, req); 3759 out_sleep: 3760 pm_runtime_put_autosuspend(wl->dev); 3761 out: 3762 mutex_unlock(&wl->mutex); 3763 3764 return ret; 3765 } 3766 3767 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw, 3768 struct ieee80211_vif *vif) 3769 { 3770 struct wl1271 *wl = hw->priv; 3771 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3772 struct cfg80211_scan_info info = { 3773 .aborted = true, 3774 }; 3775 int ret; 3776 3777 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan"); 3778 3779 mutex_lock(&wl->mutex); 3780 3781 if (unlikely(wl->state != WLCORE_STATE_ON)) 3782 goto out; 3783 3784 if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 3785 goto out; 3786 3787 ret = pm_runtime_resume_and_get(wl->dev); 3788 if (ret < 0) 3789 goto out; 3790 3791 if (wl->scan.state != WL1271_SCAN_STATE_DONE) { 3792 ret = wl->ops->scan_stop(wl, wlvif); 3793 if (ret < 0) 3794 goto out_sleep; 3795 } 3796 3797 /* 3798 * Rearm the tx watchdog just before idling scan. This 3799 * prevents just-finished scans from triggering the watchdog 3800 */ 3801 wl12xx_rearm_tx_watchdog_locked(wl); 3802 3803 wl->scan.state = WL1271_SCAN_STATE_IDLE; 3804 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch)); 3805 wl->scan_wlvif = NULL; 3806 wl->scan.req = NULL; 3807 ieee80211_scan_completed(wl->hw, &info); 3808 3809 out_sleep: 3810 pm_runtime_put_autosuspend(wl->dev); 3811 out: 3812 mutex_unlock(&wl->mutex); 3813 3814 cancel_delayed_work_sync(&wl->scan_complete_work); 3815 } 3816 3817 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, 3818 struct ieee80211_vif *vif, 3819 struct cfg80211_sched_scan_request *req, 3820 struct ieee80211_scan_ies *ies) 3821 { 3822 struct wl1271 *wl = hw->priv; 3823 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3824 int ret; 3825 3826 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); 3827 3828 mutex_lock(&wl->mutex); 3829 3830 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3831 ret = -EAGAIN; 3832 goto out; 3833 } 3834 3835 ret = pm_runtime_resume_and_get(wl->dev); 3836 if (ret < 0) 3837 goto out; 3838 3839 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies); 3840 if (ret < 0) 3841 goto out_sleep; 3842 3843 wl->sched_vif = wlvif; 3844 3845 out_sleep: 3846 pm_runtime_put_autosuspend(wl->dev); 3847 out: 3848 mutex_unlock(&wl->mutex); 3849 return ret; 3850 } 3851 3852 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, 3853 struct ieee80211_vif *vif) 3854 { 3855 struct wl1271 *wl = hw->priv; 3856 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3857 int ret; 3858 3859 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); 3860 3861 mutex_lock(&wl->mutex); 3862 3863 if (unlikely(wl->state != WLCORE_STATE_ON)) 3864 goto out; 3865 3866 ret = pm_runtime_resume_and_get(wl->dev); 3867 if (ret < 0) 3868 goto out; 3869 3870 wl->ops->sched_scan_stop(wl, wlvif); 3871 3872 pm_runtime_put_autosuspend(wl->dev); 3873 out: 3874 mutex_unlock(&wl->mutex); 3875 3876 return 0; 3877 } 3878 3879 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, 3880 int radio_idx, u32 value) 3881 { 3882 struct wl1271 *wl = hw->priv; 3883 int ret = 0; 3884 3885 mutex_lock(&wl->mutex); 3886 3887 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3888 ret = -EAGAIN; 3889 goto out; 3890 } 3891 3892 ret = pm_runtime_resume_and_get(wl->dev); 3893 if (ret < 0) 3894 goto out; 3895 3896 ret = wl1271_acx_frag_threshold(wl, value); 3897 if (ret < 0) 3898 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); 3899 3900 pm_runtime_put_autosuspend(wl->dev); 3901 3902 out: 3903 mutex_unlock(&wl->mutex); 3904 3905 return ret; 3906 } 3907 3908 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx, 3909 u32 value) 3910 { 3911 struct wl1271 *wl = hw->priv; 3912 struct wl12xx_vif *wlvif; 3913 int ret = 0; 3914 3915 mutex_lock(&wl->mutex); 3916 3917 if (unlikely(wl->state != WLCORE_STATE_ON)) { 3918 ret = -EAGAIN; 3919 goto out; 3920 } 3921 3922 ret = pm_runtime_resume_and_get(wl->dev); 3923 if (ret < 0) 3924 goto out; 3925 3926 wl12xx_for_each_wlvif(wl, wlvif) { 3927 ret = wl1271_acx_rts_threshold(wl, wlvif, value); 3928 if (ret < 0) 3929 wl1271_warning("set rts threshold failed: %d", ret); 3930 } 3931 pm_runtime_put_autosuspend(wl->dev); 3932 3933 out: 3934 mutex_unlock(&wl->mutex); 3935 3936 return ret; 3937 } 3938 3939 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset) 3940 { 3941 int len; 3942 const u8 *next, *end = skb->data + skb->len; 3943 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset, 3944 skb->len - ieoffset); 3945 if (!ie) 3946 return; 3947 len = ie[1] + 2; 3948 next = ie + len; 3949 memmove(ie, next, end - next); 3950 skb_trim(skb, skb->len - len); 3951 } 3952 3953 static void wl12xx_remove_vendor_ie(struct sk_buff *skb, 3954 unsigned int oui, u8 oui_type, 3955 int ieoffset) 3956 { 3957 int len; 3958 const u8 *next, *end = skb->data + skb->len; 3959 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 3960 skb->data + ieoffset, 3961 skb->len - ieoffset); 3962 if (!ie) 3963 return; 3964 len = ie[1] + 2; 3965 next = ie + len; 3966 memmove(ie, next, end - next); 3967 skb_trim(skb, skb->len - len); 3968 } 3969 3970 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates, 3971 struct ieee80211_vif *vif) 3972 { 3973 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 3974 struct sk_buff *skb; 3975 int ret; 3976 3977 skb = ieee80211_proberesp_get(wl->hw, vif); 3978 if (!skb) 3979 return -EOPNOTSUPP; 3980 3981 ret = wl1271_cmd_template_set(wl, wlvif->role_id, 3982 CMD_TEMPL_AP_PROBE_RESPONSE, 3983 skb->data, 3984 skb->len, 0, 3985 rates); 3986 dev_kfree_skb(skb); 3987 3988 if (ret < 0) 3989 goto out; 3990 3991 wl1271_debug(DEBUG_AP, "probe response updated"); 3992 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags); 3993 3994 out: 3995 return ret; 3996 } 3997 3998 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl, 3999 struct ieee80211_vif *vif, 4000 u8 *probe_rsp_data, 4001 size_t probe_rsp_len, 4002 u32 rates) 4003 { 4004 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4005 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE]; 4006 int ssid_ie_offset, ie_offset, templ_len; 4007 const u8 *ptr; 4008 4009 /* no need to change probe response if the SSID is set correctly */ 4010 if (wlvif->ssid_len > 0) 4011 return wl1271_cmd_template_set(wl, wlvif->role_id, 4012 CMD_TEMPL_AP_PROBE_RESPONSE, 4013 probe_rsp_data, 4014 probe_rsp_len, 0, 4015 rates); 4016 4017 if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) { 4018 wl1271_error("probe_rsp template too big"); 4019 return -EINVAL; 4020 } 4021 4022 /* start searching from IE offset */ 4023 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); 4024 4025 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset, 4026 probe_rsp_len - ie_offset); 4027 if (!ptr) { 4028 wl1271_error("No SSID in beacon!"); 4029 return -EINVAL; 4030 } 4031 4032 ssid_ie_offset = ptr - probe_rsp_data; 4033 ptr += (ptr[1] + 2); 4034 4035 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset); 4036 4037 /* insert SSID from bss_conf */ 4038 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID; 4039 probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len; 4040 memcpy(probe_rsp_templ + ssid_ie_offset + 2, 4041 vif->cfg.ssid, vif->cfg.ssid_len); 4042 templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len; 4043 4044 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len, 4045 ptr, probe_rsp_len - (ptr - probe_rsp_data)); 4046 templ_len += probe_rsp_len - (ptr - probe_rsp_data); 4047 4048 return wl1271_cmd_template_set(wl, wlvif->role_id, 4049 CMD_TEMPL_AP_PROBE_RESPONSE, 4050 probe_rsp_templ, 4051 templ_len, 0, 4052 rates); 4053 } 4054 4055 static int wl1271_bss_erp_info_changed(struct wl1271 *wl, 4056 struct ieee80211_vif *vif, 4057 struct ieee80211_bss_conf *bss_conf, 4058 u32 changed) 4059 { 4060 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4061 int ret = 0; 4062 4063 if (changed & BSS_CHANGED_ERP_SLOT) { 4064 if (bss_conf->use_short_slot) 4065 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT); 4066 else 4067 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG); 4068 if (ret < 0) { 4069 wl1271_warning("Set slot time failed %d", ret); 4070 goto out; 4071 } 4072 } 4073 4074 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 4075 if (bss_conf->use_short_preamble) 4076 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT); 4077 else 4078 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG); 4079 } 4080 4081 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 4082 if (bss_conf->use_cts_prot) 4083 ret = wl1271_acx_cts_protect(wl, wlvif, 4084 CTSPROTECT_ENABLE); 4085 else 4086 ret = wl1271_acx_cts_protect(wl, wlvif, 4087 CTSPROTECT_DISABLE); 4088 if (ret < 0) { 4089 wl1271_warning("Set ctsprotect failed %d", ret); 4090 goto out; 4091 } 4092 } 4093 4094 out: 4095 return ret; 4096 } 4097 4098 static int wlcore_set_beacon_template(struct wl1271 *wl, 4099 struct ieee80211_vif *vif, 4100 bool is_ap) 4101 { 4102 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4103 struct ieee80211_hdr *hdr; 4104 u32 min_rate; 4105 int ret; 4106 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); 4107 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0); 4108 u16 tmpl_id; 4109 4110 if (!beacon) { 4111 ret = -EINVAL; 4112 goto out; 4113 } 4114 4115 wl1271_debug(DEBUG_MASTER, "beacon updated"); 4116 4117 ret = wl1271_ssid_set(wlvif, beacon, ieoffset); 4118 if (ret < 0) { 4119 dev_kfree_skb(beacon); 4120 goto out; 4121 } 4122 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 4123 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON : 4124 CMD_TEMPL_BEACON; 4125 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id, 4126 beacon->data, 4127 beacon->len, 0, 4128 min_rate); 4129 if (ret < 0) { 4130 dev_kfree_skb(beacon); 4131 goto out; 4132 } 4133 4134 wlvif->wmm_enabled = 4135 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 4136 WLAN_OUI_TYPE_MICROSOFT_WMM, 4137 beacon->data + ieoffset, 4138 beacon->len - ieoffset); 4139 4140 /* 4141 * In case we already have a probe-resp beacon set explicitly 4142 * by usermode, don't use the beacon data. 4143 */ 4144 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags)) 4145 goto end_bcn; 4146 4147 /* remove TIM ie from probe response */ 4148 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset); 4149 4150 /* 4151 * remove p2p ie from probe response. 4152 * the fw reponds to probe requests that don't include 4153 * the p2p ie. probe requests with p2p ie will be passed, 4154 * and will be responded by the supplicant (the spec 4155 * forbids including the p2p ie when responding to probe 4156 * requests that didn't include it). 4157 */ 4158 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA, 4159 WLAN_OUI_TYPE_WFA_P2P, ieoffset); 4160 4161 hdr = (struct ieee80211_hdr *) beacon->data; 4162 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 4163 IEEE80211_STYPE_PROBE_RESP); 4164 if (is_ap) 4165 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif, 4166 beacon->data, 4167 beacon->len, 4168 min_rate); 4169 else 4170 ret = wl1271_cmd_template_set(wl, wlvif->role_id, 4171 CMD_TEMPL_PROBE_RESPONSE, 4172 beacon->data, 4173 beacon->len, 0, 4174 min_rate); 4175 end_bcn: 4176 dev_kfree_skb(beacon); 4177 if (ret < 0) 4178 goto out; 4179 4180 out: 4181 return ret; 4182 } 4183 4184 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl, 4185 struct ieee80211_vif *vif, 4186 struct ieee80211_bss_conf *bss_conf, 4187 u32 changed) 4188 { 4189 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4190 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 4191 int ret = 0; 4192 4193 if (changed & BSS_CHANGED_BEACON_INT) { 4194 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d", 4195 bss_conf->beacon_int); 4196 4197 wlvif->beacon_int = bss_conf->beacon_int; 4198 } 4199 4200 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) { 4201 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 4202 4203 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif); 4204 } 4205 4206 if (changed & BSS_CHANGED_BEACON) { 4207 ret = wlcore_set_beacon_template(wl, vif, is_ap); 4208 if (ret < 0) 4209 goto out; 4210 4211 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED, 4212 &wlvif->flags)) { 4213 ret = wlcore_hw_dfs_master_restart(wl, wlvif); 4214 if (ret < 0) 4215 goto out; 4216 } 4217 } 4218 out: 4219 if (ret != 0) 4220 wl1271_error("beacon info change failed: %d", ret); 4221 return ret; 4222 } 4223 4224 /* AP mode changes */ 4225 static void wl1271_bss_info_changed_ap(struct wl1271 *wl, 4226 struct ieee80211_vif *vif, 4227 struct ieee80211_bss_conf *bss_conf, 4228 u32 changed) 4229 { 4230 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4231 int ret = 0; 4232 4233 if (changed & BSS_CHANGED_BASIC_RATES) { 4234 u32 rates = bss_conf->basic_rates; 4235 4236 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates, 4237 wlvif->band); 4238 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, 4239 wlvif->basic_rate_set); 4240 4241 ret = wl1271_init_ap_rates(wl, wlvif); 4242 if (ret < 0) { 4243 wl1271_error("AP rate policy change failed %d", ret); 4244 goto out; 4245 } 4246 4247 ret = wl1271_ap_init_templates(wl, vif); 4248 if (ret < 0) 4249 goto out; 4250 4251 /* No need to set probe resp template for mesh */ 4252 if (!ieee80211_vif_is_mesh(vif)) { 4253 ret = wl1271_ap_set_probe_resp_tmpl(wl, 4254 wlvif->basic_rate, 4255 vif); 4256 if (ret < 0) 4257 goto out; 4258 } 4259 4260 ret = wlcore_set_beacon_template(wl, vif, true); 4261 if (ret < 0) 4262 goto out; 4263 } 4264 4265 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed); 4266 if (ret < 0) 4267 goto out; 4268 4269 if (changed & BSS_CHANGED_BEACON_ENABLED) { 4270 if (bss_conf->enable_beacon) { 4271 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 4272 ret = wl12xx_cmd_role_start_ap(wl, wlvif); 4273 if (ret < 0) 4274 goto out; 4275 4276 ret = wl1271_ap_init_hwenc(wl, wlvif); 4277 if (ret < 0) 4278 goto out; 4279 4280 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); 4281 wl1271_debug(DEBUG_AP, "started AP"); 4282 } 4283 } else { 4284 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { 4285 /* 4286 * AP might be in ROC in case we have just 4287 * sent auth reply. handle it. 4288 */ 4289 if (test_bit(wlvif->role_id, wl->roc_map)) 4290 wl12xx_croc(wl, wlvif->role_id); 4291 4292 ret = wl12xx_cmd_role_stop_ap(wl, wlvif); 4293 if (ret < 0) 4294 goto out; 4295 4296 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags); 4297 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, 4298 &wlvif->flags); 4299 wl1271_debug(DEBUG_AP, "stopped AP"); 4300 } 4301 } 4302 } 4303 4304 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); 4305 if (ret < 0) 4306 goto out; 4307 4308 /* Handle HT information change */ 4309 if ((changed & BSS_CHANGED_HT) && 4310 (bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT)) { 4311 ret = wl1271_acx_set_ht_information(wl, wlvif, 4312 bss_conf->ht_operation_mode); 4313 if (ret < 0) { 4314 wl1271_warning("Set ht information failed %d", ret); 4315 goto out; 4316 } 4317 } 4318 4319 out: 4320 return; 4321 } 4322 4323 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif, 4324 struct ieee80211_vif *vif, u32 sta_rate_set) 4325 { 4326 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 4327 u32 rates; 4328 int ret; 4329 4330 wl1271_debug(DEBUG_MAC80211, 4331 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x", 4332 bss_conf->bssid, vif->cfg.aid, 4333 bss_conf->beacon_int, 4334 bss_conf->basic_rates, sta_rate_set); 4335 4336 wlvif->beacon_int = bss_conf->beacon_int; 4337 rates = bss_conf->basic_rates; 4338 wlvif->basic_rate_set = 4339 wl1271_tx_enabled_rates_get(wl, rates, 4340 wlvif->band); 4341 wlvif->basic_rate = 4342 wl1271_tx_min_rate_get(wl, 4343 wlvif->basic_rate_set); 4344 4345 if (sta_rate_set) 4346 wlvif->rate_set = 4347 wl1271_tx_enabled_rates_get(wl, 4348 sta_rate_set, 4349 wlvif->band); 4350 4351 /* we only support sched_scan while not connected */ 4352 if (wl->sched_vif == wlvif) 4353 wl->ops->sched_scan_stop(wl, wlvif); 4354 4355 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 4356 if (ret < 0) 4357 return ret; 4358 4359 ret = wl12xx_cmd_build_null_data(wl, wlvif); 4360 if (ret < 0) 4361 return ret; 4362 4363 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif)); 4364 if (ret < 0) 4365 return ret; 4366 4367 wlcore_set_ssid(wl, wlvif); 4368 4369 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 4370 4371 return 0; 4372 } 4373 4374 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif) 4375 { 4376 int ret; 4377 4378 /* revert back to minimum rates for the current band */ 4379 wl1271_set_band_rate(wl, wlvif); 4380 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 4381 4382 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 4383 if (ret < 0) 4384 return ret; 4385 4386 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 4387 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) { 4388 ret = wl12xx_cmd_role_stop_sta(wl, wlvif); 4389 if (ret < 0) 4390 return ret; 4391 } 4392 4393 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags); 4394 return 0; 4395 } 4396 /* STA/IBSS mode changes */ 4397 static void wl1271_bss_info_changed_sta(struct wl1271 *wl, 4398 struct ieee80211_vif *vif, 4399 struct ieee80211_bss_conf *bss_conf, 4400 u32 changed) 4401 { 4402 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4403 bool do_join = false; 4404 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS); 4405 bool ibss_joined = false; 4406 u32 sta_rate_set = 0; 4407 int ret; 4408 struct ieee80211_sta *sta; 4409 bool sta_exists = false; 4410 struct ieee80211_sta_ht_cap sta_ht_cap; 4411 4412 if (is_ibss) { 4413 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, 4414 changed); 4415 if (ret < 0) 4416 goto out; 4417 } 4418 4419 if (changed & BSS_CHANGED_IBSS) { 4420 if (vif->cfg.ibss_joined) { 4421 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags); 4422 ibss_joined = true; 4423 } else { 4424 wlcore_unset_assoc(wl, wlvif); 4425 wl12xx_cmd_role_stop_sta(wl, wlvif); 4426 } 4427 } 4428 4429 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined) 4430 do_join = true; 4431 4432 /* Need to update the SSID (for filtering etc) */ 4433 if ((changed & BSS_CHANGED_BEACON) && ibss_joined) 4434 do_join = true; 4435 4436 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) { 4437 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s", 4438 bss_conf->enable_beacon ? "enabled" : "disabled"); 4439 4440 do_join = true; 4441 } 4442 4443 if (changed & BSS_CHANGED_IDLE && !is_ibss) 4444 wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle); 4445 4446 if (changed & BSS_CHANGED_CQM) { 4447 bool enable = false; 4448 if (bss_conf->cqm_rssi_thold) 4449 enable = true; 4450 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable, 4451 bss_conf->cqm_rssi_thold, 4452 bss_conf->cqm_rssi_hyst); 4453 if (ret < 0) 4454 goto out; 4455 wlvif->rssi_thold = bss_conf->cqm_rssi_thold; 4456 } 4457 4458 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT | 4459 BSS_CHANGED_ASSOC)) { 4460 rcu_read_lock(); 4461 sta = ieee80211_find_sta(vif, bss_conf->bssid); 4462 if (sta) { 4463 u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask; 4464 4465 /* save the supp_rates of the ap */ 4466 sta_rate_set = sta->deflink.supp_rates[wlvif->band]; 4467 if (sta->deflink.ht_cap.ht_supported) 4468 sta_rate_set |= 4469 (rx_mask[0] << HW_HT_RATES_OFFSET) | 4470 (rx_mask[1] << HW_MIMO_RATES_OFFSET); 4471 sta_ht_cap = sta->deflink.ht_cap; 4472 sta_exists = true; 4473 } 4474 4475 rcu_read_unlock(); 4476 } 4477 4478 if (changed & BSS_CHANGED_BSSID) { 4479 if (!is_zero_ether_addr(bss_conf->bssid)) { 4480 ret = wlcore_set_bssid(wl, wlvif, vif, 4481 sta_rate_set); 4482 if (ret < 0) 4483 goto out; 4484 4485 /* Need to update the BSSID (for filtering etc) */ 4486 do_join = true; 4487 } else { 4488 ret = wlcore_clear_bssid(wl, wlvif); 4489 if (ret < 0) 4490 goto out; 4491 } 4492 } 4493 4494 if (changed & BSS_CHANGED_IBSS) { 4495 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d", 4496 vif->cfg.ibss_joined); 4497 4498 if (vif->cfg.ibss_joined) { 4499 u32 rates = bss_conf->basic_rates; 4500 wlvif->basic_rate_set = 4501 wl1271_tx_enabled_rates_get(wl, rates, 4502 wlvif->band); 4503 wlvif->basic_rate = 4504 wl1271_tx_min_rate_get(wl, 4505 wlvif->basic_rate_set); 4506 4507 /* by default, use 11b + OFDM rates */ 4508 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES; 4509 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 4510 if (ret < 0) 4511 goto out; 4512 } 4513 } 4514 4515 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) { 4516 /* enable beacon filtering */ 4517 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 4518 if (ret < 0) 4519 goto out; 4520 } 4521 4522 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed); 4523 if (ret < 0) 4524 goto out; 4525 4526 if (do_join) { 4527 ret = wlcore_join(wl, wlvif); 4528 if (ret < 0) { 4529 wl1271_warning("cmd join failed %d", ret); 4530 goto out; 4531 } 4532 } 4533 4534 if (changed & BSS_CHANGED_ASSOC) { 4535 if (vif->cfg.assoc) { 4536 ret = wlcore_set_assoc(wl, wlvif, bss_conf, 4537 sta_rate_set); 4538 if (ret < 0) 4539 goto out; 4540 4541 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) 4542 wl12xx_set_authorized(wl, wlvif); 4543 } else { 4544 wlcore_unset_assoc(wl, wlvif); 4545 } 4546 } 4547 4548 if (changed & BSS_CHANGED_PS) { 4549 if (vif->cfg.ps && 4550 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) && 4551 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { 4552 int ps_mode; 4553 char *ps_mode_str; 4554 4555 if (wl->conf.conn.forced_ps) { 4556 ps_mode = STATION_POWER_SAVE_MODE; 4557 ps_mode_str = "forced"; 4558 } else { 4559 ps_mode = STATION_AUTO_PS_MODE; 4560 ps_mode_str = "auto"; 4561 } 4562 4563 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str); 4564 4565 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode); 4566 if (ret < 0) 4567 wl1271_warning("enter %s ps failed %d", 4568 ps_mode_str, ret); 4569 } else if (!vif->cfg.ps && 4570 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) { 4571 wl1271_debug(DEBUG_PSM, "auto ps disabled"); 4572 4573 ret = wl1271_ps_set_mode(wl, wlvif, 4574 STATION_ACTIVE_MODE); 4575 if (ret < 0) 4576 wl1271_warning("exit auto ps failed %d", ret); 4577 } 4578 } 4579 4580 /* Handle new association with HT. Do this after join. */ 4581 if (sta_exists) { 4582 bool enabled = 4583 bss_conf->chanreq.oper.width != NL80211_CHAN_WIDTH_20_NOHT; 4584 4585 ret = wlcore_hw_set_peer_cap(wl, 4586 &sta_ht_cap, 4587 enabled, 4588 wlvif->rate_set, 4589 wlvif->sta.hlid); 4590 if (ret < 0) { 4591 wl1271_warning("Set ht cap failed %d", ret); 4592 goto out; 4593 4594 } 4595 4596 if (enabled) { 4597 ret = wl1271_acx_set_ht_information(wl, wlvif, 4598 bss_conf->ht_operation_mode); 4599 if (ret < 0) { 4600 wl1271_warning("Set ht information failed %d", 4601 ret); 4602 goto out; 4603 } 4604 } 4605 } 4606 4607 /* Handle arp filtering. Done after join. */ 4608 if ((changed & BSS_CHANGED_ARP_FILTER) || 4609 (!is_ibss && (changed & BSS_CHANGED_QOS))) { 4610 __be32 addr = vif->cfg.arp_addr_list[0]; 4611 wlvif->sta.qos = bss_conf->qos; 4612 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS); 4613 4614 if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) { 4615 wlvif->ip_addr = addr; 4616 /* 4617 * The template should have been configured only upon 4618 * association. however, it seems that the correct ip 4619 * isn't being set (when sending), so we have to 4620 * reconfigure the template upon every ip change. 4621 */ 4622 ret = wl1271_cmd_build_arp_rsp(wl, wlvif); 4623 if (ret < 0) { 4624 wl1271_warning("build arp rsp failed: %d", ret); 4625 goto out; 4626 } 4627 4628 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 4629 (ACX_ARP_FILTER_ARP_FILTERING | 4630 ACX_ARP_FILTER_AUTO_ARP), 4631 addr); 4632 } else { 4633 wlvif->ip_addr = 0; 4634 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr); 4635 } 4636 4637 if (ret < 0) 4638 goto out; 4639 } 4640 4641 out: 4642 return; 4643 } 4644 4645 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 4646 struct ieee80211_vif *vif, 4647 struct ieee80211_bss_conf *bss_conf, 4648 u64 changed) 4649 { 4650 struct wl1271 *wl = hw->priv; 4651 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4652 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 4653 int ret; 4654 4655 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x", 4656 wlvif->role_id, (int)changed); 4657 4658 /* 4659 * make sure to cancel pending disconnections if our association 4660 * state changed 4661 */ 4662 if (!is_ap && (changed & BSS_CHANGED_ASSOC)) 4663 cancel_delayed_work_sync(&wlvif->connection_loss_work); 4664 4665 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) && 4666 !bss_conf->enable_beacon) 4667 wl1271_tx_flush(wl); 4668 4669 mutex_lock(&wl->mutex); 4670 4671 if (unlikely(wl->state != WLCORE_STATE_ON)) 4672 goto out; 4673 4674 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) 4675 goto out; 4676 4677 ret = pm_runtime_resume_and_get(wl->dev); 4678 if (ret < 0) 4679 goto out; 4680 4681 if ((changed & BSS_CHANGED_TXPOWER) && 4682 bss_conf->txpower != wlvif->power_level) { 4683 4684 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower); 4685 if (ret < 0) 4686 goto out; 4687 4688 wlvif->power_level = bss_conf->txpower; 4689 } 4690 4691 if (is_ap) 4692 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed); 4693 else 4694 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed); 4695 4696 pm_runtime_put_autosuspend(wl->dev); 4697 4698 out: 4699 mutex_unlock(&wl->mutex); 4700 } 4701 4702 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw, 4703 struct ieee80211_chanctx_conf *ctx) 4704 { 4705 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)", 4706 ieee80211_frequency_to_channel(ctx->def.chan->center_freq), 4707 cfg80211_get_chandef_type(&ctx->def)); 4708 return 0; 4709 } 4710 4711 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw, 4712 struct ieee80211_chanctx_conf *ctx) 4713 { 4714 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)", 4715 ieee80211_frequency_to_channel(ctx->def.chan->center_freq), 4716 cfg80211_get_chandef_type(&ctx->def)); 4717 } 4718 4719 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw, 4720 struct ieee80211_chanctx_conf *ctx, 4721 u32 changed) 4722 { 4723 struct wl1271 *wl = hw->priv; 4724 struct wl12xx_vif *wlvif; 4725 int ret; 4726 int channel = ieee80211_frequency_to_channel( 4727 ctx->def.chan->center_freq); 4728 4729 wl1271_debug(DEBUG_MAC80211, 4730 "mac80211 change chanctx %d (type %d) changed 0x%x", 4731 channel, cfg80211_get_chandef_type(&ctx->def), changed); 4732 4733 mutex_lock(&wl->mutex); 4734 4735 ret = pm_runtime_resume_and_get(wl->dev); 4736 if (ret < 0) 4737 goto out; 4738 4739 wl12xx_for_each_wlvif(wl, wlvif) { 4740 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif); 4741 4742 rcu_read_lock(); 4743 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) { 4744 rcu_read_unlock(); 4745 continue; 4746 } 4747 rcu_read_unlock(); 4748 4749 /* start radar if needed */ 4750 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR && 4751 wlvif->bss_type == BSS_TYPE_AP_BSS && 4752 ctx->radar_enabled && !wlvif->radar_enabled && 4753 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) { 4754 wl1271_debug(DEBUG_MAC80211, "Start radar detection"); 4755 wlcore_hw_set_cac(wl, wlvif, true); 4756 wlvif->radar_enabled = true; 4757 } 4758 } 4759 4760 pm_runtime_put_autosuspend(wl->dev); 4761 out: 4762 mutex_unlock(&wl->mutex); 4763 } 4764 4765 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw, 4766 struct ieee80211_vif *vif, 4767 struct ieee80211_bss_conf *link_conf, 4768 struct ieee80211_chanctx_conf *ctx) 4769 { 4770 struct wl1271 *wl = hw->priv; 4771 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4772 int channel = ieee80211_frequency_to_channel( 4773 ctx->def.chan->center_freq); 4774 int ret = -EINVAL; 4775 4776 wl1271_debug(DEBUG_MAC80211, 4777 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)", 4778 wlvif->role_id, channel, 4779 cfg80211_get_chandef_type(&ctx->def), 4780 ctx->radar_enabled, ctx->def.chan->dfs_state); 4781 4782 mutex_lock(&wl->mutex); 4783 4784 if (unlikely(wl->state != WLCORE_STATE_ON)) 4785 goto out; 4786 4787 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) 4788 goto out; 4789 4790 ret = pm_runtime_resume_and_get(wl->dev); 4791 if (ret < 0) 4792 goto out; 4793 4794 wlvif->band = ctx->def.chan->band; 4795 wlvif->channel = channel; 4796 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def); 4797 4798 /* update default rates according to the band */ 4799 wl1271_set_band_rate(wl, wlvif); 4800 4801 if (ctx->radar_enabled && 4802 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) { 4803 wl1271_debug(DEBUG_MAC80211, "Start radar detection"); 4804 wlcore_hw_set_cac(wl, wlvif, true); 4805 wlvif->radar_enabled = true; 4806 } 4807 4808 pm_runtime_put_autosuspend(wl->dev); 4809 out: 4810 mutex_unlock(&wl->mutex); 4811 4812 return 0; 4813 } 4814 4815 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 4816 struct ieee80211_vif *vif, 4817 struct ieee80211_bss_conf *link_conf, 4818 struct ieee80211_chanctx_conf *ctx) 4819 { 4820 struct wl1271 *wl = hw->priv; 4821 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4822 int ret; 4823 4824 wl1271_debug(DEBUG_MAC80211, 4825 "mac80211 unassign chanctx (role %d) %d (type %d)", 4826 wlvif->role_id, 4827 ieee80211_frequency_to_channel(ctx->def.chan->center_freq), 4828 cfg80211_get_chandef_type(&ctx->def)); 4829 4830 wl1271_tx_flush(wl); 4831 4832 mutex_lock(&wl->mutex); 4833 4834 if (unlikely(wl->state != WLCORE_STATE_ON)) 4835 goto out; 4836 4837 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))) 4838 goto out; 4839 4840 ret = pm_runtime_resume_and_get(wl->dev); 4841 if (ret < 0) 4842 goto out; 4843 4844 if (wlvif->radar_enabled) { 4845 wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); 4846 wlcore_hw_set_cac(wl, wlvif, false); 4847 wlvif->radar_enabled = false; 4848 } 4849 4850 pm_runtime_put_autosuspend(wl->dev); 4851 out: 4852 mutex_unlock(&wl->mutex); 4853 } 4854 4855 static int __wlcore_switch_vif_chan(struct wl1271 *wl, 4856 struct wl12xx_vif *wlvif, 4857 struct ieee80211_chanctx_conf *new_ctx) 4858 { 4859 int channel = ieee80211_frequency_to_channel( 4860 new_ctx->def.chan->center_freq); 4861 4862 wl1271_debug(DEBUG_MAC80211, 4863 "switch vif (role %d) %d -> %d chan_type: %d", 4864 wlvif->role_id, wlvif->channel, channel, 4865 cfg80211_get_chandef_type(&new_ctx->def)); 4866 4867 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS)) 4868 return 0; 4869 4870 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags)); 4871 4872 if (wlvif->radar_enabled) { 4873 wl1271_debug(DEBUG_MAC80211, "Stop radar detection"); 4874 wlcore_hw_set_cac(wl, wlvif, false); 4875 wlvif->radar_enabled = false; 4876 } 4877 4878 wlvif->band = new_ctx->def.chan->band; 4879 wlvif->channel = channel; 4880 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def); 4881 4882 /* start radar if needed */ 4883 if (new_ctx->radar_enabled) { 4884 wl1271_debug(DEBUG_MAC80211, "Start radar detection"); 4885 wlcore_hw_set_cac(wl, wlvif, true); 4886 wlvif->radar_enabled = true; 4887 } 4888 4889 return 0; 4890 } 4891 4892 static int 4893 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw, 4894 struct ieee80211_vif_chanctx_switch *vifs, 4895 int n_vifs, 4896 enum ieee80211_chanctx_switch_mode mode) 4897 { 4898 struct wl1271 *wl = hw->priv; 4899 int i, ret; 4900 4901 wl1271_debug(DEBUG_MAC80211, 4902 "mac80211 switch chanctx n_vifs %d mode %d", 4903 n_vifs, mode); 4904 4905 mutex_lock(&wl->mutex); 4906 4907 ret = pm_runtime_resume_and_get(wl->dev); 4908 if (ret < 0) 4909 goto out; 4910 4911 for (i = 0; i < n_vifs; i++) { 4912 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif); 4913 4914 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx); 4915 if (ret) 4916 goto out_sleep; 4917 } 4918 out_sleep: 4919 pm_runtime_put_autosuspend(wl->dev); 4920 out: 4921 mutex_unlock(&wl->mutex); 4922 4923 return 0; 4924 } 4925 4926 static int wl1271_op_conf_tx(struct ieee80211_hw *hw, 4927 struct ieee80211_vif *vif, 4928 unsigned int link_id, u16 queue, 4929 const struct ieee80211_tx_queue_params *params) 4930 { 4931 struct wl1271 *wl = hw->priv; 4932 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4933 u8 ps_scheme; 4934 int ret = 0; 4935 4936 if (wlcore_is_p2p_mgmt(wlvif)) 4937 return 0; 4938 4939 mutex_lock(&wl->mutex); 4940 4941 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); 4942 4943 if (params->uapsd) 4944 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER; 4945 else 4946 ps_scheme = CONF_PS_SCHEME_LEGACY; 4947 4948 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 4949 goto out; 4950 4951 ret = pm_runtime_resume_and_get(wl->dev); 4952 if (ret < 0) 4953 goto out; 4954 4955 /* 4956 * the txop is confed in units of 32us by the mac80211, 4957 * we need us 4958 */ 4959 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue), 4960 params->cw_min, params->cw_max, 4961 params->aifs, params->txop << 5); 4962 if (ret < 0) 4963 goto out_sleep; 4964 4965 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue), 4966 CONF_CHANNEL_TYPE_EDCF, 4967 wl1271_tx_get_queue(queue), 4968 ps_scheme, CONF_ACK_POLICY_LEGACY, 4969 0, 0); 4970 4971 out_sleep: 4972 pm_runtime_put_autosuspend(wl->dev); 4973 4974 out: 4975 mutex_unlock(&wl->mutex); 4976 4977 return ret; 4978 } 4979 4980 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw, 4981 struct ieee80211_vif *vif) 4982 { 4983 4984 struct wl1271 *wl = hw->priv; 4985 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 4986 u64 mactime = ULLONG_MAX; 4987 int ret; 4988 4989 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf"); 4990 4991 mutex_lock(&wl->mutex); 4992 4993 if (unlikely(wl->state != WLCORE_STATE_ON)) 4994 goto out; 4995 4996 ret = pm_runtime_resume_and_get(wl->dev); 4997 if (ret < 0) 4998 goto out; 4999 5000 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime); 5001 if (ret < 0) 5002 goto out_sleep; 5003 5004 out_sleep: 5005 pm_runtime_put_autosuspend(wl->dev); 5006 5007 out: 5008 mutex_unlock(&wl->mutex); 5009 return mactime; 5010 } 5011 5012 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx, 5013 struct survey_info *survey) 5014 { 5015 struct ieee80211_conf *conf = &hw->conf; 5016 5017 if (idx != 0) 5018 return -ENOENT; 5019 5020 survey->channel = conf->chandef.chan; 5021 survey->filled = 0; 5022 return 0; 5023 } 5024 5025 static int wl1271_allocate_sta(struct wl1271 *wl, 5026 struct wl12xx_vif *wlvif, 5027 struct ieee80211_sta *sta) 5028 { 5029 struct wl1271_station *wl_sta; 5030 int ret; 5031 5032 5033 if (wl->active_sta_count >= wl->max_ap_stations) { 5034 wl1271_warning("could not allocate HLID - too much stations"); 5035 return -EBUSY; 5036 } 5037 5038 wl_sta = (struct wl1271_station *)sta->drv_priv; 5039 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid); 5040 if (ret < 0) { 5041 wl1271_warning("could not allocate HLID - too many links"); 5042 return -EBUSY; 5043 } 5044 5045 /* use the previous security seq, if this is a recovery/resume */ 5046 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts; 5047 5048 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map); 5049 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN); 5050 wl->active_sta_count++; 5051 return 0; 5052 } 5053 5054 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid) 5055 { 5056 if (!test_bit(hlid, wlvif->ap.sta_hlid_map)) 5057 return; 5058 5059 clear_bit(hlid, wlvif->ap.sta_hlid_map); 5060 __clear_bit(hlid, &wl->ap_ps_map); 5061 __clear_bit(hlid, &wl->ap_fw_ps_map); 5062 5063 /* 5064 * save the last used PN in the private part of iee80211_sta, 5065 * in case of recovery/suspend 5066 */ 5067 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr); 5068 5069 wl12xx_free_link(wl, wlvif, &hlid); 5070 wl->active_sta_count--; 5071 5072 /* 5073 * rearm the tx watchdog when the last STA is freed - give the FW a 5074 * chance to return STA-buffered packets before complaining. 5075 */ 5076 if (wl->active_sta_count == 0) 5077 wl12xx_rearm_tx_watchdog_locked(wl); 5078 } 5079 5080 static int wl12xx_sta_add(struct wl1271 *wl, 5081 struct wl12xx_vif *wlvif, 5082 struct ieee80211_sta *sta) 5083 { 5084 struct wl1271_station *wl_sta; 5085 int ret = 0; 5086 u8 hlid; 5087 5088 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid); 5089 5090 ret = wl1271_allocate_sta(wl, wlvif, sta); 5091 if (ret < 0) 5092 return ret; 5093 5094 wl_sta = (struct wl1271_station *)sta->drv_priv; 5095 hlid = wl_sta->hlid; 5096 5097 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid); 5098 if (ret < 0) 5099 wl1271_free_sta(wl, wlvif, hlid); 5100 5101 return ret; 5102 } 5103 5104 static int wl12xx_sta_remove(struct wl1271 *wl, 5105 struct wl12xx_vif *wlvif, 5106 struct ieee80211_sta *sta) 5107 { 5108 struct wl1271_station *wl_sta; 5109 int ret = 0, id; 5110 5111 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid); 5112 5113 wl_sta = (struct wl1271_station *)sta->drv_priv; 5114 id = wl_sta->hlid; 5115 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map))) 5116 return -EINVAL; 5117 5118 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid); 5119 if (ret < 0) 5120 return ret; 5121 5122 wl1271_free_sta(wl, wlvif, wl_sta->hlid); 5123 return ret; 5124 } 5125 5126 static void wlcore_roc_if_possible(struct wl1271 *wl, 5127 struct wl12xx_vif *wlvif) 5128 { 5129 if (find_first_bit(wl->roc_map, 5130 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) 5131 return; 5132 5133 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) 5134 return; 5135 5136 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel); 5137 } 5138 5139 /* 5140 * when wl_sta is NULL, we treat this call as if coming from a 5141 * pending auth reply. 5142 * wl->mutex must be taken and the FW must be awake when the call 5143 * takes place. 5144 */ 5145 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, 5146 struct wl1271_station *wl_sta, bool in_conn) 5147 { 5148 if (in_conn) { 5149 if (WARN_ON(wl_sta && wl_sta->in_connection)) 5150 return; 5151 5152 if (!wlvif->ap_pending_auth_reply && 5153 !wlvif->inconn_count) 5154 wlcore_roc_if_possible(wl, wlvif); 5155 5156 if (wl_sta) { 5157 wl_sta->in_connection = true; 5158 wlvif->inconn_count++; 5159 } else { 5160 wlvif->ap_pending_auth_reply = true; 5161 } 5162 } else { 5163 if (wl_sta && !wl_sta->in_connection) 5164 return; 5165 5166 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply)) 5167 return; 5168 5169 if (WARN_ON(wl_sta && !wlvif->inconn_count)) 5170 return; 5171 5172 if (wl_sta) { 5173 wl_sta->in_connection = false; 5174 wlvif->inconn_count--; 5175 } else { 5176 wlvif->ap_pending_auth_reply = false; 5177 } 5178 5179 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply && 5180 test_bit(wlvif->role_id, wl->roc_map)) 5181 wl12xx_croc(wl, wlvif->role_id); 5182 } 5183 } 5184 5185 static int wl12xx_update_sta_state(struct wl1271 *wl, 5186 struct wl12xx_vif *wlvif, 5187 struct ieee80211_sta *sta, 5188 enum ieee80211_sta_state old_state, 5189 enum ieee80211_sta_state new_state) 5190 { 5191 struct wl1271_station *wl_sta; 5192 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS; 5193 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS; 5194 int ret; 5195 5196 wl_sta = (struct wl1271_station *)sta->drv_priv; 5197 5198 /* Add station (AP mode) */ 5199 if (is_ap && 5200 old_state == IEEE80211_STA_AUTH && 5201 new_state == IEEE80211_STA_ASSOC) { 5202 ret = wl12xx_sta_add(wl, wlvif, sta); 5203 if (ret) 5204 return ret; 5205 5206 wl_sta->fw_added = true; 5207 5208 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true); 5209 } 5210 5211 /* Remove station (AP mode) */ 5212 if (is_ap && 5213 old_state == IEEE80211_STA_ASSOC && 5214 new_state == IEEE80211_STA_AUTH) { 5215 wl_sta->fw_added = false; 5216 5217 /* must not fail */ 5218 wl12xx_sta_remove(wl, wlvif, sta); 5219 5220 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false); 5221 } 5222 5223 /* Authorize station (AP mode) */ 5224 if (is_ap && 5225 new_state == IEEE80211_STA_AUTHORIZED) { 5226 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid); 5227 if (ret < 0) 5228 return ret; 5229 5230 ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap, 5231 true, 5232 wl_sta->hlid); 5233 if (ret) 5234 return ret; 5235 5236 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false); 5237 } 5238 5239 /* Authorize station */ 5240 if (is_sta && 5241 new_state == IEEE80211_STA_AUTHORIZED) { 5242 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); 5243 ret = wl12xx_set_authorized(wl, wlvif); 5244 if (ret) 5245 return ret; 5246 } 5247 5248 if (is_sta && 5249 old_state == IEEE80211_STA_AUTHORIZED && 5250 new_state == IEEE80211_STA_ASSOC) { 5251 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags); 5252 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags); 5253 } 5254 5255 /* save seq number on disassoc (suspend) */ 5256 if (is_sta && 5257 old_state == IEEE80211_STA_ASSOC && 5258 new_state == IEEE80211_STA_AUTH) { 5259 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta); 5260 wlvif->total_freed_pkts = 0; 5261 } 5262 5263 /* restore seq number on assoc (resume) */ 5264 if (is_sta && 5265 old_state == IEEE80211_STA_AUTH && 5266 new_state == IEEE80211_STA_ASSOC) { 5267 wlvif->total_freed_pkts = wl_sta->total_freed_pkts; 5268 } 5269 5270 /* clear ROCs on failure or authorization */ 5271 if (is_sta && 5272 (new_state == IEEE80211_STA_AUTHORIZED || 5273 new_state == IEEE80211_STA_NOTEXIST)) { 5274 if (test_bit(wlvif->role_id, wl->roc_map)) 5275 wl12xx_croc(wl, wlvif->role_id); 5276 } 5277 5278 if (is_sta && 5279 old_state == IEEE80211_STA_NOTEXIST && 5280 new_state == IEEE80211_STA_NONE) { 5281 if (find_first_bit(wl->roc_map, 5282 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) { 5283 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID); 5284 wl12xx_roc(wl, wlvif, wlvif->role_id, 5285 wlvif->band, wlvif->channel); 5286 } 5287 } 5288 return 0; 5289 } 5290 5291 static int wl12xx_op_sta_state(struct ieee80211_hw *hw, 5292 struct ieee80211_vif *vif, 5293 struct ieee80211_sta *sta, 5294 enum ieee80211_sta_state old_state, 5295 enum ieee80211_sta_state new_state) 5296 { 5297 struct wl1271 *wl = hw->priv; 5298 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5299 int ret; 5300 5301 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d", 5302 sta->aid, old_state, new_state); 5303 5304 mutex_lock(&wl->mutex); 5305 5306 if (unlikely(wl->state != WLCORE_STATE_ON)) { 5307 ret = -EBUSY; 5308 goto out; 5309 } 5310 5311 ret = pm_runtime_resume_and_get(wl->dev); 5312 if (ret < 0) 5313 goto out; 5314 5315 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state); 5316 5317 pm_runtime_put_autosuspend(wl->dev); 5318 out: 5319 mutex_unlock(&wl->mutex); 5320 if (new_state < old_state) 5321 return 0; 5322 return ret; 5323 } 5324 5325 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, 5326 struct ieee80211_vif *vif, 5327 struct ieee80211_ampdu_params *params) 5328 { 5329 struct wl1271 *wl = hw->priv; 5330 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5331 int ret; 5332 u8 hlid, *ba_bitmap; 5333 struct ieee80211_sta *sta = params->sta; 5334 enum ieee80211_ampdu_mlme_action action = params->action; 5335 u16 tid = params->tid; 5336 u16 *ssn = ¶ms->ssn; 5337 5338 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action, 5339 tid); 5340 5341 /* sanity check - the fields in FW are only 8bits wide */ 5342 if (WARN_ON(tid > 0xFF)) 5343 return -ENOTSUPP; 5344 5345 mutex_lock(&wl->mutex); 5346 5347 if (unlikely(wl->state != WLCORE_STATE_ON)) { 5348 ret = -EAGAIN; 5349 goto out; 5350 } 5351 5352 if (wlvif->bss_type == BSS_TYPE_STA_BSS) { 5353 hlid = wlvif->sta.hlid; 5354 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) { 5355 struct wl1271_station *wl_sta; 5356 5357 wl_sta = (struct wl1271_station *)sta->drv_priv; 5358 hlid = wl_sta->hlid; 5359 } else { 5360 ret = -EINVAL; 5361 goto out; 5362 } 5363 5364 ba_bitmap = &wl->links[hlid].ba_bitmap; 5365 5366 ret = pm_runtime_resume_and_get(wl->dev); 5367 if (ret < 0) 5368 goto out; 5369 5370 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d", 5371 tid, action); 5372 5373 switch (action) { 5374 case IEEE80211_AMPDU_RX_START: 5375 if (!wlvif->ba_support || !wlvif->ba_allowed) { 5376 ret = -ENOTSUPP; 5377 break; 5378 } 5379 5380 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) { 5381 ret = -EBUSY; 5382 wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions"); 5383 break; 5384 } 5385 5386 if (*ba_bitmap & BIT(tid)) { 5387 ret = -EINVAL; 5388 wl1271_error("cannot enable RX BA session on active " 5389 "tid: %d", tid); 5390 break; 5391 } 5392 5393 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true, 5394 hlid, 5395 params->buf_size); 5396 5397 if (!ret) { 5398 *ba_bitmap |= BIT(tid); 5399 wl->ba_rx_session_count++; 5400 } 5401 break; 5402 5403 case IEEE80211_AMPDU_RX_STOP: 5404 if (!(*ba_bitmap & BIT(tid))) { 5405 /* 5406 * this happens on reconfig - so only output a debug 5407 * message for now, and don't fail the function. 5408 */ 5409 wl1271_debug(DEBUG_MAC80211, 5410 "no active RX BA session on tid: %d", 5411 tid); 5412 ret = 0; 5413 break; 5414 } 5415 5416 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false, 5417 hlid, 0); 5418 if (!ret) { 5419 *ba_bitmap &= ~BIT(tid); 5420 wl->ba_rx_session_count--; 5421 } 5422 break; 5423 5424 /* 5425 * The BA initiator session management in FW independently. 5426 * Falling break here on purpose for all TX APDU commands. 5427 */ 5428 case IEEE80211_AMPDU_TX_START: 5429 case IEEE80211_AMPDU_TX_STOP_CONT: 5430 case IEEE80211_AMPDU_TX_STOP_FLUSH: 5431 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 5432 case IEEE80211_AMPDU_TX_OPERATIONAL: 5433 ret = -EINVAL; 5434 break; 5435 5436 default: 5437 wl1271_error("Incorrect ampdu action id=%x\n", action); 5438 ret = -EINVAL; 5439 } 5440 5441 pm_runtime_put_autosuspend(wl->dev); 5442 5443 out: 5444 mutex_unlock(&wl->mutex); 5445 5446 return ret; 5447 } 5448 5449 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw, 5450 struct ieee80211_vif *vif, 5451 const struct cfg80211_bitrate_mask *mask) 5452 { 5453 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5454 struct wl1271 *wl = hw->priv; 5455 int i, ret = 0; 5456 5457 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x", 5458 mask->control[NL80211_BAND_2GHZ].legacy, 5459 mask->control[NL80211_BAND_5GHZ].legacy); 5460 5461 mutex_lock(&wl->mutex); 5462 5463 for (i = 0; i < WLCORE_NUM_BANDS; i++) 5464 wlvif->bitrate_masks[i] = 5465 wl1271_tx_enabled_rates_get(wl, 5466 mask->control[i].legacy, 5467 i); 5468 5469 if (unlikely(wl->state != WLCORE_STATE_ON)) 5470 goto out; 5471 5472 if (wlvif->bss_type == BSS_TYPE_STA_BSS && 5473 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { 5474 5475 ret = pm_runtime_resume_and_get(wl->dev); 5476 if (ret < 0) 5477 goto out; 5478 5479 wl1271_set_band_rate(wl, wlvif); 5480 wlvif->basic_rate = 5481 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set); 5482 ret = wl1271_acx_sta_rate_policies(wl, wlvif); 5483 5484 pm_runtime_put_autosuspend(wl->dev); 5485 } 5486 out: 5487 mutex_unlock(&wl->mutex); 5488 5489 return ret; 5490 } 5491 5492 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw, 5493 struct ieee80211_vif *vif, 5494 struct ieee80211_channel_switch *ch_switch) 5495 { 5496 struct wl1271 *wl = hw->priv; 5497 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5498 int ret; 5499 5500 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch"); 5501 5502 wl1271_tx_flush(wl); 5503 5504 mutex_lock(&wl->mutex); 5505 5506 if (unlikely(wl->state == WLCORE_STATE_OFF)) { 5507 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 5508 ieee80211_chswitch_done(vif, false, 0); 5509 goto out; 5510 } else if (unlikely(wl->state != WLCORE_STATE_ON)) { 5511 goto out; 5512 } 5513 5514 ret = pm_runtime_resume_and_get(wl->dev); 5515 if (ret < 0) 5516 goto out; 5517 5518 /* TODO: change mac80211 to pass vif as param */ 5519 5520 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) { 5521 unsigned long delay_usec; 5522 5523 ret = wl->ops->channel_switch(wl, wlvif, ch_switch); 5524 if (ret) 5525 goto out_sleep; 5526 5527 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); 5528 5529 /* indicate failure 5 seconds after channel switch time */ 5530 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) * 5531 ch_switch->count; 5532 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work, 5533 usecs_to_jiffies(delay_usec) + 5534 msecs_to_jiffies(5000)); 5535 } 5536 5537 out_sleep: 5538 pm_runtime_put_autosuspend(wl->dev); 5539 5540 out: 5541 mutex_unlock(&wl->mutex); 5542 } 5543 5544 static const void *wlcore_get_beacon_ie(struct wl1271 *wl, 5545 struct wl12xx_vif *wlvif, 5546 u8 eid) 5547 { 5548 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable); 5549 struct sk_buff *beacon = 5550 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0); 5551 5552 if (!beacon) 5553 return NULL; 5554 5555 return cfg80211_find_ie(eid, 5556 beacon->data + ieoffset, 5557 beacon->len - ieoffset); 5558 } 5559 5560 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif, 5561 u8 *csa_count) 5562 { 5563 const u8 *ie; 5564 const struct ieee80211_channel_sw_ie *ie_csa; 5565 5566 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH); 5567 if (!ie) 5568 return -EINVAL; 5569 5570 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2]; 5571 *csa_count = ie_csa->count; 5572 5573 return 0; 5574 } 5575 5576 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw, 5577 struct ieee80211_vif *vif, 5578 struct cfg80211_chan_def *chandef) 5579 { 5580 struct wl1271 *wl = hw->priv; 5581 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5582 struct ieee80211_channel_switch ch_switch = { 5583 .block_tx = true, 5584 .chandef = *chandef, 5585 }; 5586 int ret; 5587 5588 wl1271_debug(DEBUG_MAC80211, 5589 "mac80211 channel switch beacon (role %d)", 5590 wlvif->role_id); 5591 5592 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count); 5593 if (ret < 0) { 5594 wl1271_error("error getting beacon (for CSA counter)"); 5595 return; 5596 } 5597 5598 mutex_lock(&wl->mutex); 5599 5600 if (unlikely(wl->state != WLCORE_STATE_ON)) { 5601 ret = -EBUSY; 5602 goto out; 5603 } 5604 5605 ret = pm_runtime_resume_and_get(wl->dev); 5606 if (ret < 0) 5607 goto out; 5608 5609 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch); 5610 if (ret) 5611 goto out_sleep; 5612 5613 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags); 5614 5615 out_sleep: 5616 pm_runtime_put_autosuspend(wl->dev); 5617 out: 5618 mutex_unlock(&wl->mutex); 5619 } 5620 5621 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 5622 u32 queues, bool drop) 5623 { 5624 struct wl1271 *wl = hw->priv; 5625 5626 wl1271_tx_flush(wl); 5627 } 5628 5629 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, 5630 struct ieee80211_vif *vif, 5631 struct ieee80211_channel *chan, 5632 int duration, 5633 enum ieee80211_roc_type type) 5634 { 5635 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5636 struct wl1271 *wl = hw->priv; 5637 int channel, active_roc, ret = 0; 5638 5639 channel = ieee80211_frequency_to_channel(chan->center_freq); 5640 5641 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)", 5642 channel, wlvif->role_id); 5643 5644 mutex_lock(&wl->mutex); 5645 5646 if (unlikely(wl->state != WLCORE_STATE_ON)) 5647 goto out; 5648 5649 /* return EBUSY if we can't ROC right now */ 5650 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES); 5651 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) { 5652 wl1271_warning("active roc on role %d", active_roc); 5653 ret = -EBUSY; 5654 goto out; 5655 } 5656 5657 ret = pm_runtime_resume_and_get(wl->dev); 5658 if (ret < 0) 5659 goto out; 5660 5661 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel); 5662 if (ret < 0) 5663 goto out_sleep; 5664 5665 wl->roc_vif = vif; 5666 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work, 5667 msecs_to_jiffies(duration)); 5668 out_sleep: 5669 pm_runtime_put_autosuspend(wl->dev); 5670 out: 5671 mutex_unlock(&wl->mutex); 5672 return ret; 5673 } 5674 5675 static int __wlcore_roc_completed(struct wl1271 *wl) 5676 { 5677 struct wl12xx_vif *wlvif; 5678 int ret; 5679 5680 /* already completed */ 5681 if (unlikely(!wl->roc_vif)) 5682 return 0; 5683 5684 wlvif = wl12xx_vif_to_data(wl->roc_vif); 5685 5686 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) 5687 return -EBUSY; 5688 5689 ret = wl12xx_stop_dev(wl, wlvif); 5690 if (ret < 0) 5691 return ret; 5692 5693 wl->roc_vif = NULL; 5694 5695 return 0; 5696 } 5697 5698 static int wlcore_roc_completed(struct wl1271 *wl) 5699 { 5700 int ret; 5701 5702 wl1271_debug(DEBUG_MAC80211, "roc complete"); 5703 5704 mutex_lock(&wl->mutex); 5705 5706 if (unlikely(wl->state != WLCORE_STATE_ON)) { 5707 ret = -EBUSY; 5708 goto out; 5709 } 5710 5711 ret = pm_runtime_resume_and_get(wl->dev); 5712 if (ret < 0) 5713 goto out; 5714 5715 ret = __wlcore_roc_completed(wl); 5716 5717 pm_runtime_put_autosuspend(wl->dev); 5718 out: 5719 mutex_unlock(&wl->mutex); 5720 5721 return ret; 5722 } 5723 5724 static void wlcore_roc_complete_work(struct work_struct *work) 5725 { 5726 struct delayed_work *dwork; 5727 struct wl1271 *wl; 5728 int ret; 5729 5730 dwork = to_delayed_work(work); 5731 wl = container_of(dwork, struct wl1271, roc_complete_work); 5732 5733 ret = wlcore_roc_completed(wl); 5734 if (!ret) 5735 ieee80211_remain_on_channel_expired(wl->hw); 5736 } 5737 5738 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw, 5739 struct ieee80211_vif *vif) 5740 { 5741 struct wl1271 *wl = hw->priv; 5742 5743 wl1271_debug(DEBUG_MAC80211, "mac80211 croc"); 5744 5745 /* TODO: per-vif */ 5746 wl1271_tx_flush(wl); 5747 5748 /* 5749 * we can't just flush_work here, because it might deadlock 5750 * (as we might get called from the same workqueue) 5751 */ 5752 cancel_delayed_work_sync(&wl->roc_complete_work); 5753 wlcore_roc_completed(wl); 5754 5755 return 0; 5756 } 5757 5758 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw, 5759 struct ieee80211_vif *vif, 5760 struct ieee80211_link_sta *link_sta, 5761 u32 changed) 5762 { 5763 struct ieee80211_sta *sta = link_sta->sta; 5764 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5765 5766 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update"); 5767 5768 if (!(changed & IEEE80211_RC_BW_CHANGED)) 5769 return; 5770 5771 /* this callback is atomic, so schedule a new work */ 5772 wlvif->rc_update_bw = sta->deflink.bandwidth; 5773 memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap, 5774 sizeof(sta->deflink.ht_cap)); 5775 ieee80211_queue_work(hw, &wlvif->rc_update_work); 5776 } 5777 5778 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw, 5779 struct ieee80211_vif *vif, 5780 struct ieee80211_sta *sta, 5781 struct station_info *sinfo) 5782 { 5783 struct wl1271 *wl = hw->priv; 5784 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); 5785 s8 rssi_dbm; 5786 int ret; 5787 5788 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi"); 5789 5790 mutex_lock(&wl->mutex); 5791 5792 if (unlikely(wl->state != WLCORE_STATE_ON)) 5793 goto out; 5794 5795 ret = pm_runtime_resume_and_get(wl->dev); 5796 if (ret < 0) 5797 goto out_sleep; 5798 5799 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm); 5800 if (ret < 0) 5801 goto out_sleep; 5802 5803 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 5804 sinfo->signal = rssi_dbm; 5805 5806 out_sleep: 5807 pm_runtime_put_autosuspend(wl->dev); 5808 5809 out: 5810 mutex_unlock(&wl->mutex); 5811 } 5812 5813 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw, 5814 struct ieee80211_sta *sta) 5815 { 5816 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv; 5817 struct wl1271 *wl = hw->priv; 5818 u8 hlid = wl_sta->hlid; 5819 5820 /* return in units of Kbps */ 5821 return (wl->links[hlid].fw_rate_mbps * 1000); 5822 } 5823 5824 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw) 5825 { 5826 struct wl1271 *wl = hw->priv; 5827 bool ret = false; 5828 5829 mutex_lock(&wl->mutex); 5830 5831 if (unlikely(wl->state != WLCORE_STATE_ON)) 5832 goto out; 5833 5834 /* packets are considered pending if in the TX queue or the FW */ 5835 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0); 5836 out: 5837 mutex_unlock(&wl->mutex); 5838 5839 return ret; 5840 } 5841 5842 /* can't be const, mac80211 writes to this */ 5843 static struct ieee80211_rate wl1271_rates[] = { 5844 { .bitrate = 10, 5845 .hw_value = CONF_HW_BIT_RATE_1MBPS, 5846 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, }, 5847 { .bitrate = 20, 5848 .hw_value = CONF_HW_BIT_RATE_2MBPS, 5849 .hw_value_short = CONF_HW_BIT_RATE_2MBPS, 5850 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 5851 { .bitrate = 55, 5852 .hw_value = CONF_HW_BIT_RATE_5_5MBPS, 5853 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS, 5854 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 5855 { .bitrate = 110, 5856 .hw_value = CONF_HW_BIT_RATE_11MBPS, 5857 .hw_value_short = CONF_HW_BIT_RATE_11MBPS, 5858 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 5859 { .bitrate = 60, 5860 .hw_value = CONF_HW_BIT_RATE_6MBPS, 5861 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, 5862 { .bitrate = 90, 5863 .hw_value = CONF_HW_BIT_RATE_9MBPS, 5864 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, 5865 { .bitrate = 120, 5866 .hw_value = CONF_HW_BIT_RATE_12MBPS, 5867 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, 5868 { .bitrate = 180, 5869 .hw_value = CONF_HW_BIT_RATE_18MBPS, 5870 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, 5871 { .bitrate = 240, 5872 .hw_value = CONF_HW_BIT_RATE_24MBPS, 5873 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, 5874 { .bitrate = 360, 5875 .hw_value = CONF_HW_BIT_RATE_36MBPS, 5876 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, 5877 { .bitrate = 480, 5878 .hw_value = CONF_HW_BIT_RATE_48MBPS, 5879 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, 5880 { .bitrate = 540, 5881 .hw_value = CONF_HW_BIT_RATE_54MBPS, 5882 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 5883 }; 5884 5885 /* can't be const, mac80211 writes to this */ 5886 static struct ieee80211_channel wl1271_channels[] = { 5887 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR }, 5888 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR }, 5889 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR }, 5890 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR }, 5891 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR }, 5892 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR }, 5893 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR }, 5894 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR }, 5895 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR }, 5896 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR }, 5897 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR }, 5898 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR }, 5899 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR }, 5900 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR }, 5901 }; 5902 5903 /* can't be const, mac80211 writes to this */ 5904 static struct ieee80211_supported_band wl1271_band_2ghz = { 5905 .channels = wl1271_channels, 5906 .n_channels = ARRAY_SIZE(wl1271_channels), 5907 .bitrates = wl1271_rates, 5908 .n_bitrates = ARRAY_SIZE(wl1271_rates), 5909 }; 5910 5911 /* 5 GHz data rates for WL1273 */ 5912 static struct ieee80211_rate wl1271_rates_5ghz[] = { 5913 { .bitrate = 60, 5914 .hw_value = CONF_HW_BIT_RATE_6MBPS, 5915 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, }, 5916 { .bitrate = 90, 5917 .hw_value = CONF_HW_BIT_RATE_9MBPS, 5918 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, }, 5919 { .bitrate = 120, 5920 .hw_value = CONF_HW_BIT_RATE_12MBPS, 5921 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, }, 5922 { .bitrate = 180, 5923 .hw_value = CONF_HW_BIT_RATE_18MBPS, 5924 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, }, 5925 { .bitrate = 240, 5926 .hw_value = CONF_HW_BIT_RATE_24MBPS, 5927 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, }, 5928 { .bitrate = 360, 5929 .hw_value = CONF_HW_BIT_RATE_36MBPS, 5930 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, }, 5931 { .bitrate = 480, 5932 .hw_value = CONF_HW_BIT_RATE_48MBPS, 5933 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, }, 5934 { .bitrate = 540, 5935 .hw_value = CONF_HW_BIT_RATE_54MBPS, 5936 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 5937 }; 5938 5939 /* 5 GHz band channels for WL1273 */ 5940 static struct ieee80211_channel wl1271_channels_5ghz[] = { 5941 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR }, 5942 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR }, 5943 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR }, 5944 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR }, 5945 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR }, 5946 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR }, 5947 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR }, 5948 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR }, 5949 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR }, 5950 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR }, 5951 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR }, 5952 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR }, 5953 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR }, 5954 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR }, 5955 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR }, 5956 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR }, 5957 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR }, 5958 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR }, 5959 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR }, 5960 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR }, 5961 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR }, 5962 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR }, 5963 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR }, 5964 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR }, 5965 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR }, 5966 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR }, 5967 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR }, 5968 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR }, 5969 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR }, 5970 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR }, 5971 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR }, 5972 }; 5973 5974 static struct ieee80211_supported_band wl1271_band_5ghz = { 5975 .channels = wl1271_channels_5ghz, 5976 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), 5977 .bitrates = wl1271_rates_5ghz, 5978 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 5979 }; 5980 5981 static const struct ieee80211_ops wl1271_ops = { 5982 .start = wl1271_op_start, 5983 .stop = wlcore_op_stop, 5984 .add_interface = wl1271_op_add_interface, 5985 .remove_interface = wl1271_op_remove_interface, 5986 .change_interface = wl12xx_op_change_interface, 5987 #ifdef CONFIG_PM 5988 .suspend = wl1271_op_suspend, 5989 .resume = wl1271_op_resume, 5990 #endif 5991 .config = wl1271_op_config, 5992 .prepare_multicast = wl1271_op_prepare_multicast, 5993 .configure_filter = wl1271_op_configure_filter, 5994 .tx = wl1271_op_tx, 5995 .wake_tx_queue = ieee80211_handle_wake_tx_queue, 5996 .set_key = wlcore_op_set_key, 5997 .hw_scan = wl1271_op_hw_scan, 5998 .cancel_hw_scan = wl1271_op_cancel_hw_scan, 5999 .sched_scan_start = wl1271_op_sched_scan_start, 6000 .sched_scan_stop = wl1271_op_sched_scan_stop, 6001 .bss_info_changed = wl1271_op_bss_info_changed, 6002 .set_frag_threshold = wl1271_op_set_frag_threshold, 6003 .set_rts_threshold = wl1271_op_set_rts_threshold, 6004 .conf_tx = wl1271_op_conf_tx, 6005 .get_tsf = wl1271_op_get_tsf, 6006 .get_survey = wl1271_op_get_survey, 6007 .sta_state = wl12xx_op_sta_state, 6008 .ampdu_action = wl1271_op_ampdu_action, 6009 .tx_frames_pending = wl1271_tx_frames_pending, 6010 .set_bitrate_mask = wl12xx_set_bitrate_mask, 6011 .set_default_unicast_key = wl1271_op_set_default_key_idx, 6012 .channel_switch = wl12xx_op_channel_switch, 6013 .channel_switch_beacon = wlcore_op_channel_switch_beacon, 6014 .flush = wlcore_op_flush, 6015 .remain_on_channel = wlcore_op_remain_on_channel, 6016 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel, 6017 .add_chanctx = wlcore_op_add_chanctx, 6018 .remove_chanctx = wlcore_op_remove_chanctx, 6019 .change_chanctx = wlcore_op_change_chanctx, 6020 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx, 6021 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx, 6022 .switch_vif_chanctx = wlcore_op_switch_vif_chanctx, 6023 .link_sta_rc_update = wlcore_op_sta_rc_update, 6024 .sta_statistics = wlcore_op_sta_statistics, 6025 .get_expected_throughput = wlcore_op_get_expected_throughput, 6026 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 6027 }; 6028 6029 6030 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band) 6031 { 6032 u8 idx; 6033 6034 BUG_ON(band >= 2); 6035 6036 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) { 6037 wl1271_error("Illegal RX rate from HW: %d", rate); 6038 return 0; 6039 } 6040 6041 idx = wl->band_rate_to_idx[band][rate]; 6042 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { 6043 wl1271_error("Unsupported RX rate from HW: %d", rate); 6044 return 0; 6045 } 6046 6047 return idx; 6048 } 6049 6050 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic) 6051 { 6052 int i; 6053 6054 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x", 6055 oui, nic); 6056 6057 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff) 6058 wl1271_warning("NIC part of the MAC address wraps around!"); 6059 6060 for (i = 0; i < wl->num_mac_addr; i++) { 6061 wl->addresses[i].addr[0] = (u8)(oui >> 16); 6062 wl->addresses[i].addr[1] = (u8)(oui >> 8); 6063 wl->addresses[i].addr[2] = (u8) oui; 6064 wl->addresses[i].addr[3] = (u8)(nic >> 16); 6065 wl->addresses[i].addr[4] = (u8)(nic >> 8); 6066 wl->addresses[i].addr[5] = (u8) nic; 6067 nic++; 6068 } 6069 6070 /* we may be one address short at the most */ 6071 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES); 6072 6073 /* 6074 * turn on the LAA bit in the first address and use it as 6075 * the last address. 6076 */ 6077 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) { 6078 int idx = WLCORE_NUM_MAC_ADDRESSES - 1; 6079 memcpy(&wl->addresses[idx], &wl->addresses[0], 6080 sizeof(wl->addresses[0])); 6081 /* LAA bit */ 6082 wl->addresses[idx].addr[0] |= BIT(1); 6083 } 6084 6085 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES; 6086 wl->hw->wiphy->addresses = wl->addresses; 6087 } 6088 6089 static int wl12xx_get_hw_info(struct wl1271 *wl) 6090 { 6091 int ret; 6092 6093 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id); 6094 if (ret < 0) 6095 goto out; 6096 6097 wl->fuse_oui_addr = 0; 6098 wl->fuse_nic_addr = 0; 6099 6100 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver); 6101 if (ret < 0) 6102 goto out; 6103 6104 if (wl->ops->get_mac) 6105 ret = wl->ops->get_mac(wl); 6106 6107 out: 6108 return ret; 6109 } 6110 6111 static int wl1271_register_hw(struct wl1271 *wl) 6112 { 6113 int ret; 6114 u32 oui_addr = 0, nic_addr = 0; 6115 struct platform_device *pdev = wl->pdev; 6116 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); 6117 6118 if (wl->mac80211_registered) 6119 return 0; 6120 6121 if (wl->nvs_len >= 12) { 6122 /* NOTE: The wl->nvs->nvs element must be first, in 6123 * order to simplify the casting, we assume it is at 6124 * the beginning of the wl->nvs structure. 6125 */ 6126 u8 *nvs_ptr = (u8 *)wl->nvs; 6127 6128 oui_addr = 6129 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6]; 6130 nic_addr = 6131 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3]; 6132 } 6133 6134 /* if the MAC address is zeroed in the NVS derive from fuse */ 6135 if (oui_addr == 0 && nic_addr == 0) { 6136 oui_addr = wl->fuse_oui_addr; 6137 /* fuse has the BD_ADDR, the WLAN addresses are the next two */ 6138 nic_addr = wl->fuse_nic_addr + 1; 6139 } 6140 6141 if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) { 6142 wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead."); 6143 if (!strcmp(pdev_data->family->name, "wl18xx")) { 6144 wl1271_warning("This default nvs file can be removed from the file system"); 6145 } else { 6146 wl1271_warning("Your device performance is not optimized."); 6147 wl1271_warning("Please use the calibrator tool to configure your device."); 6148 } 6149 6150 if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { 6151 wl1271_warning("Fuse mac address is zero. using random mac"); 6152 /* Use TI oui and a random nic */ 6153 oui_addr = WLCORE_TI_OUI_ADDRESS; 6154 nic_addr = get_random_u32(); 6155 } else { 6156 oui_addr = wl->fuse_oui_addr; 6157 /* fuse has the BD_ADDR, the WLAN addresses are the next two */ 6158 nic_addr = wl->fuse_nic_addr + 1; 6159 } 6160 } 6161 6162 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr); 6163 6164 ret = ieee80211_register_hw(wl->hw); 6165 if (ret < 0) { 6166 wl1271_error("unable to register mac80211 hw: %d", ret); 6167 goto out; 6168 } 6169 6170 wl->mac80211_registered = true; 6171 6172 wl1271_debugfs_init(wl); 6173 6174 wl1271_notice("loaded"); 6175 6176 out: 6177 return ret; 6178 } 6179 6180 static void wl1271_unregister_hw(struct wl1271 *wl) 6181 { 6182 if (wl->plt) 6183 wl1271_plt_stop(wl); 6184 6185 ieee80211_unregister_hw(wl->hw); 6186 wl->mac80211_registered = false; 6187 6188 } 6189 6190 static int wl1271_init_ieee80211(struct wl1271 *wl) 6191 { 6192 int i; 6193 static const u32 cipher_suites[] = { 6194 WLAN_CIPHER_SUITE_WEP40, 6195 WLAN_CIPHER_SUITE_WEP104, 6196 WLAN_CIPHER_SUITE_TKIP, 6197 WLAN_CIPHER_SUITE_CCMP, 6198 WL1271_CIPHER_SUITE_GEM, 6199 }; 6200 6201 /* The tx descriptor buffer */ 6202 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr); 6203 6204 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) 6205 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP; 6206 6207 /* unit us */ 6208 /* FIXME: find a proper value */ 6209 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval; 6210 6211 ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT); 6212 ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA); 6213 ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK); 6214 ieee80211_hw_set(wl->hw, QUEUE_CONTROL); 6215 ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW); 6216 ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION); 6217 ieee80211_hw_set(wl->hw, AP_LINK_PS); 6218 ieee80211_hw_set(wl->hw, SPECTRUM_MGMT); 6219 ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS); 6220 ieee80211_hw_set(wl->hw, CONNECTION_MONITOR); 6221 ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL); 6222 ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS); 6223 ieee80211_hw_set(wl->hw, SIGNAL_DBM); 6224 ieee80211_hw_set(wl->hw, SUPPORTS_PS); 6225 ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG); 6226 6227 wl->hw->wiphy->cipher_suites = cipher_suites; 6228 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 6229 6230 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 6231 BIT(NL80211_IFTYPE_AP) | 6232 BIT(NL80211_IFTYPE_P2P_DEVICE) | 6233 BIT(NL80211_IFTYPE_P2P_CLIENT) | 6234 #ifdef CONFIG_MAC80211_MESH 6235 BIT(NL80211_IFTYPE_MESH_POINT) | 6236 #endif 6237 BIT(NL80211_IFTYPE_P2P_GO); 6238 6239 wl->hw->wiphy->max_scan_ssids = 1; 6240 wl->hw->wiphy->max_sched_scan_ssids = 16; 6241 wl->hw->wiphy->max_match_sets = 16; 6242 /* 6243 * Maximum length of elements in scanning probe request templates 6244 * should be the maximum length possible for a template, without 6245 * the IEEE80211 header of the template 6246 */ 6247 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 6248 sizeof(struct ieee80211_header); 6249 6250 wl->hw->wiphy->max_sched_scan_reqs = 1; 6251 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 6252 sizeof(struct ieee80211_header); 6253 6254 wl->hw->wiphy->max_remain_on_channel_duration = 30000; 6255 6256 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD | 6257 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 6258 WIPHY_FLAG_HAS_CHANNEL_SWITCH | 6259 WIPHY_FLAG_IBSS_RSN; 6260 6261 wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN; 6262 6263 /* make sure all our channels fit in the scanned_ch bitmask */ 6264 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + 6265 ARRAY_SIZE(wl1271_channels_5ghz) > 6266 WL1271_MAX_CHANNELS); 6267 /* 6268 * clear channel flags from the previous usage 6269 * and restore max_power & max_antenna_gain values. 6270 */ 6271 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) { 6272 wl1271_band_2ghz.channels[i].flags = 0; 6273 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR; 6274 wl1271_band_2ghz.channels[i].max_antenna_gain = 0; 6275 } 6276 6277 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) { 6278 wl1271_band_5ghz.channels[i].flags = 0; 6279 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR; 6280 wl1271_band_5ghz.channels[i].max_antenna_gain = 0; 6281 } 6282 6283 /* 6284 * We keep local copies of the band structs because we need to 6285 * modify them on a per-device basis. 6286 */ 6287 memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz, 6288 sizeof(wl1271_band_2ghz)); 6289 memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap, 6290 &wl->ht_cap[NL80211_BAND_2GHZ], 6291 sizeof(*wl->ht_cap)); 6292 memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz, 6293 sizeof(wl1271_band_5ghz)); 6294 memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap, 6295 &wl->ht_cap[NL80211_BAND_5GHZ], 6296 sizeof(*wl->ht_cap)); 6297 6298 wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = 6299 &wl->bands[NL80211_BAND_2GHZ]; 6300 wl->hw->wiphy->bands[NL80211_BAND_5GHZ] = 6301 &wl->bands[NL80211_BAND_5GHZ]; 6302 6303 /* 6304 * allow 4 queues per mac address we support + 6305 * 1 cab queue per mac + one global offchannel Tx queue 6306 */ 6307 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1; 6308 6309 /* the last queue is the offchannel queue */ 6310 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1; 6311 wl->hw->max_rates = 1; 6312 6313 wl->hw->wiphy->reg_notifier = wl1271_reg_notify; 6314 6315 /* the FW answers probe-requests in AP-mode */ 6316 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 6317 wl->hw->wiphy->probe_resp_offload = 6318 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 6319 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 6320 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 6321 6322 /* allowed interface combinations */ 6323 wl->hw->wiphy->iface_combinations = wl->iface_combinations; 6324 wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations; 6325 6326 /* register vendor commands */ 6327 wlcore_set_vendor_commands(wl->hw->wiphy); 6328 6329 SET_IEEE80211_DEV(wl->hw, wl->dev); 6330 6331 wl->hw->sta_data_size = sizeof(struct wl1271_station); 6332 wl->hw->vif_data_size = sizeof(struct wl12xx_vif); 6333 6334 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size; 6335 6336 return 0; 6337 } 6338 6339 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size, 6340 u32 mbox_size) 6341 { 6342 struct ieee80211_hw *hw; 6343 struct wl1271 *wl; 6344 int i, j, ret; 6345 unsigned int order; 6346 6347 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 6348 if (!hw) { 6349 wl1271_error("could not alloc ieee80211_hw"); 6350 ret = -ENOMEM; 6351 goto err_hw_alloc; 6352 } 6353 6354 wl = hw->priv; 6355 memset(wl, 0, sizeof(*wl)); 6356 6357 wl->priv = kzalloc(priv_size, GFP_KERNEL); 6358 if (!wl->priv) { 6359 wl1271_error("could not alloc wl priv"); 6360 ret = -ENOMEM; 6361 goto err_priv_alloc; 6362 } 6363 6364 INIT_LIST_HEAD(&wl->wlvif_list); 6365 6366 wl->hw = hw; 6367 6368 /* 6369 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS. 6370 * we don't allocate any additional resource here, so that's fine. 6371 */ 6372 for (i = 0; i < NUM_TX_QUEUES; i++) 6373 for (j = 0; j < WLCORE_MAX_LINKS; j++) 6374 skb_queue_head_init(&wl->links[j].tx_queue[i]); 6375 6376 skb_queue_head_init(&wl->deferred_rx_queue); 6377 skb_queue_head_init(&wl->deferred_tx_queue); 6378 6379 INIT_WORK(&wl->netstack_work, wl1271_netstack_work); 6380 INIT_WORK(&wl->tx_work, wl1271_tx_work); 6381 INIT_WORK(&wl->recovery_work, wl1271_recovery_work); 6382 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); 6383 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work); 6384 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work); 6385 6386 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq"); 6387 if (!wl->freezable_wq) { 6388 ret = -ENOMEM; 6389 goto err_hw; 6390 } 6391 6392 wl->channel = 0; 6393 wl->rx_counter = 0; 6394 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 6395 wl->band = NL80211_BAND_2GHZ; 6396 wl->channel_type = NL80211_CHAN_NO_HT; 6397 wl->flags = 0; 6398 wl->sg_enabled = true; 6399 wl->sleep_auth = WL1271_PSM_ILLEGAL; 6400 wl->recovery_count = 0; 6401 wl->hw_pg_ver = -1; 6402 wl->ap_ps_map = 0; 6403 wl->ap_fw_ps_map = 0; 6404 wl->quirks = 0; 6405 wl->system_hlid = WL12XX_SYSTEM_HLID; 6406 wl->active_sta_count = 0; 6407 wl->active_link_count = 0; 6408 wl->fwlog_size = 0; 6409 6410 /* The system link is always allocated */ 6411 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); 6412 6413 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 6414 for (i = 0; i < wl->num_tx_desc; i++) 6415 wl->tx_frames[i] = NULL; 6416 6417 spin_lock_init(&wl->wl_lock); 6418 6419 wl->state = WLCORE_STATE_OFF; 6420 wl->fw_type = WL12XX_FW_TYPE_NONE; 6421 mutex_init(&wl->mutex); 6422 mutex_init(&wl->flush_mutex); 6423 init_completion(&wl->nvs_loading_complete); 6424 6425 order = get_order(aggr_buf_size); 6426 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 6427 if (!wl->aggr_buf) { 6428 ret = -ENOMEM; 6429 goto err_wq; 6430 } 6431 wl->aggr_buf_size = aggr_buf_size; 6432 6433 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl); 6434 if (!wl->dummy_packet) { 6435 ret = -ENOMEM; 6436 goto err_aggr; 6437 } 6438 6439 /* Allocate one page for the FW log */ 6440 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL); 6441 if (!wl->fwlog) { 6442 ret = -ENOMEM; 6443 goto err_dummy_packet; 6444 } 6445 6446 wl->mbox_size = mbox_size; 6447 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA); 6448 if (!wl->mbox) { 6449 ret = -ENOMEM; 6450 goto err_fwlog; 6451 } 6452 6453 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL); 6454 if (!wl->buffer_32) { 6455 ret = -ENOMEM; 6456 goto err_mbox; 6457 } 6458 6459 return hw; 6460 6461 err_mbox: 6462 kfree(wl->mbox); 6463 6464 err_fwlog: 6465 free_page((unsigned long)wl->fwlog); 6466 6467 err_dummy_packet: 6468 dev_kfree_skb(wl->dummy_packet); 6469 6470 err_aggr: 6471 free_pages((unsigned long)wl->aggr_buf, order); 6472 6473 err_wq: 6474 destroy_workqueue(wl->freezable_wq); 6475 6476 err_hw: 6477 wl1271_debugfs_exit(wl); 6478 kfree(wl->priv); 6479 6480 err_priv_alloc: 6481 ieee80211_free_hw(hw); 6482 6483 err_hw_alloc: 6484 6485 return ERR_PTR(ret); 6486 } 6487 EXPORT_SYMBOL_GPL(wlcore_alloc_hw); 6488 6489 int wlcore_free_hw(struct wl1271 *wl) 6490 { 6491 /* Unblock any fwlog readers */ 6492 mutex_lock(&wl->mutex); 6493 wl->fwlog_size = -1; 6494 mutex_unlock(&wl->mutex); 6495 6496 wlcore_sysfs_free(wl); 6497 6498 kfree(wl->buffer_32); 6499 kfree(wl->mbox); 6500 free_page((unsigned long)wl->fwlog); 6501 dev_kfree_skb(wl->dummy_packet); 6502 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size)); 6503 6504 wl1271_debugfs_exit(wl); 6505 6506 vfree(wl->fw); 6507 wl->fw = NULL; 6508 wl->fw_type = WL12XX_FW_TYPE_NONE; 6509 kfree(wl->nvs); 6510 wl->nvs = NULL; 6511 6512 kfree(wl->raw_fw_status); 6513 kfree(wl->fw_status); 6514 kfree(wl->tx_res_if); 6515 destroy_workqueue(wl->freezable_wq); 6516 6517 kfree(wl->priv); 6518 ieee80211_free_hw(wl->hw); 6519 6520 return 0; 6521 } 6522 EXPORT_SYMBOL_GPL(wlcore_free_hw); 6523 6524 #ifdef CONFIG_PM 6525 static const struct wiphy_wowlan_support wlcore_wowlan_support = { 6526 .flags = WIPHY_WOWLAN_ANY, 6527 .n_patterns = WL1271_MAX_RX_FILTERS, 6528 .pattern_min_len = 1, 6529 .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE, 6530 }; 6531 #endif 6532 6533 static irqreturn_t wlcore_hardirq(int irq, void *cookie) 6534 { 6535 return IRQ_WAKE_THREAD; 6536 } 6537 6538 static void wlcore_nvs_cb(const struct firmware *fw, void *context) 6539 { 6540 struct wl1271 *wl = context; 6541 struct platform_device *pdev = wl->pdev; 6542 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); 6543 struct resource *res; 6544 6545 int ret; 6546 irq_handler_t hardirq_fn = NULL; 6547 6548 if (fw) { 6549 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); 6550 if (!wl->nvs) { 6551 wl1271_error("Could not allocate nvs data"); 6552 goto out; 6553 } 6554 wl->nvs_len = fw->size; 6555 } else if (pdev_data->family->nvs_name) { 6556 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s", 6557 pdev_data->family->nvs_name); 6558 wl->nvs = NULL; 6559 wl->nvs_len = 0; 6560 } else { 6561 wl->nvs = NULL; 6562 wl->nvs_len = 0; 6563 } 6564 6565 ret = wl->ops->setup(wl); 6566 if (ret < 0) 6567 goto out_free_nvs; 6568 6569 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS); 6570 6571 /* adjust some runtime configuration parameters */ 6572 wlcore_adjust_conf(wl); 6573 6574 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 6575 if (!res) { 6576 wl1271_error("Could not get IRQ resource"); 6577 goto out_free_nvs; 6578 } 6579 6580 wl->irq = res->start; 6581 wl->irq_flags = res->flags & IRQF_TRIGGER_MASK; 6582 wl->if_ops = pdev_data->if_ops; 6583 6584 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) 6585 hardirq_fn = wlcore_hardirq; 6586 else 6587 wl->irq_flags |= IRQF_ONESHOT; 6588 6589 ret = wl12xx_set_power_on(wl); 6590 if (ret < 0) 6591 goto out_free_nvs; 6592 6593 ret = wl12xx_get_hw_info(wl); 6594 if (ret < 0) { 6595 wl1271_error("couldn't get hw info"); 6596 wl1271_power_off(wl); 6597 goto out_free_nvs; 6598 } 6599 6600 ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq, 6601 wl->irq_flags, pdev->name, wl); 6602 if (ret < 0) { 6603 wl1271_error("interrupt configuration failed"); 6604 wl1271_power_off(wl); 6605 goto out_free_nvs; 6606 } 6607 6608 #ifdef CONFIG_PM 6609 device_init_wakeup(wl->dev, true); 6610 6611 ret = enable_irq_wake(wl->irq); 6612 if (!ret) { 6613 wl->irq_wake_enabled = true; 6614 if (pdev_data->pwr_in_suspend) 6615 wl->hw->wiphy->wowlan = &wlcore_wowlan_support; 6616 } 6617 6618 res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 6619 if (res) { 6620 wl->wakeirq = res->start; 6621 wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK; 6622 ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq); 6623 if (ret) 6624 wl->wakeirq = -ENODEV; 6625 } else { 6626 wl->wakeirq = -ENODEV; 6627 } 6628 #endif 6629 disable_irq(wl->irq); 6630 wl1271_power_off(wl); 6631 6632 ret = wl->ops->identify_chip(wl); 6633 if (ret < 0) 6634 goto out_irq; 6635 6636 ret = wl1271_init_ieee80211(wl); 6637 if (ret) 6638 goto out_irq; 6639 6640 ret = wl1271_register_hw(wl); 6641 if (ret) 6642 goto out_irq; 6643 6644 ret = wlcore_sysfs_init(wl); 6645 if (ret) 6646 goto out_unreg; 6647 6648 wl->initialized = true; 6649 goto out; 6650 6651 out_unreg: 6652 wl1271_unregister_hw(wl); 6653 6654 out_irq: 6655 if (wl->wakeirq >= 0) 6656 dev_pm_clear_wake_irq(wl->dev); 6657 device_init_wakeup(wl->dev, false); 6658 free_irq(wl->irq, wl); 6659 6660 out_free_nvs: 6661 kfree(wl->nvs); 6662 6663 out: 6664 release_firmware(fw); 6665 complete_all(&wl->nvs_loading_complete); 6666 } 6667 6668 static int __maybe_unused wlcore_runtime_suspend(struct device *dev) 6669 { 6670 struct wl1271 *wl = dev_get_drvdata(dev); 6671 struct wl12xx_vif *wlvif; 6672 int error; 6673 6674 /* We do not enter elp sleep in PLT mode */ 6675 if (wl->plt) 6676 return 0; 6677 6678 /* Nothing to do if no ELP mode requested */ 6679 if (wl->sleep_auth != WL1271_PSM_ELP) 6680 return 0; 6681 6682 wl12xx_for_each_wlvif(wl, wlvif) { 6683 if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) && 6684 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) 6685 return -EBUSY; 6686 } 6687 6688 wl1271_debug(DEBUG_PSM, "chip to elp"); 6689 error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP); 6690 if (error < 0) { 6691 wl12xx_queue_recovery_work(wl); 6692 6693 return error; 6694 } 6695 6696 set_bit(WL1271_FLAG_IN_ELP, &wl->flags); 6697 6698 return 0; 6699 } 6700 6701 static int __maybe_unused wlcore_runtime_resume(struct device *dev) 6702 { 6703 struct wl1271 *wl = dev_get_drvdata(dev); 6704 DECLARE_COMPLETION_ONSTACK(compl); 6705 unsigned long flags; 6706 int ret; 6707 unsigned long start_time = jiffies; 6708 bool recovery = false; 6709 6710 /* Nothing to do if no ELP mode requested */ 6711 if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) 6712 return 0; 6713 6714 wl1271_debug(DEBUG_PSM, "waking up chip from elp"); 6715 6716 spin_lock_irqsave(&wl->wl_lock, flags); 6717 wl->elp_compl = &compl; 6718 spin_unlock_irqrestore(&wl->wl_lock, flags); 6719 6720 ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP); 6721 if (ret < 0) { 6722 recovery = true; 6723 } else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) { 6724 ret = wait_for_completion_timeout(&compl, 6725 msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT)); 6726 if (ret == 0) { 6727 wl1271_warning("ELP wakeup timeout!"); 6728 recovery = true; 6729 } 6730 } 6731 6732 spin_lock_irqsave(&wl->wl_lock, flags); 6733 wl->elp_compl = NULL; 6734 spin_unlock_irqrestore(&wl->wl_lock, flags); 6735 clear_bit(WL1271_FLAG_IN_ELP, &wl->flags); 6736 6737 if (recovery) { 6738 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags); 6739 wl12xx_queue_recovery_work(wl); 6740 } else { 6741 wl1271_debug(DEBUG_PSM, "wakeup time: %u ms", 6742 jiffies_to_msecs(jiffies - start_time)); 6743 } 6744 6745 return 0; 6746 } 6747 6748 static const struct dev_pm_ops wlcore_pm_ops = { 6749 SET_RUNTIME_PM_OPS(wlcore_runtime_suspend, 6750 wlcore_runtime_resume, 6751 NULL) 6752 }; 6753 6754 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev) 6755 { 6756 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); 6757 const char *nvs_name; 6758 int ret = 0; 6759 6760 if (!wl->ops || !wl->ptable || !pdev_data) 6761 return -EINVAL; 6762 6763 wl->dev = &pdev->dev; 6764 wl->pdev = pdev; 6765 platform_set_drvdata(pdev, wl); 6766 6767 if (pdev_data->family && pdev_data->family->nvs_name) { 6768 nvs_name = pdev_data->family->nvs_name; 6769 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, 6770 nvs_name, &pdev->dev, GFP_KERNEL, 6771 wl, wlcore_nvs_cb); 6772 if (ret < 0) { 6773 wl1271_error("request_firmware_nowait failed for %s: %d", 6774 nvs_name, ret); 6775 complete_all(&wl->nvs_loading_complete); 6776 } 6777 } else { 6778 wlcore_nvs_cb(NULL, wl); 6779 } 6780 6781 wl->dev->driver->pm = &wlcore_pm_ops; 6782 pm_runtime_set_autosuspend_delay(wl->dev, 50); 6783 pm_runtime_use_autosuspend(wl->dev); 6784 pm_runtime_enable(wl->dev); 6785 6786 return ret; 6787 } 6788 EXPORT_SYMBOL_GPL(wlcore_probe); 6789 6790 void wlcore_remove(struct platform_device *pdev) 6791 { 6792 struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); 6793 struct wl1271 *wl = platform_get_drvdata(pdev); 6794 int error; 6795 6796 error = pm_runtime_get_sync(wl->dev); 6797 if (error < 0) 6798 dev_warn(wl->dev, "PM runtime failed: %i\n", error); 6799 6800 wl->dev->driver->pm = NULL; 6801 6802 if (pdev_data->family && pdev_data->family->nvs_name) 6803 wait_for_completion(&wl->nvs_loading_complete); 6804 if (!wl->initialized) 6805 return; 6806 6807 if (wl->wakeirq >= 0) { 6808 dev_pm_clear_wake_irq(wl->dev); 6809 wl->wakeirq = -ENODEV; 6810 } 6811 6812 device_init_wakeup(wl->dev, false); 6813 6814 if (wl->irq_wake_enabled) 6815 disable_irq_wake(wl->irq); 6816 6817 wl1271_unregister_hw(wl); 6818 6819 pm_runtime_put_sync(wl->dev); 6820 pm_runtime_dont_use_autosuspend(wl->dev); 6821 pm_runtime_disable(wl->dev); 6822 6823 free_irq(wl->irq, wl); 6824 wlcore_free_hw(wl); 6825 } 6826 EXPORT_SYMBOL_GPL(wlcore_remove); 6827 6828 u32 wl12xx_debug_level = DEBUG_NONE; 6829 EXPORT_SYMBOL_GPL(wl12xx_debug_level); 6830 module_param_named(debug_level, wl12xx_debug_level, uint, 0600); 6831 MODULE_PARM_DESC(debug_level, "wl12xx debugging level"); 6832 6833 module_param_named(fwlog, fwlog_param, charp, 0); 6834 MODULE_PARM_DESC(fwlog, 6835 "FW logger options: continuous, dbgpins or disable"); 6836 6837 module_param(fwlog_mem_blocks, int, 0600); 6838 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks"); 6839 6840 module_param(bug_on_recovery, int, 0600); 6841 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); 6842 6843 module_param(no_recovery, int, 0600); 6844 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck."); 6845 6846 MODULE_DESCRIPTION("TI WLAN core driver"); 6847 MODULE_LICENSE("GPL"); 6848 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 6849 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 6850