/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}
EXPORT_SYMBOL(wl1271_free_tx_id);

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct wl12xx_vif *wlvif,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (!ieee80211_is_auth(hdr->frame_control))
		return;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);

	/*
	 * ROC for 1 second on the AP channel for completing the connection.
	 * Note the ROC will be continued by the update_sta_state callbacks
	 * once the station reaches the associated state.
	 */
	wlcore_update_inconn_sta(wl, wlvif, NULL, true);
	wlvif->pending_auth_reply_time = jiffies;
	cancel_delayed_work(&wlvif->pending_auth_complete_work);
	ieee80211_queue_delayed_work(wl->hw,
				&wlvif->pending_auth_complete_work,
				msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
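
/*
 * Called for each Tx frame allocated on an AP link: if the FW reports
 * the link in PS and enough packets are already queued for it in FW
 * memory, move the link into high-level (host) PS.
 */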
static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected link. In this
	 * case FW-memory congestion is less of a problem.
	 * Note that a single connected STA means 2*ap_count + 1 active links,
	 * since we must account for the global and broadcast AP links
	 * for each AP. The "fw_ps" check assures us the other link is a STA
	 * connected to the AP. Otherwise the FW would not set the PSM bit.
	 */
	if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
	    tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);

static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				struct sk_buff *skb, struct ieee80211_sta *sta)
{
	if (sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
			return wlvif->ap.bcast_hlid;
		else
			return wlvif->ap.global_hlid;
	}
}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *control;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);

	control = IEEE80211_SKB_CB(skb);
	if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		wl1271_debug(DEBUG_TX, "tx offchannel");
		return wlvif->dev_hlid;
	}

	return wlvif->sta.hlid;
}

unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
	    !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
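
/*
 * Reserve a Tx descriptor id and FW memory blocks for the skb.
 * Returns 0 on success, -EAGAIN when the aggregation buffer can't hold
 * the frame, and -EBUSY when no descriptor id or FW blocks are free.
 */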
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid, bool is_gem)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks;

	if (buf_offset + total_len > wl->aggr_buf_size)
		return -EAGAIN;

	spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = skb_push(skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/*
		 * If the FW was empty before, arm the Tx watchdog. Also do
		 * this on the first Tx after resume, as we always cancel the
		 * watchdog on suspend.
		 */
		if (wl->tx_allocated_blocks == total_blocks ||
		    test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (test_bit(hlid, wl->links_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}
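
/*
 * Fill the HW descriptor that precedes the frame: lifetime, queue,
 * session id, rate policy and Tx attributes. Also moves the 802.11
 * header up when extra security header space was reserved (TKIP).
 */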
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
		skb_set_network_header(skb, skb_network_offset(skb) + extra);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different than the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		u8 session_id = wl->session_ids[hlid];

		if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
		    (wlvif->bss_type == BSS_TYPE_AP_BSS))
			session_id = 0;

		/* configure the tx attributes */
		tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/*
		 * if the packets are data packets
		 * send them with AP rate policies (EAPOLs are an exception),
		 * otherwise use default basic rates
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_PAE))
			rate_idx = wlvif->sta.basic_rate_idx;
		else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (ieee80211_is_data(frame_control))
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid ||
			 skb->protocol == cpu_to_be16(ETH_P_PAE) ||
			 !ieee80211_is_data(frame_control))
			/*
			 * send non-data, bcast and EAPOLs using the
			 * min basic rate
			 */
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	/* send EAPOL frames as voice */
	if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
		tx_attr |= TX_HW_ATTR_EAPOL_FRAME;

	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_csum(wl, desc, skb);
	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	bool is_dummy;
	bool is_gem = false;

	if (!skb) {
		wl1271_error("discarding null skb");
		return -EINVAL;
	}

	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	info = IEEE80211_SKB_CB(skb);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}

		is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
				 is_gem);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (eg. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}
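
/*
 * Translate a mac80211 rate-set bitmap into the FW enabled-rates mask:
 * legacy rates map through the band's hw_value, MCS rates start at bit
 * HW_HT_RATES_OFFSET of the input.
 */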
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum nl80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rates indication are on bits 16 - 31 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 16; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	int i;
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		for (i = 0; i < NUM_TX_QUEUES; i++) {
			if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
			    wlvif->tx_queue_count[i] <=
					WL1271_TX_QUEUE_LOW_WATERMARK)
				/* firmware buffer has space, restart queues */
				wlcore_wake_queue(wl, wlvif, i,
					WLCORE_QUEUE_STOP_REASON_WATERMARK);
		}
	}
}

static int wlcore_select_ac(struct wl1271 *wl)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (wl->tx_queue_count[ac] &&
		    wl->tx_allocated_pkts[ac] < min_pkts) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	return q;
}
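
/*
 * Pop one skb from the link's queue for the given AC and update the
 * global and per-vif queued-packet counters under wl_lock.
 */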
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
					  struct wl1271_link *lnk, u8 q)
{
	struct sk_buff *skb;
	unsigned long flags;

	skb = skb_dequeue(&lnk->tx_queue[q]);
	if (skb) {
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		if (lnk->wlvif) {
			WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
			lnk->wlvif->tx_queue_count[q]--;
		}
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
						    u8 hlid, u8 ac,
						    u8 *low_prio_hlid)
{
	struct wl1271_link *lnk = &wl->links[hlid];

	if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
		if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
		    !skb_queue_empty(&lnk->tx_queue[ac]) &&
		    wlcore_hw_lnk_low_prio(wl, hlid, lnk))
			/* we found the first non-empty low priority queue */
			*low_prio_hlid = hlid;

		return NULL;
	}

	return wlcore_lnk_dequeue(wl, lnk, ac);
}

static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
						    struct wl12xx_vif *wlvif,
						    u8 ac, u8 *hlid,
						    u8 *low_prio_hlid)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < wl->num_links; i++) {
		h = (start_hlid + i) % wl->num_links;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
						   low_prio_hlid);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	*hlid = wlvif->last_tx_hlid;
	return skb;
}
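
/*
 * Pick the next skb to transmit: select the least-loaded AC, then go
 * round-robin over the vifs and their links, preferring high priority
 * links and falling back first to the system hlid, then to the first
 * non-empty low priority link, and finally to a pending dummy packet.
 */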
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;
	int ac;
	u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;

	ac = wlcore_select_ac(wl);
	if (ac < 0)
		goto out;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				continue;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (!skb)
				continue;

			wl->last_wlvif = wlvif;
			break;
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb) {
		skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
						   ac, &low_prio_hlid);
		if (skb) {
			*hlid = wl->system_hlid;
			wl->last_wlvif = NULL;
		}
	}

	/* Do a new pass over the wlvif list. But no need to continue
	 * after last_wlvif. The previous pass should have found it.
	 */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			if (!wlvif->tx_queue_count[ac])
				goto next;

			skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
							   &low_prio_hlid);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

next:
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	/* no high priority skbs found - but maybe a low priority one? */
	if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
		struct wl1271_link *lnk = &wl->links[low_prio_hlid];
		skb = wlcore_lnk_dequeue(wl, lnk, ac);

		WARN_ON(!skb); /* we checked this before */
		*hlid = low_prio_hlid;

		/* ensure proper round robin in the vif/link levels */
		wl->last_wlvif = lnk->wlvif;
		if (lnk->wlvif)
			lnk->wlvif->last_tx_hlid = low_prio_hlid;
	}

out:
	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		*hlid = wl->system_hlid;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb, u8 hlid)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
				      wl->num_links;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	if (wlvif)
		wlvif->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}
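
/*
 * Re-arm RX streaming on each STA vif that owns one of the links we
 * just transmitted on: kick the enable work if streaming isn't running
 * yet and push the disable timer further out.
 */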
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, wl->num_links) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

/*
 * Returns failure values only in case of failed bus ops within this function.
 * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
 * triggering recovery by higher layers when not necessary.
 * If a FW command fails within wl1271_prepare_tx_frame, a recovery will
 * be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame can
 * occur and are legitimate, so don't propagate them. -EINVAL will emit a
 * WARNING within the prepare_tx_frame code, but there's nothing we should
 * do about those either.
 */
int wlcore_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0, last_len = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
	int ret = 0;
	int bus_ret = 0;
	u8 hlid;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		return 0;

	while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb))
			wlvif = wl12xx_vif_to_data(info->control.vif);
		else
			hlid = wl->system_hlid;

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
					      hlid);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);

			buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
							    last_len);
			bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
						    wl->aggr_buf, buf_offset,
						    true);
			if (bus_ret < 0)
				goto out;

			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb, hlid);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		last_len = ret;
		buf_offset += last_len;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
		bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					    buf_offset, true);
		if (bus_ret < 0)
			goto out;

		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
			bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
						 wl->tx_packets_count);
			if (bus_ret < 0)
				goto out;
		}

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return bus_ret;
}
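
/* Tx work item: wake the chip, run one Tx pass, let it sleep again. */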
void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_tx_work_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index <= 8)
		flags |= IEEE80211_TX_RC_MCS;

	/*
	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
	 * only it uses Tx-completion.
	 */
	if (rate_class_index == 0)
		flags |= IEEE80211_TX_RC_SHORT_GI;

	return flags;
}
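
/*
 * Handle a single Tx result descriptor: update the mac80211 Tx status
 * (ACK flag, rate, retries), strip the private and TKIP headers and
 * defer the skb back to the stack.
 */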
static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
	    info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;
	int ret;

	/* read the tx results from the chipset */
	ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
			  wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	if (ret < 0)
		goto out;

	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
			     offsetof(struct wl1271_tx_hw_res_if,
				      tx_result_host_counter), fw_counter);
	if (ret < 0)
		goto out;

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}

out:
	return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);
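
/*
 * Drop every skb still queued on the given link, reporting each one
 * back to mac80211 with an empty Tx status, and fix up the queued
 * packet counters.
 */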
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= total[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= total[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, wl->num_links) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
			/* this calls wl12xx_free_link */
			wl1271_free_sta(wl, wlvif, i);
		} else {
			u8 hlid = i;
			wl12xx_free_link(wl, wlvif, &hlid);
		}
	}
	wlvif->last_tx_hlid = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		wlvif->tx_queue_count[i] = 0;
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (wl1271_tx_total_queue_count(wl) != 0) {
		for (i = 0; i < wl->num_links; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
			    info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}
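
/* maximum time to wait for the FW and driver queues to drain, in usecs */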
#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout, start_time;
	int i;
	start_time = jiffies;
	timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	/* only one flush should be in progress, for consistent queue state */
	mutex_lock(&wl->flush_mutex);

	mutex_lock(&wl->mutex);
	if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
		mutex_unlock(&wl->mutex);
		goto out;
	}

	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);

	while (!time_after(jiffies, timeout)) {
		wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));

		/* force Tx and give the driver some time to flush data */
		mutex_unlock(&wl->mutex);
		if (wl1271_tx_total_queue_count(wl))
			wl1271_tx_work(&wl->tx_work);
		msleep(20);
		mutex_lock(&wl->mutex);

		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
				     jiffies_to_msecs(jiffies - start_time));
			goto out_wake;
		}
	}

	wl1271_warning("Unable to flush all TX buffers, "
		       "timed out (timeout %d ms)",
		       WL1271_TX_FLUSH_TIMEOUT / 1000);

	/* forcibly flush all Tx buffers on our queues */
	for (i = 0; i < wl->num_links; i++)
		wl1271_tx_reset_link_queues(wl, i);

out_wake:
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
	mutex_unlock(&wl->mutex);
out:
	mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);

void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      u8 queue, enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
	bool stopped = !!wl->queue_stop_reasons[hwq];

	/* queue should not be stopped for this reason */
	WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (stopped)
		return;

	ieee80211_stop_queue(wl->hw, hwq);
}

void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);
	wlcore_stop_queue_locked(wl, wlvif, queue, reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
		       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue should not be clear for this reason */
	WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));

	if (wl->queue_stop_reasons[hwq])
		goto out;

	ieee80211_wake_queue(wl->hw, hwq);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_stop_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as stopped */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(test_and_set_bit(reason,
					      &wl->queue_stop_reasons[i]));

	/* use the global version to make sure even vifs in mac80211 that we
	 * don't know about are stopped.
	 */
	ieee80211_stop_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

void wlcore_wake_queues(struct wl1271 *wl,
			enum wlcore_queue_stop_reason reason)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* mark all possible queues as awake */
	for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
		WARN_ON_ONCE(!test_and_clear_bit(reason,
						 &wl->queue_stop_reasons[i]));

	/* use the global version to make sure even vifs in mac80211 that we
	 * don't know about are woken up.
	 */
	ieee80211_wake_queues(wl->hw);

	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif, u8 queue,
				       enum wlcore_queue_stop_reason reason)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&wl->wl_lock, flags);
	stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
							   reason);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	return stopped;
}

bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif,
					      u8 queue,
					      enum wlcore_queue_stop_reason reason)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}

bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				    u8 queue)
{
	int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);

	assert_spin_locked(&wl->wl_lock);
	return !!wl->queue_stop_reasons[hwq];
}