/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}
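/*
 * RX buffer handling for EDMA-capable chips (ATH9K_HW_CAP_EDMA): instead
 * of a linked descriptor chain, received frames are returned through two
 * hardware FIFOs (high and low priority). Each buffer pushed to a FIFO
 * keeps a pointer to its owning ath_buf in the skb control buffer via
 * SKB_CB_ATHBUF(), so the ath_buf can be recovered on completion.
 */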

static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	spin_lock_init(&sc->rx.rxbuflock);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;

}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
}
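/*
 * Power-save RX helpers: ath_beacon_dtim_pending_cab() parses the TIM
 * element of a received beacon to see whether the AP has buffered
 * broadcast/multicast (CAB) frames for this DTIM period, and
 * ath_rx_ps_beacon()/ath_rx_ps() use that information to decide whether
 * the chip must stay awake for CAB traffic or may go back to sleep.
 */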
Rodriguez 511cc65965cSJouni Malinen static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb) 512cc65965cSJouni Malinen { 513cc65965cSJouni Malinen /* Check whether the Beacon frame has DTIM indicating buffered bc/mc */ 514cc65965cSJouni Malinen struct ieee80211_mgmt *mgmt; 515cc65965cSJouni Malinen u8 *pos, *end, id, elen; 516cc65965cSJouni Malinen struct ieee80211_tim_ie *tim; 517cc65965cSJouni Malinen 518cc65965cSJouni Malinen mgmt = (struct ieee80211_mgmt *)skb->data; 519cc65965cSJouni Malinen pos = mgmt->u.beacon.variable; 520cc65965cSJouni Malinen end = skb->data + skb->len; 521cc65965cSJouni Malinen 522cc65965cSJouni Malinen while (pos + 2 < end) { 523cc65965cSJouni Malinen id = *pos++; 524cc65965cSJouni Malinen elen = *pos++; 525cc65965cSJouni Malinen if (pos + elen > end) 526cc65965cSJouni Malinen break; 527cc65965cSJouni Malinen 528cc65965cSJouni Malinen if (id == WLAN_EID_TIM) { 529cc65965cSJouni Malinen if (elen < sizeof(*tim)) 530cc65965cSJouni Malinen break; 531cc65965cSJouni Malinen tim = (struct ieee80211_tim_ie *) pos; 532cc65965cSJouni Malinen if (tim->dtim_count != 0) 533cc65965cSJouni Malinen break; 534cc65965cSJouni Malinen return tim->bitmap_ctrl & 0x01; 535cc65965cSJouni Malinen } 536cc65965cSJouni Malinen 537cc65965cSJouni Malinen pos += elen; 538cc65965cSJouni Malinen } 539cc65965cSJouni Malinen 540cc65965cSJouni Malinen return false; 541cc65965cSJouni Malinen } 542cc65965cSJouni Malinen 543cc65965cSJouni Malinen static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 544cc65965cSJouni Malinen { 5451510718dSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(sc->sc_ah); 546cc65965cSJouni Malinen 547cc65965cSJouni Malinen if (skb->len < 24 + 8 + 2 + 2) 548cc65965cSJouni Malinen return; 549cc65965cSJouni Malinen 5501b04b930SSujith sc->ps_flags &= ~PS_WAIT_FOR_BEACON; 551293dc5dfSGabor Juhos 5521b04b930SSujith if (sc->ps_flags & PS_BEACON_SYNC) { 5531b04b930SSujith sc->ps_flags &= ~PS_BEACON_SYNC; 554d2182b69SJoe Perches ath_dbg(common, PS, 555226afe68SJoe Perches "Reconfigure Beacon timers based on timestamp from the AP\n"); 556ef4ad633SSujith Manoharan ath9k_set_beacon(sc); 557ccdfeab6SJouni Malinen } 558ccdfeab6SJouni Malinen 559cc65965cSJouni Malinen if (ath_beacon_dtim_pending_cab(skb)) { 560cc65965cSJouni Malinen /* 561cc65965cSJouni Malinen * Remain awake waiting for buffered broadcast/multicast 56258f5fffdSGabor Juhos * frames. If the last broadcast/multicast frame is not 56358f5fffdSGabor Juhos * received properly, the next beacon frame will work as 56458f5fffdSGabor Juhos * a backup trigger for returning into NETWORK SLEEP state, 56558f5fffdSGabor Juhos * so we are waiting for it as well. 566cc65965cSJouni Malinen */ 567d2182b69SJoe Perches ath_dbg(common, PS, 568226afe68SJoe Perches "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n"); 5691b04b930SSujith sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 570cc65965cSJouni Malinen return; 571cc65965cSJouni Malinen } 572cc65965cSJouni Malinen 5731b04b930SSujith if (sc->ps_flags & PS_WAIT_FOR_CAB) { 574cc65965cSJouni Malinen /* 575cc65965cSJouni Malinen * This can happen if a broadcast frame is dropped or the AP 576cc65965cSJouni Malinen * fails to send a frame indicating that all CAB frames have 577cc65965cSJouni Malinen * been delivered. 
578cc65965cSJouni Malinen */ 5791b04b930SSujith sc->ps_flags &= ~PS_WAIT_FOR_CAB; 580d2182b69SJoe Perches ath_dbg(common, PS, "PS wait for CAB frames timed out\n"); 581cc65965cSJouni Malinen } 582cc65965cSJouni Malinen } 583cc65965cSJouni Malinen 584f73c604cSRajkumar Manoharan static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon) 585cc65965cSJouni Malinen { 586cc65965cSJouni Malinen struct ieee80211_hdr *hdr; 587c46917bbSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(sc->sc_ah); 588cc65965cSJouni Malinen 589cc65965cSJouni Malinen hdr = (struct ieee80211_hdr *)skb->data; 590cc65965cSJouni Malinen 591cc65965cSJouni Malinen /* Process Beacon and CAB receive in PS state */ 592ededf1f8SVasanthakumar Thiagarajan if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc)) 59307c15a3fSSujith Manoharan && mybeacon) { 594cc65965cSJouni Malinen ath_rx_ps_beacon(sc, skb); 59507c15a3fSSujith Manoharan } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) && 596cc65965cSJouni Malinen (ieee80211_is_data(hdr->frame_control) || 597cc65965cSJouni Malinen ieee80211_is_action(hdr->frame_control)) && 598cc65965cSJouni Malinen is_multicast_ether_addr(hdr->addr1) && 599cc65965cSJouni Malinen !ieee80211_has_moredata(hdr->frame_control)) { 600cc65965cSJouni Malinen /* 601cc65965cSJouni Malinen * No more broadcast/multicast frames to be received at this 602cc65965cSJouni Malinen * point. 603cc65965cSJouni Malinen */ 6043fac6dfdSSenthil Balasubramanian sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 605d2182b69SJoe Perches ath_dbg(common, PS, 606c46917bbSLuis R. Rodriguez "All PS CAB frames received, back to sleep\n"); 6071b04b930SSujith } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 6089a23f9caSJouni Malinen !is_multicast_ether_addr(hdr->addr1) && 6099a23f9caSJouni Malinen !ieee80211_has_morefrags(hdr->frame_control)) { 6101b04b930SSujith sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 611d2182b69SJoe Perches ath_dbg(common, PS, 612226afe68SJoe Perches "Going back to sleep after having received PS-Poll data (0x%lx)\n", 6131b04b930SSujith sc->ps_flags & (PS_WAIT_FOR_BEACON | 6141b04b930SSujith PS_WAIT_FOR_CAB | 6151b04b930SSujith PS_WAIT_FOR_PSPOLL_DATA | 6161b04b930SSujith PS_WAIT_FOR_TX_ACK)); 617cc65965cSJouni Malinen } 618cc65965cSJouni Malinen } 619cc65965cSJouni Malinen 620b5c80475SFelix Fietkau static bool ath_edma_get_buffers(struct ath_softc *sc, 6213a2923e8SFelix Fietkau enum ath9k_rx_qtype qtype, 6223a2923e8SFelix Fietkau struct ath_rx_status *rs, 6233a2923e8SFelix Fietkau struct ath_buf **dest) 624203c4805SLuis R. Rodriguez { 625b5c80475SFelix Fietkau struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype]; 626203c4805SLuis R. Rodriguez struct ath_hw *ah = sc->sc_ah; 62727c51f1aSLuis R. Rodriguez struct ath_common *common = ath9k_hw_common(ah); 628b5c80475SFelix Fietkau struct sk_buff *skb; 629b5c80475SFelix Fietkau struct ath_buf *bf; 630b5c80475SFelix Fietkau int ret; 631203c4805SLuis R. Rodriguez 632b5c80475SFelix Fietkau skb = skb_peek(&rx_edma->rx_fifo); 633b5c80475SFelix Fietkau if (!skb) 634b5c80475SFelix Fietkau return false; 635203c4805SLuis R. 
Rodriguez 636b5c80475SFelix Fietkau bf = SKB_CB_ATHBUF(skb); 637b5c80475SFelix Fietkau BUG_ON(!bf); 638b5c80475SFelix Fietkau 639ce9426d1SMing Lei dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 640b5c80475SFelix Fietkau common->rx_bufsize, DMA_FROM_DEVICE); 641b5c80475SFelix Fietkau 6423a2923e8SFelix Fietkau ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data); 643ce9426d1SMing Lei if (ret == -EINPROGRESS) { 644ce9426d1SMing Lei /*let device gain the buffer again*/ 645ce9426d1SMing Lei dma_sync_single_for_device(sc->dev, bf->bf_buf_addr, 646ce9426d1SMing Lei common->rx_bufsize, DMA_FROM_DEVICE); 647b5c80475SFelix Fietkau return false; 648ce9426d1SMing Lei } 649b5c80475SFelix Fietkau 650b5c80475SFelix Fietkau __skb_unlink(skb, &rx_edma->rx_fifo); 651b5c80475SFelix Fietkau if (ret == -EINVAL) { 652b5c80475SFelix Fietkau /* corrupt descriptor, skip this one and the following one */ 653b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 654b5c80475SFelix Fietkau ath_rx_edma_buf_link(sc, qtype); 655b5c80475SFelix Fietkau 6563a2923e8SFelix Fietkau skb = skb_peek(&rx_edma->rx_fifo); 6573a2923e8SFelix Fietkau if (skb) { 658b5c80475SFelix Fietkau bf = SKB_CB_ATHBUF(skb); 659b5c80475SFelix Fietkau BUG_ON(!bf); 660b5c80475SFelix Fietkau 661b5c80475SFelix Fietkau __skb_unlink(skb, &rx_edma->rx_fifo); 662b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf); 663b5c80475SFelix Fietkau ath_rx_edma_buf_link(sc, qtype); 664b5c80475SFelix Fietkau } 6656bb51c70STom Hughes 6666bb51c70STom Hughes bf = NULL; 6673a2923e8SFelix Fietkau } 668b5c80475SFelix Fietkau 6693a2923e8SFelix Fietkau *dest = bf; 670b5c80475SFelix Fietkau return true; 671b5c80475SFelix Fietkau } 672b5c80475SFelix Fietkau 673b5c80475SFelix Fietkau static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc, 674b5c80475SFelix Fietkau struct ath_rx_status *rs, 675b5c80475SFelix Fietkau enum ath9k_rx_qtype qtype) 676b5c80475SFelix Fietkau { 6773a2923e8SFelix Fietkau struct ath_buf *bf = NULL; 678b5c80475SFelix Fietkau 6793a2923e8SFelix Fietkau while (ath_edma_get_buffers(sc, qtype, rs, &bf)) { 6803a2923e8SFelix Fietkau if (!bf) 6813a2923e8SFelix Fietkau continue; 682b5c80475SFelix Fietkau 683b5c80475SFelix Fietkau return bf; 684b5c80475SFelix Fietkau } 6853a2923e8SFelix Fietkau return NULL; 6863a2923e8SFelix Fietkau } 687b5c80475SFelix Fietkau 688b5c80475SFelix Fietkau static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc, 689b5c80475SFelix Fietkau struct ath_rx_status *rs) 690b5c80475SFelix Fietkau { 691b5c80475SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 692b5c80475SFelix Fietkau struct ath_common *common = ath9k_hw_common(ah); 693b5c80475SFelix Fietkau struct ath_desc *ds; 694b5c80475SFelix Fietkau struct ath_buf *bf; 695b5c80475SFelix Fietkau int ret; 696203c4805SLuis R. Rodriguez 697203c4805SLuis R. Rodriguez if (list_empty(&sc->rx.rxbuf)) { 698203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 699b5c80475SFelix Fietkau return NULL; 700203c4805SLuis R. Rodriguez } 701203c4805SLuis R. Rodriguez 702203c4805SLuis R. Rodriguez bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list); 703203c4805SLuis R. Rodriguez ds = bf->bf_desc; 704203c4805SLuis R. Rodriguez 705203c4805SLuis R. Rodriguez /* 706203c4805SLuis R. Rodriguez * Must provide the virtual address of the current 707203c4805SLuis R. Rodriguez * descriptor, the physical address, and the virtual 708203c4805SLuis R. Rodriguez * address of the next descriptor in the h/w chain. 709203c4805SLuis R. 
Rodriguez * This allows the HAL to look ahead to see if the 710203c4805SLuis R. Rodriguez * hardware is done with a descriptor by checking the 711203c4805SLuis R. Rodriguez * done bit in the following descriptor and the address 712203c4805SLuis R. Rodriguez * of the current descriptor the DMA engine is working 713203c4805SLuis R. Rodriguez * on. All this is necessary because of our use of 714203c4805SLuis R. Rodriguez * a self-linked list to avoid rx overruns. 715203c4805SLuis R. Rodriguez */ 7163de21116SRajkumar Manoharan ret = ath9k_hw_rxprocdesc(ah, ds, rs); 717b5c80475SFelix Fietkau if (ret == -EINPROGRESS) { 71829bffa96SFelix Fietkau struct ath_rx_status trs; 719203c4805SLuis R. Rodriguez struct ath_buf *tbf; 720203c4805SLuis R. Rodriguez struct ath_desc *tds; 721203c4805SLuis R. Rodriguez 72229bffa96SFelix Fietkau memset(&trs, 0, sizeof(trs)); 723203c4805SLuis R. Rodriguez if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 724203c4805SLuis R. Rodriguez sc->rx.rxlink = NULL; 725b5c80475SFelix Fietkau return NULL; 726203c4805SLuis R. Rodriguez } 727203c4805SLuis R. Rodriguez 728203c4805SLuis R. Rodriguez tbf = list_entry(bf->list.next, struct ath_buf, list); 729203c4805SLuis R. Rodriguez 730203c4805SLuis R. Rodriguez /* 731203c4805SLuis R. Rodriguez * On some hardware the descriptor status words could 732203c4805SLuis R. Rodriguez * get corrupted, including the done bit. Because of 733203c4805SLuis R. Rodriguez * this, check if the next descriptor's done bit is 734203c4805SLuis R. Rodriguez * set or not. 735203c4805SLuis R. Rodriguez * 736203c4805SLuis R. Rodriguez * If the next descriptor's done bit is set, the current 737203c4805SLuis R. Rodriguez * descriptor has been corrupted. Force s/w to discard 738203c4805SLuis R. Rodriguez * this descriptor and continue... 739203c4805SLuis R. Rodriguez */ 740203c4805SLuis R. Rodriguez 741203c4805SLuis R. Rodriguez tds = tbf->bf_desc; 7423de21116SRajkumar Manoharan ret = ath9k_hw_rxprocdesc(ah, tds, &trs); 743b5c80475SFelix Fietkau if (ret == -EINPROGRESS) 744b5c80475SFelix Fietkau return NULL; 745203c4805SLuis R. Rodriguez } 746203c4805SLuis R. Rodriguez 747b5c80475SFelix Fietkau if (!bf->bf_mpdu) 748b5c80475SFelix Fietkau return bf; 749203c4805SLuis R. Rodriguez 750203c4805SLuis R. Rodriguez /* 751203c4805SLuis R. Rodriguez * Synchronize the DMA transfer with CPU before 752203c4805SLuis R. Rodriguez * 1. accessing the frame 753203c4805SLuis R. Rodriguez * 2. requeueing the same buffer to h/w 754203c4805SLuis R. Rodriguez */ 755ce9426d1SMing Lei dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 756cc861f74SLuis R. Rodriguez common->rx_bufsize, 757203c4805SLuis R. Rodriguez DMA_FROM_DEVICE); 758203c4805SLuis R. 
Rodriguez 759b5c80475SFelix Fietkau return bf; 760b5c80475SFelix Fietkau } 761b5c80475SFelix Fietkau 762d435700fSSujith /* Assumes you've already done the endian to CPU conversion */ 763d435700fSSujith static bool ath9k_rx_accept(struct ath_common *common, 7649f167f64SVasanthakumar Thiagarajan struct ieee80211_hdr *hdr, 765d435700fSSujith struct ieee80211_rx_status *rxs, 766d435700fSSujith struct ath_rx_status *rx_stats, 767d435700fSSujith bool *decrypt_error) 768d435700fSSujith { 769ec205999SFelix Fietkau struct ath_softc *sc = (struct ath_softc *) common->priv; 77066760eacSFelix Fietkau bool is_mc, is_valid_tkip, strip_mic, mic_error; 771d435700fSSujith struct ath_hw *ah = common->ah; 772d435700fSSujith __le16 fc; 773b7b1b512SVasanthakumar Thiagarajan u8 rx_status_len = ah->caps.rx_status_len; 774d435700fSSujith 775d435700fSSujith fc = hdr->frame_control; 776d435700fSSujith 77766760eacSFelix Fietkau is_mc = !!is_multicast_ether_addr(hdr->addr1); 77866760eacSFelix Fietkau is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && 77966760eacSFelix Fietkau test_bit(rx_stats->rs_keyix, common->tkip_keymap); 780152e585dSBill Jordan strip_mic = is_valid_tkip && ieee80211_is_data(fc) && 7812a5783b8SMichael Liang ieee80211_has_protected(fc) && 782152e585dSBill Jordan !(rx_stats->rs_status & 783846d9363SFelix Fietkau (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | 784846d9363SFelix Fietkau ATH9K_RXERR_KEYMISS)); 78566760eacSFelix Fietkau 786f88373faSFelix Fietkau /* 787f88373faSFelix Fietkau * Key miss events are only relevant for pairwise keys where the 788f88373faSFelix Fietkau * descriptor does contain a valid key index. This has been observed 789f88373faSFelix Fietkau * mostly with CCMP encryption. 790f88373faSFelix Fietkau */ 791bed3d9c0SFelix Fietkau if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID || 792bed3d9c0SFelix Fietkau !test_bit(rx_stats->rs_keyix, common->ccmp_keymap)) 793f88373faSFelix Fietkau rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 794f88373faSFelix Fietkau 79515072189SBen Greear if (!rx_stats->rs_datalen) { 79615072189SBen Greear RX_STAT_INC(rx_len_err); 797d435700fSSujith return false; 79815072189SBen Greear } 79915072189SBen Greear 800d435700fSSujith /* 801d435700fSSujith * rs_status follows rs_datalen so if rs_datalen is too large 802d435700fSSujith * we can take a hint that hardware corrupted it, so ignore 803d435700fSSujith * those frames. 804d435700fSSujith */ 80515072189SBen Greear if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) { 80615072189SBen Greear RX_STAT_INC(rx_len_err); 807d435700fSSujith return false; 80815072189SBen Greear } 809d435700fSSujith 8100d95521eSFelix Fietkau /* Only use error bits from the last fragment */ 811d435700fSSujith if (rx_stats->rs_more) 8120d95521eSFelix Fietkau return true; 813d435700fSSujith 81466760eacSFelix Fietkau mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) && 81566760eacSFelix Fietkau !ieee80211_has_morefrags(fc) && 81666760eacSFelix Fietkau !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && 81766760eacSFelix Fietkau (rx_stats->rs_status & ATH9K_RXERR_MIC); 81866760eacSFelix Fietkau 819d435700fSSujith /* 820d435700fSSujith * The rx_stats->rs_status will not be set until the end of the 821d435700fSSujith * chained descriptors so it can be ignored if rs_more is set. The 822d435700fSSujith * rs_more will be false at the last element of the chained 823d435700fSSujith * descriptors. 
824d435700fSSujith */ 825d435700fSSujith if (rx_stats->rs_status != 0) { 826846d9363SFelix Fietkau u8 status_mask; 827846d9363SFelix Fietkau 82866760eacSFelix Fietkau if (rx_stats->rs_status & ATH9K_RXERR_CRC) { 829d435700fSSujith rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 83066760eacSFelix Fietkau mic_error = false; 83166760eacSFelix Fietkau } 832d435700fSSujith if (rx_stats->rs_status & ATH9K_RXERR_PHY) 833d435700fSSujith return false; 834d435700fSSujith 835846d9363SFelix Fietkau if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) || 836846d9363SFelix Fietkau (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) { 837d435700fSSujith *decrypt_error = true; 83866760eacSFelix Fietkau mic_error = false; 839d435700fSSujith } 84066760eacSFelix Fietkau 841d435700fSSujith /* 842d435700fSSujith * Reject error frames with the exception of 843d435700fSSujith * decryption and MIC failures. For monitor mode, 844d435700fSSujith * we also ignore the CRC error. 845d435700fSSujith */ 846846d9363SFelix Fietkau status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | 847846d9363SFelix Fietkau ATH9K_RXERR_KEYMISS; 848846d9363SFelix Fietkau 849ec205999SFelix Fietkau if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL)) 850846d9363SFelix Fietkau status_mask |= ATH9K_RXERR_CRC; 851846d9363SFelix Fietkau 852846d9363SFelix Fietkau if (rx_stats->rs_status & ~status_mask) 853d435700fSSujith return false; 854d435700fSSujith } 85566760eacSFelix Fietkau 85666760eacSFelix Fietkau /* 85766760eacSFelix Fietkau * For unicast frames the MIC error bit can have false positives, 85866760eacSFelix Fietkau * so all MIC error reports need to be validated in software. 85966760eacSFelix Fietkau * False negatives are not common, so skip software verification 86066760eacSFelix Fietkau * if the hardware considers the MIC valid. 
86166760eacSFelix Fietkau */ 86266760eacSFelix Fietkau if (strip_mic) 86366760eacSFelix Fietkau rxs->flag |= RX_FLAG_MMIC_STRIPPED; 86466760eacSFelix Fietkau else if (is_mc && mic_error) 86566760eacSFelix Fietkau rxs->flag |= RX_FLAG_MMIC_ERROR; 86666760eacSFelix Fietkau 867d435700fSSujith return true; 868d435700fSSujith } 869d435700fSSujith 870d435700fSSujith static int ath9k_process_rate(struct ath_common *common, 871d435700fSSujith struct ieee80211_hw *hw, 872d435700fSSujith struct ath_rx_status *rx_stats, 8739f167f64SVasanthakumar Thiagarajan struct ieee80211_rx_status *rxs) 874d435700fSSujith { 875d435700fSSujith struct ieee80211_supported_band *sband; 876d435700fSSujith enum ieee80211_band band; 877d435700fSSujith unsigned int i = 0; 878990e08a0SBen Greear struct ath_softc __maybe_unused *sc = common->priv; 879d435700fSSujith 880d435700fSSujith band = hw->conf.channel->band; 881d435700fSSujith sband = hw->wiphy->bands[band]; 882d435700fSSujith 883d435700fSSujith if (rx_stats->rs_rate & 0x80) { 884d435700fSSujith /* HT rate */ 885d435700fSSujith rxs->flag |= RX_FLAG_HT; 886d435700fSSujith if (rx_stats->rs_flags & ATH9K_RX_2040) 887d435700fSSujith rxs->flag |= RX_FLAG_40MHZ; 888d435700fSSujith if (rx_stats->rs_flags & ATH9K_RX_GI) 889d435700fSSujith rxs->flag |= RX_FLAG_SHORT_GI; 890d435700fSSujith rxs->rate_idx = rx_stats->rs_rate & 0x7f; 891d435700fSSujith return 0; 892d435700fSSujith } 893d435700fSSujith 894d435700fSSujith for (i = 0; i < sband->n_bitrates; i++) { 895d435700fSSujith if (sband->bitrates[i].hw_value == rx_stats->rs_rate) { 896d435700fSSujith rxs->rate_idx = i; 897d435700fSSujith return 0; 898d435700fSSujith } 899d435700fSSujith if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { 900d435700fSSujith rxs->flag |= RX_FLAG_SHORTPRE; 901d435700fSSujith rxs->rate_idx = i; 902d435700fSSujith return 0; 903d435700fSSujith } 904d435700fSSujith } 905d435700fSSujith 906d435700fSSujith /* 907d435700fSSujith * No valid hardware bitrate found -- we should not get here 908d435700fSSujith * because hardware has already validated this frame as OK. 
909d435700fSSujith */ 910d2182b69SJoe Perches ath_dbg(common, ANY, 911226afe68SJoe Perches "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 912226afe68SJoe Perches rx_stats->rs_rate); 91315072189SBen Greear RX_STAT_INC(rx_rate_err); 914d435700fSSujith return -EINVAL; 915d435700fSSujith } 916d435700fSSujith 917d435700fSSujith static void ath9k_process_rssi(struct ath_common *common, 918d435700fSSujith struct ieee80211_hw *hw, 9199f167f64SVasanthakumar Thiagarajan struct ieee80211_hdr *hdr, 920d435700fSSujith struct ath_rx_status *rx_stats) 921d435700fSSujith { 9229ac58615SFelix Fietkau struct ath_softc *sc = hw->priv; 923d435700fSSujith struct ath_hw *ah = common->ah; 9249fa23e17SFelix Fietkau int last_rssi; 9252ef16755SFelix Fietkau int rssi = rx_stats->rs_rssi; 926d435700fSSujith 927cf3af748SRajkumar Manoharan if (!rx_stats->is_mybeacon || 928cf3af748SRajkumar Manoharan ((ah->opmode != NL80211_IFTYPE_STATION) && 929cf3af748SRajkumar Manoharan (ah->opmode != NL80211_IFTYPE_ADHOC))) 9309fa23e17SFelix Fietkau return; 9319fa23e17SFelix Fietkau 9329fa23e17SFelix Fietkau if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr) 9339ac58615SFelix Fietkau ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi); 934686b9cb9SBen Greear 9359ac58615SFelix Fietkau last_rssi = sc->last_rssi; 936d435700fSSujith if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 9372ef16755SFelix Fietkau rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER); 9382ef16755SFelix Fietkau if (rssi < 0) 9392ef16755SFelix Fietkau rssi = 0; 940d435700fSSujith 941d435700fSSujith /* Update Beacon RSSI, this is used by ANI. */ 9422ef16755SFelix Fietkau ah->stats.avgbrssi = rssi; 943d435700fSSujith } 944d435700fSSujith 945d435700fSSujith /* 946d435700fSSujith * For Decrypt or Demic errors, we only mark packet status here and always push 947d435700fSSujith * up the frame up to let mac80211 handle the actual error case, be it no 948d435700fSSujith * decryption key or real decryption error. This let us keep statistics there. 949d435700fSSujith */ 950d435700fSSujith static int ath9k_rx_skb_preprocess(struct ath_common *common, 951d435700fSSujith struct ieee80211_hw *hw, 9529f167f64SVasanthakumar Thiagarajan struct ieee80211_hdr *hdr, 953d435700fSSujith struct ath_rx_status *rx_stats, 954d435700fSSujith struct ieee80211_rx_status *rx_status, 955d435700fSSujith bool *decrypt_error) 956d435700fSSujith { 957f749b946SFelix Fietkau struct ath_hw *ah = common->ah; 958f749b946SFelix Fietkau 959d435700fSSujith /* 960d435700fSSujith * everything but the rate is checked here, the rate check is done 961d435700fSSujith * separately to avoid doing two lookups for a rate for each frame. 
962d435700fSSujith */ 9639f167f64SVasanthakumar Thiagarajan if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) 964d435700fSSujith return -EINVAL; 965d435700fSSujith 9660d95521eSFelix Fietkau /* Only use status info from the last fragment */ 9670d95521eSFelix Fietkau if (rx_stats->rs_more) 9680d95521eSFelix Fietkau return 0; 9690d95521eSFelix Fietkau 9709f167f64SVasanthakumar Thiagarajan ath9k_process_rssi(common, hw, hdr, rx_stats); 971d435700fSSujith 9729f167f64SVasanthakumar Thiagarajan if (ath9k_process_rate(common, hw, rx_stats, rx_status)) 973d435700fSSujith return -EINVAL; 974d435700fSSujith 975d435700fSSujith rx_status->band = hw->conf.channel->band; 976d435700fSSujith rx_status->freq = hw->conf.channel->center_freq; 977f749b946SFelix Fietkau rx_status->signal = ah->noise + rx_stats->rs_rssi; 978d435700fSSujith rx_status->antenna = rx_stats->rs_antenna; 979*96d21371SThomas Pedersen rx_status->flag |= RX_FLAG_MACTIME_END; 9802ef16755SFelix Fietkau if (rx_stats->rs_moreaggr) 9812ef16755SFelix Fietkau rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 982d435700fSSujith 983d435700fSSujith return 0; 984d435700fSSujith } 985d435700fSSujith 986d435700fSSujith static void ath9k_rx_skb_postprocess(struct ath_common *common, 987d435700fSSujith struct sk_buff *skb, 988d435700fSSujith struct ath_rx_status *rx_stats, 989d435700fSSujith struct ieee80211_rx_status *rxs, 990d435700fSSujith bool decrypt_error) 991d435700fSSujith { 992d435700fSSujith struct ath_hw *ah = common->ah; 993d435700fSSujith struct ieee80211_hdr *hdr; 994d435700fSSujith int hdrlen, padpos, padsize; 995d435700fSSujith u8 keyix; 996d435700fSSujith __le16 fc; 997d435700fSSujith 998d435700fSSujith /* see if any padding is done by the hw and remove it */ 999d435700fSSujith hdr = (struct ieee80211_hdr *) skb->data; 1000d435700fSSujith hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1001d435700fSSujith fc = hdr->frame_control; 1002d435700fSSujith padpos = ath9k_cmn_padpos(hdr->frame_control); 1003d435700fSSujith 1004d435700fSSujith /* The MAC header is padded to have 32-bit boundary if the 1005d435700fSSujith * packet payload is non-zero. The general calculation for 1006d435700fSSujith * padsize would take into account odd header lengths: 1007d435700fSSujith * padsize = (4 - padpos % 4) % 4; However, since only 1008d435700fSSujith * even-length headers are used, padding can only be 0 or 2 1009d435700fSSujith * bytes and we can optimize this a bit. In addition, we must 1010d435700fSSujith * not try to remove padding from short control frames that do 1011d435700fSSujith * not have payload. 
*/ 1012d435700fSSujith padsize = padpos & 3; 1013d435700fSSujith if (padsize && skb->len>=padpos+padsize+FCS_LEN) { 1014d435700fSSujith memmove(skb->data + padsize, skb->data, padpos); 1015d435700fSSujith skb_pull(skb, padsize); 1016d435700fSSujith } 1017d435700fSSujith 1018d435700fSSujith keyix = rx_stats->rs_keyix; 1019d435700fSSujith 1020d435700fSSujith if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error && 1021d435700fSSujith ieee80211_has_protected(fc)) { 1022d435700fSSujith rxs->flag |= RX_FLAG_DECRYPTED; 1023d435700fSSujith } else if (ieee80211_has_protected(fc) 1024d435700fSSujith && !decrypt_error && skb->len >= hdrlen + 4) { 1025d435700fSSujith keyix = skb->data[hdrlen + 3] >> 6; 1026d435700fSSujith 1027d435700fSSujith if (test_bit(keyix, common->keymap)) 1028d435700fSSujith rxs->flag |= RX_FLAG_DECRYPTED; 1029d435700fSSujith } 1030d435700fSSujith if (ah->sw_mgmt_crypto && 1031d435700fSSujith (rxs->flag & RX_FLAG_DECRYPTED) && 1032d435700fSSujith ieee80211_is_mgmt(fc)) 1033d435700fSSujith /* Use software decrypt for management frames. */ 1034d435700fSSujith rxs->flag &= ~RX_FLAG_DECRYPTED; 1035d435700fSSujith } 1036b5c80475SFelix Fietkau 1037b5c80475SFelix Fietkau int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) 1038b5c80475SFelix Fietkau { 1039b5c80475SFelix Fietkau struct ath_buf *bf; 10400d95521eSFelix Fietkau struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb; 1041b5c80475SFelix Fietkau struct ieee80211_rx_status *rxs; 1042b5c80475SFelix Fietkau struct ath_hw *ah = sc->sc_ah; 1043b5c80475SFelix Fietkau struct ath_common *common = ath9k_hw_common(ah); 10447545daf4SFelix Fietkau struct ieee80211_hw *hw = sc->hw; 1045b5c80475SFelix Fietkau struct ieee80211_hdr *hdr; 1046b5c80475SFelix Fietkau int retval; 1047b5c80475SFelix Fietkau struct ath_rx_status rs; 1048b5c80475SFelix Fietkau enum ath9k_rx_qtype qtype; 1049b5c80475SFelix Fietkau bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); 1050b5c80475SFelix Fietkau int dma_type; 10515c6dd921SVasanthakumar Thiagarajan u8 rx_status_len = ah->caps.rx_status_len; 1052a6d2055bSFelix Fietkau u64 tsf = 0; 1053a6d2055bSFelix Fietkau u32 tsf_lower = 0; 10548ab2cd09SLuis R. Rodriguez unsigned long flags; 1055b5c80475SFelix Fietkau 1056b5c80475SFelix Fietkau if (edma) 1057b5c80475SFelix Fietkau dma_type = DMA_BIDIRECTIONAL; 105856824223SMing Lei else 105956824223SMing Lei dma_type = DMA_FROM_DEVICE; 1060b5c80475SFelix Fietkau 1061b5c80475SFelix Fietkau qtype = hp ? 
ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP; 1062b5c80475SFelix Fietkau spin_lock_bh(&sc->rx.rxbuflock); 1063b5c80475SFelix Fietkau 1064a6d2055bSFelix Fietkau tsf = ath9k_hw_gettsf64(ah); 1065a6d2055bSFelix Fietkau tsf_lower = tsf & 0xffffffff; 1066a6d2055bSFelix Fietkau 1067b5c80475SFelix Fietkau do { 1068e1352fdeSLorenzo Bianconi bool decrypt_error = false; 1069b5c80475SFelix Fietkau /* If handling rx interrupt and flush is in progress => exit */ 1070781b14a3SSujith Manoharan if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) 1071b5c80475SFelix Fietkau break; 1072b5c80475SFelix Fietkau 1073b5c80475SFelix Fietkau memset(&rs, 0, sizeof(rs)); 1074b5c80475SFelix Fietkau if (edma) 1075b5c80475SFelix Fietkau bf = ath_edma_get_next_rx_buf(sc, &rs, qtype); 1076b5c80475SFelix Fietkau else 1077b5c80475SFelix Fietkau bf = ath_get_next_rx_buf(sc, &rs); 1078b5c80475SFelix Fietkau 1079b5c80475SFelix Fietkau if (!bf) 1080b5c80475SFelix Fietkau break; 1081b5c80475SFelix Fietkau 1082b5c80475SFelix Fietkau skb = bf->bf_mpdu; 1083b5c80475SFelix Fietkau if (!skb) 1084b5c80475SFelix Fietkau continue; 1085b5c80475SFelix Fietkau 10860d95521eSFelix Fietkau /* 10870d95521eSFelix Fietkau * Take frame header from the first fragment and RX status from 10880d95521eSFelix Fietkau * the last one. 10890d95521eSFelix Fietkau */ 10900d95521eSFelix Fietkau if (sc->rx.frag) 10910d95521eSFelix Fietkau hdr_skb = sc->rx.frag; 10920d95521eSFelix Fietkau else 10930d95521eSFelix Fietkau hdr_skb = skb; 10940d95521eSFelix Fietkau 10950d95521eSFelix Fietkau hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); 10960d95521eSFelix Fietkau rxs = IEEE80211_SKB_RXCB(hdr_skb); 109715072189SBen Greear if (ieee80211_is_beacon(hdr->frame_control)) { 109815072189SBen Greear RX_STAT_INC(rx_beacons); 109915072189SBen Greear if (!is_zero_ether_addr(common->curbssid) && 11002e42e474SJoe Perches ether_addr_equal(hdr->addr3, common->curbssid)) 1101cf3af748SRajkumar Manoharan rs.is_mybeacon = true; 1102cf3af748SRajkumar Manoharan else 1103cf3af748SRajkumar Manoharan rs.is_mybeacon = false; 110415072189SBen Greear } 110515072189SBen Greear else 110615072189SBen Greear rs.is_mybeacon = false; 11075ca42627SLuis R. Rodriguez 1108be41b052SMohammed Shafi Shajakhan if (ieee80211_is_data_present(hdr->frame_control) && 1109be41b052SMohammed Shafi Shajakhan !ieee80211_is_qos_nullfunc(hdr->frame_control)) 11106995fb80SRajkumar Manoharan sc->rx.num_pkts++; 1111be41b052SMohammed Shafi Shajakhan 111229bffa96SFelix Fietkau ath_debug_stat_rx(sc, &rs); 11131395d3f0SSujith 1114203c4805SLuis R. Rodriguez /* 1115203c4805SLuis R. Rodriguez * If we're asked to flush receive queue, directly 1116203c4805SLuis R. Rodriguez * chain it back at the queue without processing it. 1117203c4805SLuis R. Rodriguez */ 1118781b14a3SSujith Manoharan if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) { 111915072189SBen Greear RX_STAT_INC(rx_drop_rxflush); 11200d95521eSFelix Fietkau goto requeue_drop_frag; 112115072189SBen Greear } 1122203c4805SLuis R. 
		rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
		if (rs.rs_tstamp > tsf_lower &&
		    unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
			rxs->mactime -= 0x100000000ULL;

		if (rs.rs_tstamp < tsf_lower &&
		    unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
			rxs->mactime += 0x100000000ULL;

		retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
						 rxs, &decrypt_error);
		if (retval)
			goto requeue_drop_frag;

		if (rs.is_mybeacon) {
			sc->hw_busy_count = 0;
			ath_start_rx_poll(sc, 3);
		}

		/*
		 * Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb.
		 */
		requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);

		/*
		 * If there is no memory, we ignore the current RX'd frame and
		 * tell the hardware it can give us a new frame using the old
		 * skb, which we put back at the tail of the sc->rx.rxbuf list
		 * for processing.
		 */
		if (!requeue_skb) {
			RX_STAT_INC(rx_oom_err);
			goto requeue_drop_frag;
		}

		/* Unmap the frame */
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 common->rx_bufsize,
				 dma_type);

		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
		if (ah->caps.rx_status_len)
			skb_pull(skb, ah->caps.rx_status_len);

		if (!rs.rs_more)
			ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
						 rxs, decrypt_error);

		/* We will now give the hardware our shiny new allocated skb. */
		bf->bf_mpdu = requeue_skb;
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
						 common->rx_bufsize,
						 dma_type);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common, "dma_mapping_error() on RX\n");
			ieee80211_rx(hw, skb);
			break;
		}

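		/*
		 * Chained-buffer reassembly below: when rs_more is set, this
		 * buffer holds only part of the frame, so it is parked in
		 * sc->rx.frag and the loop moves on.  When the final buffer
		 * arrives, its data is appended to the parked fragment (after
		 * growing its tailroom) and the reassembled skb is the one
		 * handed up the stack.  Only one fragment can be pending at a
		 * time; a further rs_more buffer drops the whole frame.
		 */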
		if (rs.rs_more) {
			RX_STAT_INC(rx_frags);
			/*
			 * rs_more indicates chained descriptors which can be
			 * used to link buffers together for a sort of
			 * scatter-gather operation.
			 */
			if (sc->rx.frag) {
				/* too many fragments - cannot handle frame */
				dev_kfree_skb_any(sc->rx.frag);
				dev_kfree_skb_any(skb);
				RX_STAT_INC(rx_too_many_frags_err);
				skb = NULL;
			}
			sc->rx.frag = skb;
			goto requeue;
		}

		if (sc->rx.frag) {
			int space = skb->len - skb_tailroom(hdr_skb);

			if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
				dev_kfree_skb(skb);
				RX_STAT_INC(rx_oom_err);
				goto requeue_drop_frag;
			}

			sc->rx.frag = NULL;

			skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
						  skb->len);
			dev_kfree_skb_any(skb);
			skb = hdr_skb;
		}

		if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
			/*
			 * Change the default RX antenna if RX diversity
			 * chooses the other antenna 3 times in a row.
			 */
			if (sc->rx.defant != rs.rs_antenna) {
				if (++sc->rx.rxotherant >= 3)
					ath_setdefantenna(sc, rs.rs_antenna);
			} else {
				sc->rx.rxotherant = 0;
			}
		}

		if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
			skb_trim(skb, skb->len - 8);

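		/*
		 * Power-save handling: while we are waiting for a beacon,
		 * buffered CAB traffic or PS-Poll data, or when hardware
		 * auto-sleep is active, each received frame is passed to
		 * ath_rx_ps() under sc_pm_lock so the power-save wait flags
		 * can be updated.  The details live in ath_rx_ps(), outside
		 * this excerpt.
		 */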
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA)) ||
		    ath9k_check_auto_sleep(sc))
			ath_rx_ps(sc, skb, rs.is_mybeacon);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

		if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
			ath_ant_comb_scan(sc, &rs);

		ieee80211_rx(hw, skb);

requeue_drop_frag:
		if (sc->rx.frag) {
			dev_kfree_skb_any(sc->rx.frag);
			sc->rx.frag = NULL;
		}
requeue:
		if (edma) {
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		} else {
			list_move_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_buf_link(sc, bf);
			if (!flush)
				ath9k_hw_rxena(ah);
		}
	} while (1);

	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->imask & ATH9K_INT_RXEOL)) {
		ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	return 0;
}
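
/*
 * The interrupt-mask restore at the end of ath_rx_tasklet() re-arms the
 * RXEOL (end-of-list) and RXORN (overrun) interrupts once buffers have been
 * requeued.  The interrupt handler is expected to have masked them when the
 * RX queue ran dry, so the chip does not keep signalling an empty list
 * before the tasklet has had a chance to refill it; that masking happens
 * outside this excerpt.
 */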