/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include "ath9k.h"
#include "ar9003_mac.h"

#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))

static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
	return sc->ps_enabled &&
	       (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP);
}

/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	BUG_ON(skb == NULL);
	ds->ds_vdata = skb->data;

	/*
	 * setup rx descriptors. The rx_bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process
	 */
	ath9k_hw_setuprxdesc(ah, ds,
			     common->rx_bufsize,
			     0);

	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	sc->rx.rxlink = &ds->ds_link;
}

static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	ath_hw_setbssidmask(common);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* calculate and install multicast filter */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

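/*
 * Hand one buffer from sc->rx.rxbuf to the hardware RX FIFO of the given
 * EDMA queue: clear the self-describing status area, sync it for the
 * device and queue the skb on the software shadow FIFO. Returns false
 * once the hardware FIFO is full.
 */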
static bool ath_rx_edma_buf_link(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;
	struct ath_buf *bf;

	rx_edma = &sc->rx.rx_edma[qtype];
	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
		return false;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	list_del_init(&bf->list);

	skb = bf->bf_mpdu;

	ATH_RXBUF_RESET(bf);
	memset(skb->data, 0, ah->caps.rx_status_len);
	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
				   ah->caps.rx_status_len, DMA_TO_DEVICE);

	SKB_CB_ATHBUF(skb) = bf;
	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
	skb_queue_tail(&rx_edma->rx_fifo, skb);

	return true;
}

static void ath_rx_addbuffer_edma(struct ath_softc *sc,
				  enum ath9k_rx_qtype qtype, int size)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf, *tbf;

	if (list_empty(&sc->rx.rxbuf)) {
		ath_dbg(common, QUEUE, "No free rx buf available\n");
		return;
	}

	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list)
		if (!ath_rx_edma_buf_link(sc, qtype))
			break;

}

static void ath_rx_remove_buffer(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf;
	struct ath_rx_edma *rx_edma;
	struct sk_buff *skb;

	rx_edma = &sc->rx.rx_edma[qtype];

	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
		bf = SKB_CB_ATHBUF(skb);
		BUG_ON(!bf);
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}
}

static void ath_rx_edma_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_buf *bf;

	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);

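	/* Release the DMA mappings and skbs of all remaining buffers. */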
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
		if (bf->bf_mpdu) {
			dma_unmap_single(sc->dev, bf->bf_buf_addr,
					 common->rx_bufsize,
					 DMA_BIDIRECTIONAL);
			dev_kfree_skb_any(bf->bf_mpdu);
			bf->bf_buf_addr = 0;
			bf->bf_mpdu = NULL;
		}
	}

	INIT_LIST_HEAD(&sc->rx.rxbuf);

	kfree(sc->rx.rx_bufptr);
	sc->rx.rx_bufptr = NULL;
}

static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
	skb_queue_head_init(&rx_edma->rx_fifo);
	rx_edma->rx_fifo_hwsize = size;
}

static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_hw *ah = sc->sc_ah;
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0, i;
	u32 size;

	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
				ah->caps.rx_status_len);

	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
			       ah->caps.rx_lp_qdepth);
	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
			       ah->caps.rx_hp_qdepth);

	size = sizeof(struct ath_buf) * nbufs;
	bf = kzalloc(size, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	INIT_LIST_HEAD(&sc->rx.rxbuf);
	sc->rx.rx_bufptr = bf;

	for (i = 0; i < nbufs; i++, bf++) {
		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
		if (!skb) {
			error = -ENOMEM;
			goto rx_init_fail;
		}

		memset(skb->data, 0, common->rx_bufsize);
		bf->bf_mpdu = skb;

		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
						 common->rx_bufsize,
						 DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(sc->dev,
					       bf->bf_buf_addr))) {
			dev_kfree_skb_any(skb);
			bf->bf_mpdu = NULL;
			bf->bf_buf_addr = 0;
			ath_err(common,
				"dma_mapping_error() on RX init\n");
			error = -ENOMEM;
			goto rx_init_fail;
		}

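		/* Mapping succeeded; make the buffer available for the FIFOs. */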
		list_add_tail(&bf->list, &sc->rx.rxbuf);
	}

	return 0;

rx_init_fail:
	ath_rx_edma_cleanup(sc);
	return error;
}

static void ath_edma_start_recv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxbuflock);

	ath9k_hw_rxena(sc->sc_ah);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);

	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);

	ath_opmode_init(sc);

	ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);
}

static void ath_edma_stop_recv(struct ath_softc *sc)
{
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
}

int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	spin_lock_init(&sc->sc_pcu_lock);
	spin_lock_init(&sc->rx.rxbuflock);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);

	common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
			     sc->sc_ah->caps.rx_status_len;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		return ath_rx_edma_init(sc, nbufs);
	} else {
		ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
			common->cachelsz, common->rx_bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1, 0);
		if (error != 0) {
			ath_err(common,
				"failed to allocate rx descriptors: %d\n",
				error);
			goto err;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
					      GFP_KERNEL);
			if (skb == NULL) {
				error = -ENOMEM;
				goto err;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 common->rx_bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
						       bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				bf->bf_buf_addr = 0;
				ath_err(common,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				goto err;
			}
		}
		sc->rx.rxlink = NULL;
	}

err:
	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_rx_edma_cleanup(sc);
		return;
	} else {
		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = bf->bf_mpdu;
			if (skb) {
				dma_unmap_single(sc->dev, bf->bf_buf_addr,
						 common->rx_bufsize,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(skb);
				bf->bf_buf_addr = 0;
				bf->bf_mpdu = NULL;
			}
		}

		if (sc->rx.rxdma.dd_desc_len != 0)
			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
	}
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
	u32 rfilt;

	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	if (sc->rx.rxfilter & FIF_PROBE_REQ)
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/*
	 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
	 * mode interface or when in monitor mode. AP mode does not need this
	 * since it receives all in-BSS frames anyway.
	 */
	if (sc->sc_ah->is_monitoring)
		rfilt |= ATH9K_RX_FILTER_PROM;

	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    (sc->nvifs <= 1) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
		rfilt |= ATH9K_RX_FILTER_BEACON;

	if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
	    (sc->rx.rxfilter & FIF_PSPOLL))
		rfilt |= ATH9K_RX_FILTER_PSPOLL;

	if (conf_is_ht(&sc->hw->conf))
		rfilt |= ATH9K_RX_FILTER_COMP_BAR;

	if (sc->nvifs > 1 || (sc->rx.rxfilter & FIF_OTHER_BSS)) {
		/* This is needed for older chips */
		if (sc->sc_ah->hw_version.macVersion <= AR_SREV_VERSION_9160)
			rfilt |= ATH9K_RX_FILTER_PROM;
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

	if (AR_SREV_9550(sc->sc_ah))
		rfilt |= ATH9K_RX_FILTER_4ADDRESS;

	return rfilt;

}

int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
		ath_edma_start_recv(sc);
		return 0;
	}

	spin_lock_bh(&sc->rx.rxbuflock);
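	/* Legacy DMA: rebuild the descriptor chain and point the hardware
	 * at the first buffer before re-enabling the PCU receive engine. */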
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));

	spin_unlock_bh(&sc->rx.rxbuflock);

	return 0;
}

bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped, reset = false;

	spin_lock_bh(&sc->rx.rxbuflock);
	ath9k_hw_abortpcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah, &reset);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_edma_stop_recv(sc);
	else
		sc->rx.rxlink = NULL;
	spin_unlock_bh(&sc->rx.rxbuflock);

	if (!(ah->ah_flags & AH_UNPLUGGED) &&
	    unlikely(!stopped)) {
		ath_err(ath9k_hw_common(sc->sc_ah),
			"Could not stop RX, we could be "
			"confusing the DMA engine when we start RX up\n");
		ATH_DBG_WARN_ON_ONCE(!stopped);
	}
	return stopped && !reset;
}

void ath_flushrecv(struct ath_softc *sc)
{
	set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ath_rx_tasklet(sc, 1, true);
	ath_rx_tasklet(sc, 1, false);
	clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
}

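/*
 * Returns true if the TIM element of this beacon ends a DTIM period and
 * its bitmap control field announces buffered broadcast/multicast frames.
 */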
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
{
	/* Check whether the Beacon frame has DTIM indicating buffered bc/mc */
	struct ieee80211_mgmt *mgmt;
	u8 *pos, *end, id, elen;
	struct ieee80211_tim_ie *tim;

	mgmt = (struct ieee80211_mgmt *)skb->data;
	pos = mgmt->u.beacon.variable;
	end = skb->data + skb->len;

	while (pos + 2 < end) {
		id = *pos++;
		elen = *pos++;
		if (pos + elen > end)
			break;

		if (id == WLAN_EID_TIM) {
			if (elen < sizeof(*tim))
				break;
			tim = (struct ieee80211_tim_ie *) pos;
			if (tim->dtim_count != 0)
				break;
			return tim->bitmap_ctrl & 0x01;
		}

		pos += elen;
	}

	return false;
}

static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	if (skb->len < 24 + 8 + 2 + 2)
		return;

	sc->ps_flags &= ~PS_WAIT_FOR_BEACON;

	if (sc->ps_flags & PS_BEACON_SYNC) {
		sc->ps_flags &= ~PS_BEACON_SYNC;
		ath_dbg(common, PS,
			"Reconfigure Beacon timers based on timestamp from the AP\n");
		ath9k_set_beacon(sc);
	}

	if (ath_beacon_dtim_pending_cab(skb)) {
		/*
		 * Remain awake waiting for buffered broadcast/multicast
		 * frames. If the last broadcast/multicast frame is not
		 * received properly, the next beacon frame will work as
		 * a backup trigger for returning into NETWORK SLEEP state,
		 * so we are waiting for it as well.
		 */
		ath_dbg(common, PS,
			"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
		sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
		return;
	}

	if (sc->ps_flags & PS_WAIT_FOR_CAB) {
		/*
		 * This can happen if a broadcast frame is dropped or the AP
		 * fails to send a frame indicating that all CAB frames have
		 * been delivered.
		 */
		sc->ps_flags &= ~PS_WAIT_FOR_CAB;
		ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
	}
}

static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
{
	struct ieee80211_hdr *hdr;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	hdr = (struct ieee80211_hdr *)skb->data;

	/* Process Beacon and CAB receive in PS state */
	if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
	    && mybeacon) {
		ath_rx_ps_beacon(sc, skb);
	} else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
		   (ieee80211_is_data(hdr->frame_control) ||
		    ieee80211_is_action(hdr->frame_control)) &&
		   is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_moredata(hdr->frame_control)) {
		/*
		 * No more broadcast/multicast frames to be received at this
		 * point.
		 */
		sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
		ath_dbg(common, PS,
			"All PS CAB frames received, back to sleep\n");
	} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
		   !is_multicast_ether_addr(hdr->addr1) &&
		   !ieee80211_has_morefrags(hdr->frame_control)) {
		sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
		ath_dbg(common, PS,
			"Going back to sleep after having received PS-Poll data (0x%lx)\n",
			sc->ps_flags & (PS_WAIT_FOR_BEACON |
					PS_WAIT_FOR_CAB |
					PS_WAIT_FOR_PSPOLL_DATA |
					PS_WAIT_FOR_TX_ACK));
	}
}

static bool ath_edma_get_buffers(struct ath_softc *sc,
				 enum ath9k_rx_qtype qtype,
				 struct ath_rx_status *rs,
				 struct ath_buf **dest)
{
	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct sk_buff *skb;
	struct ath_buf *bf;
	int ret;

	skb = skb_peek(&rx_edma->rx_fifo);
	if (!skb)
		return false;

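	/* The ath_buf was stashed in the skb control block when the buffer
	 * was pushed to the FIFO; recover it before touching the data. */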
	bf = SKB_CB_ATHBUF(skb);
	BUG_ON(!bf);

	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize, DMA_FROM_DEVICE);

	ret = ath9k_hw_process_rxdesc_edma(ah, rs, skb->data);
	if (ret == -EINPROGRESS) {
		/* let device gain the buffer again */
		dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
					   common->rx_bufsize, DMA_FROM_DEVICE);
		return false;
	}

	__skb_unlink(skb, &rx_edma->rx_fifo);
	if (ret == -EINVAL) {
		/* corrupt descriptor, skip this one and the following one */
		list_add_tail(&bf->list, &sc->rx.rxbuf);
		ath_rx_edma_buf_link(sc, qtype);

		skb = skb_peek(&rx_edma->rx_fifo);
		if (skb) {
			bf = SKB_CB_ATHBUF(skb);
			BUG_ON(!bf);

			__skb_unlink(skb, &rx_edma->rx_fifo);
			list_add_tail(&bf->list, &sc->rx.rxbuf);
			ath_rx_edma_buf_link(sc, qtype);
		}

		bf = NULL;
	}

	*dest = bf;
	return true;
}

static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
						struct ath_rx_status *rs,
						enum ath9k_rx_qtype qtype)
{
	struct ath_buf *bf = NULL;

	while (ath_edma_get_buffers(sc, qtype, rs, &bf)) {
		if (!bf)
			continue;

		return bf;
	}
	return NULL;
}

static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
					   struct ath_rx_status *rs)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_desc *ds;
	struct ath_buf *bf;
	int ret;

	if (list_empty(&sc->rx.rxbuf)) {
		sc->rx.rxlink = NULL;
		return NULL;
	}

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ds = bf->bf_desc;

	/*
	 * Must provide the virtual address of the current
	 * descriptor, the physical address, and the virtual
	 * address of the next descriptor in the h/w chain.
	 * This allows the HAL to look ahead to see if the
	 * hardware is done with a descriptor by checking the
	 * done bit in the following descriptor and the address
	 * of the current descriptor the DMA engine is working
	 * on. All this is necessary because of our use of
	 * a self-linked list to avoid rx overruns.
	 */
	ret = ath9k_hw_rxprocdesc(ah, ds, rs);
	if (ret == -EINPROGRESS) {
		struct ath_rx_status trs;
		struct ath_buf *tbf;
		struct ath_desc *tds;

		memset(&trs, 0, sizeof(trs));
		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
			return NULL;
		}

		tbf = list_entry(bf->list.next, struct ath_buf, list);

		/*
		 * On some hardware the descriptor status words could
		 * get corrupted, including the done bit. Because of
		 * this, check if the next descriptor's done bit is
		 * set or not.
		 *
		 * If the next descriptor's done bit is set, the current
		 * descriptor has been corrupted. Force s/w to discard
		 * this descriptor and continue...
		 */

		tds = tbf->bf_desc;
		ret = ath9k_hw_rxprocdesc(ah, tds, &trs);
		if (ret == -EINPROGRESS)
			return NULL;
	}

	list_del(&bf->list);
	if (!bf->bf_mpdu)
		return bf;

	/*
	 * Synchronize the DMA transfer with CPU before
	 * 1. accessing the frame
	 * 2. requeueing the same buffer to h/w
	 */
	dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
				common->rx_bufsize,
				DMA_FROM_DEVICE);

	return bf;
}

/* Assumes you've already done the endian to CPU conversion */
static bool ath9k_rx_accept(struct ath_common *common,
			    struct ieee80211_hdr *hdr,
			    struct ieee80211_rx_status *rxs,
			    struct ath_rx_status *rx_stats,
			    bool *decrypt_error)
{
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	bool is_mc, is_valid_tkip, strip_mic, mic_error;
	struct ath_hw *ah = common->ah;
	__le16 fc;
	u8 rx_status_len = ah->caps.rx_status_len;

	fc = hdr->frame_control;

	is_mc = !!is_multicast_ether_addr(hdr->addr1);
	is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
		test_bit(rx_stats->rs_keyix, common->tkip_keymap);
	strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
		ieee80211_has_protected(fc) &&
		!(rx_stats->rs_status &
		(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
		 ATH9K_RXERR_KEYMISS));

	/*
	 * Key miss events are only relevant for pairwise keys where the
	 * descriptor does contain a valid key index. This has been observed
	 * mostly with CCMP encryption.
	 */
	if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
	    !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
		rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;

	if (!rx_stats->rs_datalen) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/*
	 * rs_status follows rs_datalen so if rs_datalen is too large
	 * we can take a hint that hardware corrupted it, so ignore
	 * those frames.
	 */
	if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
		RX_STAT_INC(rx_len_err);
		return false;
	}

	/* Only use error bits from the last fragment */
	if (rx_stats->rs_more)
		return true;

	mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
		!ieee80211_has_morefrags(fc) &&
		!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
		(rx_stats->rs_status & ATH9K_RXERR_MIC);

	/*
	 * The rx_stats->rs_status will not be set until the end of the
	 * chained descriptors so it can be ignored if rs_more is set. The
	 * rs_more will be false at the last element of the chained
	 * descriptors.
	 */
	if (rx_stats->rs_status != 0) {
		u8 status_mask;

		if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
			rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
			mic_error = false;
		}
		if (rx_stats->rs_status & ATH9K_RXERR_PHY)
			return false;

		if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
		    (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
			*decrypt_error = true;
			mic_error = false;
		}

		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_KEYMISS;

		if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
			status_mask |= ATH9K_RXERR_CRC;

		if (rx_stats->rs_status & ~status_mask)
			return false;
	}

	/*
	 * For unicast frames the MIC error bit can have false positives,
	 * so all MIC error reports need to be validated in software.
	 * False negatives are not common, so skip software verification
	 * if the hardware considers the MIC valid.
	 */
	if (strip_mic)
		rxs->flag |= RX_FLAG_MMIC_STRIPPED;
	else if (is_mc && mic_error)
		rxs->flag |= RX_FLAG_MMIC_ERROR;

	return true;
}

static int ath9k_process_rate(struct ath_common *common,
			      struct ieee80211_hw *hw,
			      struct ath_rx_status *rx_stats,
			      struct ieee80211_rx_status *rxs)
{
	struct ieee80211_supported_band *sband;
	enum ieee80211_band band;
	unsigned int i = 0;
	struct ath_softc __maybe_unused *sc = common->priv;

	band = hw->conf.channel->band;
	sband = hw->wiphy->bands[band];

	if (rx_stats->rs_rate & 0x80) {
		/* HT rate */
		rxs->flag |= RX_FLAG_HT;
		if (rx_stats->rs_flags & ATH9K_RX_2040)
			rxs->flag |= RX_FLAG_40MHZ;
		if (rx_stats->rs_flags & ATH9K_RX_GI)
			rxs->flag |= RX_FLAG_SHORT_GI;
		rxs->rate_idx = rx_stats->rs_rate & 0x7f;
		return 0;
	}

	for (i = 0; i < sband->n_bitrates; i++) {
		if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
			rxs->rate_idx = i;
			return 0;
		}
		if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
			rxs->flag |= RX_FLAG_SHORTPRE;
			rxs->rate_idx = i;
			return 0;
		}
	}

	/*
	 * No valid hardware bitrate found -- we should not get here
	 * because hardware has already validated this frame as OK.
	 */
	ath_dbg(common, ANY,
		"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
		rx_stats->rs_rate);
	RX_STAT_INC(rx_rate_err);
	return -EINVAL;
}

static void ath9k_process_rssi(struct ath_common *common,
			       struct ieee80211_hw *hw,
			       struct ieee80211_hdr *hdr,
			       struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = common->ah;
	int last_rssi;
	int rssi = rx_stats->rs_rssi;

	if (!rx_stats->is_mybeacon ||
	    ((ah->opmode != NL80211_IFTYPE_STATION) &&
	     (ah->opmode != NL80211_IFTYPE_ADHOC)))
		return;

	if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
		ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);

	last_rssi = sc->last_rssi;
	if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
		rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
	if (rssi < 0)
		rssi = 0;

	/* Update Beacon RSSI, this is used by ANI. */
	ah->stats.avgbrssi = rssi;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This lets us keep statistics there.
 */
static int ath9k_rx_skb_preprocess(struct ath_common *common,
				   struct ieee80211_hw *hw,
				   struct ieee80211_hdr *hdr,
				   struct ath_rx_status *rx_stats,
				   struct ieee80211_rx_status *rx_status,
				   bool *decrypt_error)
{
	struct ath_hw *ah = common->ah;

	/*
	 * everything but the rate is checked here, the rate check is done
	 * separately to avoid doing two lookups for a rate for each frame.
	 */
	if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
		return -EINVAL;

	/* Only use status info from the last fragment */
	if (rx_stats->rs_more)
		return 0;

	ath9k_process_rssi(common, hw, hdr, rx_stats);

	if (ath9k_process_rate(common, hw, rx_stats, rx_status))
		return -EINVAL;

	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->signal = ah->noise + rx_stats->rs_rssi;
	rx_status->antenna = rx_stats->rs_antenna;
	rx_status->flag |= RX_FLAG_MACTIME_END;
	if (rx_stats->rs_moreaggr)
		rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	return 0;
}

static void ath9k_rx_skb_postprocess(struct ath_common *common,
				     struct sk_buff *skb,
				     struct ath_rx_status *rx_stats,
				     struct ieee80211_rx_status *rxs,
				     bool decrypt_error)
{
	struct ath_hw *ah = common->ah;
	struct ieee80211_hdr *hdr;
	int hdrlen, padpos, padsize;
	u8 keyix;
	__le16 fc;

	/* see if any padding is done by the hw and remove it */
	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
	fc = hdr->frame_control;
	padpos = ath9k_cmn_padpos(hdr->frame_control);

	/* The MAC header is padded to have 32-bit boundary if the
	 * packet payload is non-zero. The general calculation for
	 * padsize would take into account odd header lengths:
	 * padsize = (4 - padpos % 4) % 4; However, since only
	 * even-length headers are used, padding can only be 0 or 2
	 * bytes and we can optimize this a bit. In addition, we must
	 * not try to remove padding from short control frames that do
	 * not have payload. */
	padsize = padpos & 3;
	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
		memmove(skb->data + padsize, skb->data, padpos);
		skb_pull(skb, padsize);
	}

	keyix = rx_stats->rs_keyix;

	if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
	    ieee80211_has_protected(fc)) {
		rxs->flag |= RX_FLAG_DECRYPTED;
	} else if (ieee80211_has_protected(fc)
		   && !decrypt_error && skb->len >= hdrlen + 4) {
		keyix = skb->data[hdrlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			rxs->flag |= RX_FLAG_DECRYPTED;
	}
	if (ah->sw_mgmt_crypto &&
	    (rxs->flag & RX_FLAG_DECRYPTED) &&
	    ieee80211_is_mgmt(fc))
		/* Use software decrypt for management frames. */
		rxs->flag &= ~RX_FLAG_DECRYPTED;
}

int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
	struct ath_buf *bf;
	struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
	struct ieee80211_rx_status *rxs;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	int retval;
	struct ath_rx_status rs;
	enum ath9k_rx_qtype qtype;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int dma_type;
	u8 rx_status_len = ah->caps.rx_status_len;
	u64 tsf = 0;
	u32 tsf_lower = 0;
	unsigned long flags;

	if (edma)
		dma_type = DMA_BIDIRECTIONAL;
	else
		dma_type = DMA_FROM_DEVICE;

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
	spin_lock_bh(&sc->rx.rxbuflock);

	tsf = ath9k_hw_gettsf64(ah);
	tsf_lower = tsf & 0xffffffff;

	do {
		bool decrypt_error = false;
		/* If handling rx interrupt and flush is in progress => exit */
		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
			break;

		memset(&rs, 0, sizeof(rs));
		if (edma)
			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
		else
			bf = ath_get_next_rx_buf(sc, &rs);

		if (!bf)
			break;

		skb = bf->bf_mpdu;
		if (!skb)
			continue;

		/*
		 * Take frame header from the first fragment and RX status from
		 * the last one.
		 */
		if (sc->rx.frag)
			hdr_skb = sc->rx.frag;
		else
			hdr_skb = skb;

		hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
		rxs = IEEE80211_SKB_RXCB(hdr_skb);
		if (ieee80211_is_beacon(hdr->frame_control)) {
			RX_STAT_INC(rx_beacons);
			if (!is_zero_ether_addr(common->curbssid) &&
			    ether_addr_equal(hdr->addr3, common->curbssid))
				rs.is_mybeacon = true;
			else
				rs.is_mybeacon = false;
		} else {
			rs.is_mybeacon = false;
		}

		if (ieee80211_is_data_present(hdr->frame_control) &&
		    !ieee80211_is_qos_nullfunc(hdr->frame_control))
			sc->rx.num_pkts++;

		ath_debug_stat_rx(sc, &rs);

		/*
		 * If we're asked to flush the receive queue, directly
		 * chain it back at the queue without processing it.
		 */
		if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
			RX_STAT_INC(rx_drop_rxflush);
			goto requeue_drop_frag;
		}

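		/*
		 * Build the mac80211 RX status from scratch. The hardware
		 * timestamp is only 32 bits wide, so extend it against the
		 * 64-bit TSF and compensate for a possible wraparound
		 * between the frame reception and the TSF read above.
		 */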
1124ffb1c56aSAshok Nagarajan memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1125ffb1c56aSAshok Nagarajan
1126a6d2055bSFelix Fietkau rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
1127a6d2055bSFelix Fietkau if (rs.rs_tstamp > tsf_lower &&
1128a6d2055bSFelix Fietkau unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
1129a6d2055bSFelix Fietkau rxs->mactime -= 0x100000000ULL;
1130a6d2055bSFelix Fietkau
1131a6d2055bSFelix Fietkau if (rs.rs_tstamp < tsf_lower &&
1132a6d2055bSFelix Fietkau unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
1133a6d2055bSFelix Fietkau rxs->mactime += 0x100000000ULL;
1134a6d2055bSFelix Fietkau
113583c76570SZefir Kurtisi retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
113683c76570SZefir Kurtisi rxs, &decrypt_error);
113783c76570SZefir Kurtisi if (retval)
113883c76570SZefir Kurtisi goto requeue_drop_frag;
113983c76570SZefir Kurtisi
114001e18918SRajkumar Manoharan if (rs.is_mybeacon) {
114101e18918SRajkumar Manoharan sc->hw_busy_count = 0;
114201e18918SRajkumar Manoharan ath_start_rx_poll(sc, 3);
114301e18918SRajkumar Manoharan }
1144203c4805SLuis R. Rodriguez /* Ensure we always have an skb to requeue once we are done
1145203c4805SLuis R. Rodriguez * processing the current buffer's skb */
1146cc861f74SLuis R. Rodriguez requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
1147203c4805SLuis R. Rodriguez
1148203c4805SLuis R. Rodriguez /* If there is no memory we ignore the current RX'd frame,
1149203c4805SLuis R. Rodriguez * tell hardware it can give us a new frame using the old
1150203c4805SLuis R. Rodriguez * skb and put it at the tail of the sc->rx.rxbuf list for
1151203c4805SLuis R. Rodriguez * processing. */
115215072189SBen Greear if (!requeue_skb) {
115315072189SBen Greear RX_STAT_INC(rx_oom_err);
11540d95521eSFelix Fietkau goto requeue_drop_frag;
115515072189SBen Greear }
1156203c4805SLuis R. Rodriguez
1157203c4805SLuis R. Rodriguez /* Unmap the frame */
1158203c4805SLuis R. Rodriguez dma_unmap_single(sc->dev, bf->bf_buf_addr,
1159cc861f74SLuis R. Rodriguez common->rx_bufsize,
1160b5c80475SFelix Fietkau dma_type);
1161203c4805SLuis R. Rodriguez
1162b5c80475SFelix Fietkau skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
1163b5c80475SFelix Fietkau if (ah->caps.rx_status_len)
1164b5c80475SFelix Fietkau skb_pull(skb, ah->caps.rx_status_len);
1165203c4805SLuis R. Rodriguez
11660d95521eSFelix Fietkau if (!rs.rs_more)
11670d95521eSFelix Fietkau ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
1168c9b14170SLuis R. Rodriguez rxs, decrypt_error);
1169203c4805SLuis R. Rodriguez
1170203c4805SLuis R. Rodriguez /* We will now give hardware our shiny new allocated skb */
1171203c4805SLuis R. Rodriguez bf->bf_mpdu = requeue_skb;
1172203c4805SLuis R. Rodriguez bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
1173cc861f74SLuis R. Rodriguez common->rx_bufsize,
1174b5c80475SFelix Fietkau dma_type);
1175203c4805SLuis R. Rodriguez if (unlikely(dma_mapping_error(sc->dev,
1176203c4805SLuis R. Rodriguez bf->bf_buf_addr))) {
1177203c4805SLuis R. Rodriguez dev_kfree_skb_any(requeue_skb);
1178203c4805SLuis R. Rodriguez bf->bf_mpdu = NULL;
11796cf9e995SBen Greear bf->bf_buf_addr = 0;
11803800276aSJoe Perches ath_err(common, "dma_mapping_error() on RX\n");
11817545daf4SFelix Fietkau ieee80211_rx(hw, skb);
1182203c4805SLuis R. Rodriguez break;
1183203c4805SLuis R. Rodriguez }
1184203c4805SLuis R. Rodriguez
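/*
 * Frames larger than one RX buffer arrive as a chain of descriptors
 * with rs_more set on every part except the last.  The first part is
 * parked in sc->rx.frag and, when the final part arrives, its payload
 * is appended to that header skb (after expanding its tailroom with
 * pskb_expand_head()) before the reassembled frame is handed up.  Only
 * one pending part is kept, so chains longer than two buffers are
 * dropped and counted as rx_too_many_frags_err.
 */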
11850d95521eSFelix Fietkau if (rs.rs_more) {
118615072189SBen Greear RX_STAT_INC(rx_frags);
11870d95521eSFelix Fietkau /*
11880d95521eSFelix Fietkau * rs_more indicates chained descriptors which can be
11890d95521eSFelix Fietkau * used to link buffers together for a sort of
11900d95521eSFelix Fietkau * scatter-gather operation.
11910d95521eSFelix Fietkau */
11920d95521eSFelix Fietkau if (sc->rx.frag) {
11930d95521eSFelix Fietkau /* too many fragments - cannot handle frame */
11940d95521eSFelix Fietkau dev_kfree_skb_any(sc->rx.frag);
11950d95521eSFelix Fietkau dev_kfree_skb_any(skb);
119615072189SBen Greear RX_STAT_INC(rx_too_many_frags_err);
11970d95521eSFelix Fietkau skb = NULL;
11980d95521eSFelix Fietkau }
11990d95521eSFelix Fietkau sc->rx.frag = skb;
12000d95521eSFelix Fietkau goto requeue;
12010d95521eSFelix Fietkau }
12020d95521eSFelix Fietkau
12030d95521eSFelix Fietkau if (sc->rx.frag) {
12040d95521eSFelix Fietkau int space = skb->len - skb_tailroom(hdr_skb);
12050d95521eSFelix Fietkau
12060d95521eSFelix Fietkau if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
12070d95521eSFelix Fietkau dev_kfree_skb(skb);
120815072189SBen Greear RX_STAT_INC(rx_oom_err);
12090d95521eSFelix Fietkau goto requeue_drop_frag;
12100d95521eSFelix Fietkau }
12110d95521eSFelix Fietkau
1212b5447ff9SEric Dumazet sc->rx.frag = NULL;
1213b5447ff9SEric Dumazet
12140d95521eSFelix Fietkau skb_copy_from_linear_data(skb, skb_put(hdr_skb, skb->len),
12150d95521eSFelix Fietkau skb->len);
12160d95521eSFelix Fietkau dev_kfree_skb_any(skb);
12170d95521eSFelix Fietkau skb = hdr_skb;
12180d95521eSFelix Fietkau }
12190d95521eSFelix Fietkau
1220eb840a80SMohammed Shafi Shajakhan
1221eb840a80SMohammed Shafi Shajakhan if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
1222eb840a80SMohammed Shafi Shajakhan
1223203c4805SLuis R. Rodriguez /*
1224eb840a80SMohammed Shafi Shajakhan * change the default rx antenna if rx diversity
1225eb840a80SMohammed Shafi Shajakhan * chooses the other antenna 3 times in a row.
1226203c4805SLuis R. Rodriguez */
122729bffa96SFelix Fietkau if (sc->rx.defant != rs.rs_antenna) {
1228203c4805SLuis R. Rodriguez if (++sc->rx.rxotherant >= 3)
122929bffa96SFelix Fietkau ath_setdefantenna(sc, rs.rs_antenna);
1230203c4805SLuis R. Rodriguez } else {
1231203c4805SLuis R. Rodriguez sc->rx.rxotherant = 0;
1232203c4805SLuis R. Rodriguez }
1233203c4805SLuis R. Rodriguez
1234eb840a80SMohammed Shafi Shajakhan }
1235eb840a80SMohammed Shafi Shajakhan
123666760eacSFelix Fietkau if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
123766760eacSFelix Fietkau skb_trim(skb, skb->len - 8);
123866760eacSFelix Fietkau
12398ab2cd09SLuis R. Rodriguez spin_lock_irqsave(&sc->sc_pm_lock, flags);
1240aaef24b4SMohammed Shafi Shajakhan if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
12411b04b930SSujith PS_WAIT_FOR_CAB |
1242aaef24b4SMohammed Shafi Shajakhan PS_WAIT_FOR_PSPOLL_DATA)) ||
1243cedc7e3dSMohammed Shafi Shajakhan ath9k_check_auto_sleep(sc))
1244f73c604cSRajkumar Manoharan ath_rx_ps(sc, skb, rs.is_mybeacon);
12458ab2cd09SLuis R. Rodriguez spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
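/*
 * At this point the frame is complete: it may feed the antenna
 * diversity combining scan and is then delivered to mac80211 via
 * ieee80211_rx().  The requeue labels that follow put the ath_buf back
 * on rx.rxbuf and, unless a flush is in progress, re-arm RX so DMA can
 * continue into the requeued buffer.
 */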
1246cc65965cSJouni Malinen
124743c35284SFelix Fietkau if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
1248102885a5SVasanthakumar Thiagarajan ath_ant_comb_scan(sc, &rs);
1249102885a5SVasanthakumar Thiagarajan
12507545daf4SFelix Fietkau ieee80211_rx(hw, skb);
1251cc65965cSJouni Malinen
12520d95521eSFelix Fietkau requeue_drop_frag:
12530d95521eSFelix Fietkau if (sc->rx.frag) {
12540d95521eSFelix Fietkau dev_kfree_skb_any(sc->rx.frag);
12550d95521eSFelix Fietkau sc->rx.frag = NULL;
12560d95521eSFelix Fietkau }
1257203c4805SLuis R. Rodriguez requeue:
1258b5c80475SFelix Fietkau list_add_tail(&bf->list, &sc->rx.rxbuf);
1259*a3dc48e8SFelix Fietkau if (flush)
1260*a3dc48e8SFelix Fietkau continue;
1261*a3dc48e8SFelix Fietkau
1262*a3dc48e8SFelix Fietkau if (edma) {
1263b5c80475SFelix Fietkau ath_rx_edma_buf_link(sc, qtype);
1264b5c80475SFelix Fietkau } else {
1265203c4805SLuis R. Rodriguez ath_rx_buf_link(sc, bf);
126695294973SFelix Fietkau ath9k_hw_rxena(ah);
1267b5c80475SFelix Fietkau }
1268203c4805SLuis R. Rodriguez } while (1);
1269203c4805SLuis R. Rodriguez
1270203c4805SLuis R. Rodriguez spin_unlock_bh(&sc->rx.rxbuflock);
1271203c4805SLuis R. Rodriguez
127229ab0b36SRajkumar Manoharan if (!(ah->imask & ATH9K_INT_RXEOL)) {
127329ab0b36SRajkumar Manoharan ah->imask |= (ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
127472d874c6SFelix Fietkau ath9k_hw_set_interrupts(ah);
127529ab0b36SRajkumar Manoharan }
127629ab0b36SRajkumar Manoharan
1277203c4805SLuis R. Rodriguez return 0;
1278203c4805SLuis R. Rodriguez }
1279