xref: /freebsd/sys/dev/ath/if_ath_rx.c (revision ab2043b81eaba0d7d7769b4a58b2b6d17bc464a3)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Driver for the Atheros Wireless LAN controller.
35  *
36  * This software is derived from work of Atsushi Onoe; his contribution
37  * is greatly appreciated.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 /*
43  * This is needed for register operations which are performed
44  * by the driver - eg, calls to ath_hal_gettsf32().
45  *
46  * It's also required for any AH_DEBUG checks in here, eg the
47  * module dependencies.
48  */
49 #include "opt_ah.h"
50 #include "opt_wlan.h"
51 
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/sysctl.h>
55 #include <sys/mbuf.h>
56 #include <sys/malloc.h>
57 #include <sys/lock.h>
58 #include <sys/mutex.h>
59 #include <sys/kernel.h>
60 #include <sys/socket.h>
61 #include <sys/sockio.h>
62 #include <sys/errno.h>
63 #include <sys/callout.h>
64 #include <sys/bus.h>
65 #include <sys/endian.h>
66 #include <sys/kthread.h>
67 #include <sys/taskqueue.h>
68 #include <sys/priv.h>
69 #include <sys/module.h>
70 #include <sys/ktr.h>
71 #include <sys/smp.h>	/* for mp_ncpus */
72 
73 #include <machine/bus.h>
74 
75 #include <net/if.h>
76 #include <net/if_dl.h>
77 #include <net/if_media.h>
78 #include <net/if_types.h>
79 #include <net/if_arp.h>
80 #include <net/ethernet.h>
81 #include <net/if_llc.h>
82 
83 #include <net80211/ieee80211_var.h>
84 #include <net80211/ieee80211_regdomain.h>
85 #ifdef IEEE80211_SUPPORT_SUPERG
86 #include <net80211/ieee80211_superg.h>
87 #endif
88 #ifdef IEEE80211_SUPPORT_TDMA
89 #include <net80211/ieee80211_tdma.h>
90 #endif
91 
92 #include <net/bpf.h>
93 
94 #ifdef INET
95 #include <netinet/in.h>
96 #include <netinet/if_ether.h>
97 #endif
98 
99 #include <dev/ath/if_athvar.h>
100 #include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
101 #include <dev/ath/ath_hal/ah_diagcodes.h>
102 
103 #include <dev/ath/if_ath_debug.h>
104 #include <dev/ath/if_ath_misc.h>
105 #include <dev/ath/if_ath_tsf.h>
106 #include <dev/ath/if_ath_tx.h>
107 #include <dev/ath/if_ath_sysctl.h>
108 #include <dev/ath/if_ath_led.h>
109 #include <dev/ath/if_ath_keycache.h>
110 #include <dev/ath/if_ath_rx.h>
111 #include <dev/ath/if_ath_beacon.h>
112 #include <dev/ath/if_athdfs.h>
113 
114 #ifdef ATH_TX99_DIAG
115 #include <dev/ath/ath_tx99/ath_tx99.h>
116 #endif
117 
118 /*
119  * Calculate the receive filter according to the
120  * operating mode and state:
121  *
122  * o always accept unicast, broadcast, and multicast traffic
123  * o accept PHY error frames when hardware doesn't have MIB support
124  *   to count and we need them for ANI (sta mode only until recently)
125  *   and we are not scanning (ANI is disabled)
126  *   NB: older hal's add rx filter bits out of sight and we need to
127  *	 blindly preserve them
128  * o probe request frames are accepted only when operating in
129  *   hostap, adhoc, mesh, or monitor modes
130  * o enable promiscuous mode
131  *   - when in monitor mode
132  *   - if interface marked PROMISC (assumes bridge setting is filtered)
133  * o accept beacons:
134  *   - when operating in station mode for collecting rssi data when
135  *     the station is otherwise quiet, or
136  *   - when operating in adhoc mode so the 802.11 layer creates
137  *     node table entries for peers,
138  *   - when scanning
139  *   - when doing s/w beacon miss (e.g. for ap+sta)
140  *   - when operating in ap mode in 11g to detect overlapping bss that
141  *     require protection
142  *   - when operating in mesh mode to detect neighbors
143  * o accept control frames:
144  *   - when in monitor mode
145  * XXX HT protection for 11n
146  */
147 u_int32_t
148 ath_calcrxfilter(struct ath_softc *sc)
149 {
150 	struct ifnet *ifp = sc->sc_ifp;
151 	struct ieee80211com *ic = ifp->if_l2com;
152 	u_int32_t rfilt;
153 
154 	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
155 	if (!sc->sc_needmib && !sc->sc_scanning)
156 		rfilt |= HAL_RX_FILTER_PHYERR;
157 	if (ic->ic_opmode != IEEE80211_M_STA)
158 		rfilt |= HAL_RX_FILTER_PROBEREQ;
159 	/* XXX ic->ic_monvaps != 0? */
160 	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
161 		rfilt |= HAL_RX_FILTER_PROM;
162 	if (ic->ic_opmode == IEEE80211_M_STA ||
163 	    ic->ic_opmode == IEEE80211_M_IBSS ||
164 	    sc->sc_swbmiss || sc->sc_scanning)
165 		rfilt |= HAL_RX_FILTER_BEACON;
166 	/*
167 	 * NB: We don't recalculate the rx filter when
168 	 * ic_protmode changes; otherwise we could do
169 	 * this only when ic_protmode != NONE.
170 	 */
171 	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
172 	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
173 		rfilt |= HAL_RX_FILTER_BEACON;
174 
175 	/*
176 	 * Enable hardware PS-POLL RX only for hostap mode;
177 	 * STA mode sends PS-POLL frames but never
178 	 * receives them.
179 	 */
180 	if (ath_hal_getcapability(sc->sc_ah, HAL_CAP_PSPOLL,
181 	    0, NULL) == HAL_OK &&
182 	    ic->ic_opmode == IEEE80211_M_HOSTAP)
183 		rfilt |= HAL_RX_FILTER_PSPOLL;
184 
185 	if (sc->sc_nmeshvaps) {
186 		rfilt |= HAL_RX_FILTER_BEACON;
187 		if (sc->sc_hasbmatch)
188 			rfilt |= HAL_RX_FILTER_BSSID;
189 		else
190 			rfilt |= HAL_RX_FILTER_PROM;
191 	}
192 	if (ic->ic_opmode == IEEE80211_M_MONITOR)
193 		rfilt |= HAL_RX_FILTER_CONTROL;
194 
195 	/*
196 	 * Enable RX of compressed BAR frames only when doing
197 	 * 802.11n. Required for A-MPDU.
198 	 */
199 	if (IEEE80211_IS_CHAN_HT(ic->ic_curchan))
200 		rfilt |= HAL_RX_FILTER_COMPBAR;
201 
202 	/*
203 	 * Enable radar PHY errors if requested by the
204 	 * DFS module.
205 	 */
206 	if (sc->sc_dodfs)
207 		rfilt |= HAL_RX_FILTER_PHYRADAR;
208 
209 	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
210 	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
211 	return rfilt;
212 }
213 
/*
 * (Re)initialize a receive buffer/descriptor for the legacy DMA API.
 *
 * If 'bf' has no mbuf attached, a fresh cluster mbuf is allocated and
 * DMA-loaded (it must map to exactly one segment).  The descriptor is
 * then set up and linked onto the running RX chain via sc_rxlink.
 *
 * Returns 0 on success, ENOMEM if no mbuf/cluster is available, or the
 * error from bus_dmamap_load_mbuf_sg().
 */
static int
ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		/* Use the whole cluster as the receive buffer. */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		/* The descriptor has a single data pointer, so one segment only. */
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	/*
	 * 11N: we can no longer afford to self link the last descriptor.
	 * MAC acknowledges BA status as long as it copies frames to host
	 * buffer (or rx fifo). This can incorrectly acknowledge packets
	 * to a sender if last desc is self-linked.
	 */
	ds = bf->bf_desc;
	if (sc->sc_rxslink)
		ds->ds_link = bf->bf_daddr;	/* link to self */
	else
		ds->ds_link = 0;		/* terminate the list */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* Chain onto the previous descriptor and advance the tail pointer. */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
295 
/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 *
 * Installed as the per-vap iv_recv_mgmt hook; the saved net80211
 * handler (av_recv_mgmt) is always invoked first so node state is
 * up to date before the driver-specific work below.
 */
void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
	int subtype, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		/* XXX unlocked check against vap->iv_bss? */
		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
		if (sc->sc_syncbeacon &&
		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
			/*
			 * Resync beacon timers using the tsf of the beacon
			 * frame we just received.
			 */
			ath_beacon_config(sc, vap);
		}
		/* fall thru...  beacons also feed the ibss-merge check */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN) {
			/* Extend the h/w rx timestamp of the last frame to 64 bits. */
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(sc, rstamp,
				ath_hal_gettsf64(sc->sc_ah));
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change its bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}
353 
#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
/*
 * Populate the vendor-extension portion of the radiotap RX header
 * (sc_rx_th) from the HAL RX status for the current frame.
 */
static void
ath_rx_tap_vendor(struct ifnet *ifp, struct mbuf *m,
    const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
	struct ath_softc *sc = ifp->if_softc;
	int chain;

	/* Fill in the extension bitmap */
	sc->sc_rx_th.wr_ext_bitmap = htole32(1 << ATH_RADIOTAP_VENDOR_HEADER);

	/* Fill in the vendor header (OUI 7f:03:00) */
	sc->sc_rx_th.wr_vh.vh_oui[0] = 0x7f;
	sc->sc_rx_th.wr_vh.vh_oui[1] = 0x03;
	sc->sc_rx_th.wr_vh.vh_oui[2] = 0x00;

	/* XXX what should this be? */
	sc->sc_rx_th.wr_vh.vh_sub_ns = 0;
	sc->sc_rx_th.wr_vh.vh_skip_len =
	    htole16(sizeof(struct ath_radiotap_vendor_hdr));

	/* General version info */
	sc->sc_rx_th.wr_v.vh_version = 1;

	sc->sc_rx_th.wr_v.vh_rx_chainmask = sc->sc_rxchainmask;

	/* Per-chain control/extension channel rssi */
	for (chain = 0; chain < 3; chain++) {
		sc->sc_rx_th.wr_v.rssi_ctl[chain] = rs->rs_rssi_ctl[chain];
		sc->sc_rx_th.wr_v.rssi_ext[chain] = rs->rs_rssi_ext[chain];
	}

	/* evm */
	sc->sc_rx_th.wr_v.evm[0] = rs->rs_evm0;
	sc->sc_rx_th.wr_v.evm[1] = rs->rs_evm1;
	sc->sc_rx_th.wr_v.evm[2] = rs->rs_evm2;
	/* XXX TODO: extend this to include 3-stream EVM */

	/* phyerr code, or 0xff when no PHY error was reported */
	sc->sc_rx_th.wr_v.vh_phyerr_code =
	    (rs->rs_status & HAL_RXERR_PHY) ? rs->rs_phyerr : 0xff;
	sc->sc_rx_th.wr_v.vh_rs_status = rs->rs_status;
	sc->sc_rx_th.wr_v.vh_rssi = rs->rs_rssi;
}
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
402 
/*
 * Fill in the base radiotap RX header (sc_rx_th) for frame 'm' from
 * the HAL RX status 'rs'.  'tsf' is the 64-bit TSF used to extend the
 * 32-bit rs_tstamp; 'nf' is the channel noise floor.
 */
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	/* Start from a clean HT channel-width state for each frame. */
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		/* Record 20/40MHz width; 40MHz side comes from the channel. */
		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		/*
		 * NOTE(review): SHORTGI is set when HAL_RX_GI is *clear*,
		 * but ath_rx_pkt() counts HAL_RX_GI as "half-GI".  The
		 * sense of this test looks inverted — confirm against the
		 * HAL's rs_flags definition.
		 */
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(sc, rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
447 
448 static void
449 ath_handle_micerror(struct ieee80211com *ic,
450 	struct ieee80211_frame *wh, int keyix)
451 {
452 	struct ieee80211_node *ni;
453 
454 	/* XXX recheck MIC to deal w/ chips that lie */
455 	/* XXX discard MIC errors on !data frames */
456 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
457 	if (ni != NULL) {
458 		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
459 		ieee80211_free_node(ni);
460 	}
461 }
462 
/*
 * Process a single completed receive frame.
 *
 * 'rs' is the HAL RX status for the frame held in 'bf'; 'status' is
 * the HAL_STATUS from descriptor processing; 'tsf' is the 64-bit TSF
 * sampled by the caller (used to extend the 32-bit rs_tstamp); 'nf'
 * is the channel noise floor; 'qtype' selects the per-queue pending
 * jumbogram state in sc_rxedma[].
 *
 * Returns non-zero ("is_good") only when the frame was dispatched to
 * a known node in station mode with a valid RX key — the caller uses
 * that to refresh sc_lastrx.  On the accept path bf->bf_m is cleared
 * and mbuf ownership passes to net80211 (or the mbuf is freed here).
 */
int
ath_rx_pkt(struct ath_softc *sc, struct ath_rx_status *rs, HAL_STATUS status,
    uint64_t tsf, int nf, HAL_RX_QUEUE qtype, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct mbuf *m = bf->bf_m;
	uint64_t rstamp;
	int len, type;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ieee80211_node *ni;
	int is_good = 0;
	struct ath_rx_edma *re = &sc->sc_rxedma[qtype];

	/*
	 * Calculate the correct 64 bit TSF given
	 * the TSF64 register value and rs_tstamp.
	 */
	rstamp = ath_extend_tsf(sc, rs->rs_tstamp, tsf);

	/* These aren't specifically errors */
#ifdef	AH_SUPPORT_AR5416
	if (rs->rs_flags & HAL_RX_GI)
		sc->sc_stats.ast_rx_halfgi++;
	if (rs->rs_flags & HAL_RX_2040)
		sc->sc_stats.ast_rx_2040++;
	if (rs->rs_flags & HAL_RX_DELIM_CRC_PRE)
		sc->sc_stats.ast_rx_pre_crc_err++;
	if (rs->rs_flags & HAL_RX_DELIM_CRC_POST)
		sc->sc_stats.ast_rx_post_crc_err++;
	if (rs->rs_flags & HAL_RX_DECRYPT_BUSY)
		sc->sc_stats.ast_rx_decrypt_busy_err++;
	if (rs->rs_flags & HAL_RX_HI_RX_CHAIN)
		sc->sc_stats.ast_rx_hi_rx_chain++;
#endif /* AH_SUPPORT_AR5416 */

	if (rs->rs_status != 0) {
		if (rs->rs_status & HAL_RXERR_CRC)
			sc->sc_stats.ast_rx_crcerr++;
		if (rs->rs_status & HAL_RXERR_FIFO)
			sc->sc_stats.ast_rx_fifoerr++;
		if (rs->rs_status & HAL_RXERR_PHY) {
			sc->sc_stats.ast_rx_phyerr++;
			/* Process DFS radar events */
			if ((rs->rs_phyerr == HAL_PHYERR_RADAR) ||
			    (rs->rs_phyerr == HAL_PHYERR_FALSE_RADAR_EXT)) {
				/* Since we're touching the frame data, sync it */
				bus_dmamap_sync(sc->sc_dmat,
				    bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* Now pass it to the radar processing code */
				ath_dfs_process_phy_err(sc, m, rstamp, rs);
			}

			/* Be suitably paranoid about receiving phy errors out of the stats array bounds */
			if (rs->rs_phyerr < 64)
				sc->sc_stats.ast_rx_phy[rs->rs_phyerr]++;
			goto rx_error;	/* NB: don't count in ierrors */
		}
		if (rs->rs_status & HAL_RXERR_DECRYPT) {
			/*
			 * Decrypt error.  If the error occurred
			 * because there was no hardware key, then
			 * let the frame through so the upper layers
			 * can process it.  This is necessary for 5210
			 * parts which have no way to setup a ``clear''
			 * key cache entry.
			 *
			 * XXX do key cache faulting
			 */
			if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
				goto rx_accept;
			sc->sc_stats.ast_rx_badcrypt++;
		}
		/*
		 * Similar as above - if the failure was a keymiss
		 * just punt it up to the upper layers for now.
		 */
		if (rs->rs_status & HAL_RXERR_KEYMISS) {
			sc->sc_stats.ast_rx_keymiss++;
			goto rx_accept;
		}
		if (rs->rs_status & HAL_RXERR_MIC) {
			sc->sc_stats.ast_rx_badmic++;
			/*
			 * Do minimal work required to hand off
			 * the 802.11 header for notification.
			 */
			/* XXX frag's and qos frames */
			len = rs->rs_datalen;
			if (len >= sizeof (struct ieee80211_frame)) {
				bus_dmamap_sync(sc->sc_dmat,
				    bf->bf_dmamap,
				    BUS_DMASYNC_POSTREAD);
				/* Split-MIC chips keep the RX MIC key 32 slots up. */
				ath_handle_micerror(ic,
				    mtod(m, struct ieee80211_frame *),
				    sc->sc_splitmic ?
					rs->rs_keyix-32 : rs->rs_keyix);
			}
		}
		ifp->if_ierrors++;
rx_error:
		/*
		 * Cleanup any pending partial frame.
		 */
		if (re->m_rxpending != NULL) {
			m_freem(re->m_rxpending);
			re->m_rxpending = NULL;
		}
		/*
		 * When a tap is present pass error frames
		 * that have been requested.  By default we
		 * pass decrypt+mic errors but others may be
		 * interesting (e.g. crc).
		 */
		if (ieee80211_radiotap_active(ic) &&
		    (rs->rs_status & sc->sc_monpass)) {
			bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
			    BUS_DMASYNC_POSTREAD);
			/* NB: bpf needs the mbuf length setup */
			len = rs->rs_datalen;
			m->m_pkthdr.len = m->m_len = len;
			/*
			 * NOTE(review): bf_dmamap is never unloaded on this
			 * path even though the mbuf is consumed and bf_m
			 * cleared; the accept path below does unload it.
			 * Confirm re-initialization copes with a still-loaded
			 * map.
			 */
			bf->bf_m = NULL;
			ath_rx_tap(ifp, m, rs, rstamp, nf);
#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
			ath_rx_tap_vendor(ifp, m, rs, rstamp, nf);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
			ieee80211_radiotap_rx_all(ic, m);
			m_freem(m);
		}
		/* XXX pass MIC errors up for s/w recalculation */
		goto rx_next;
	}
rx_accept:
	/*
	 * Sync and unmap the frame.  At this point we're
	 * committed to passing the mbuf somewhere so clear
	 * bf_m; this means a new mbuf must be allocated
	 * when the rx descriptor is setup again to receive
	 * another frame.
	 */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
	bf->bf_m = NULL;

	len = rs->rs_datalen;
	m->m_len = len;

	if (rs->rs_more) {
		/*
		 * Frame spans multiple descriptors; save
		 * it for the next completed descriptor, it
		 * will be used to construct a jumbogram.
		 */
		if (re->m_rxpending != NULL) {
			/* NB: max frame size is currently 2 clusters */
			sc->sc_stats.ast_rx_toobig++;
			m_freem(re->m_rxpending);
		}
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
		re->m_rxpending = m;
		goto rx_next;
	} else if (re->m_rxpending != NULL) {
		/*
		 * This is the second part of a jumbogram,
		 * chain it to the first mbuf, adjust the
		 * frame length, and clear the rxpending state.
		 */
		re->m_rxpending->m_next = m;
		re->m_rxpending->m_pkthdr.len += len;
		m = re->m_rxpending;
		re->m_rxpending = NULL;
	} else {
		/*
		 * Normal single-descriptor receive; setup
		 * the rcvif and packet length.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;
	}

	/*
	 * Validate rs->rs_antenna.
	 *
	 * Some users w/ AR9285 NICs have reported crashes
	 * here because rs_antenna field is bogusly large.
	 * Let's enforce the maximum antenna limit of 8
	 * (and it shouldn't be hard coded, but that's a
	 * separate problem) and if there's an issue, print
	 * out an error and adjust rs_antenna to something
	 * sensible.
	 *
	 * This code should be removed once the actual
	 * root cause of the issue has been identified.
	 * For example, it may be that the rs_antenna
	 * field is only valid for the last frame of
	 * an aggregate and it just happens that it is
	 * "mostly" right. (This is a general statement -
	 * the majority of the statistics are only valid
	 * for the last frame in an aggregate.)
	 */
	if (rs->rs_antenna > 7) {
		device_printf(sc->sc_dev, "%s: rs_antenna > 7 (%d)\n",
		    __func__, rs->rs_antenna);
#ifdef	ATH_DEBUG
		ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif /* ATH_DEBUG */
		rs->rs_antenna = 0;	/* XXX better than nothing */
	}

	/* rs_antenna is clamped to < 8 above, so this index is safe. */
	ifp->if_ipackets++;
	sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;

	/*
	 * Populate the rx status block.  When there are bpf
	 * listeners we do the additional work to provide
	 * complete status.  Otherwise we fill in only the
	 * material required by ieee80211_input.  Note that
	 * noise setting is filled in above.
	 */
	if (ieee80211_radiotap_active(ic)) {
		ath_rx_tap(ifp, m, rs, rstamp, nf);
#ifdef	ATH_ENABLE_RADIOTAP_VENDOR_EXT
		ath_rx_tap_vendor(ifp, m, rs, rstamp, nf);
#endif	/* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
	}

	/*
	 * From this point on we assume the frame is at least
	 * as large as ieee80211_frame_min; verify that.
	 */
	if (len < IEEE80211_MIN_LEN) {
		if (!ieee80211_radiotap_active(ic)) {
			DPRINTF(sc, ATH_DEBUG_RECV,
			    "%s: short packet %d\n", __func__, len);
			sc->sc_stats.ast_rx_tooshort++;
		} else {
			/* NB: in particular this captures ack's */
			ieee80211_radiotap_rx_all(ic, m);
		}
		m_freem(m);
		goto rx_next;
	}

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
		const HAL_RATE_TABLE *rt = sc->sc_currates;
		uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];

		ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
		    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
	}

	/* Strip the FCS before handing the frame up. */
	m_adj(m, -IEEE80211_CRC_LEN);

	/*
	 * Locate the node for sender, track state, and then
	 * pass the (referenced) node up to the 802.11 layer
	 * for its use.
	 */
	ni = ieee80211_find_rxnode_withkey(ic,
		mtod(m, const struct ieee80211_frame_min *),
		rs->rs_keyix == HAL_RXKEYIX_INVALID ?
			IEEE80211_KEYIX_NONE : rs->rs_keyix);
	/* Remembered for e.g. the ibss-merge tsf check in ath_recv_mgmt(). */
	sc->sc_lastrs = rs;

#ifdef	AH_SUPPORT_AR5416
	if (rs->rs_isaggr)
		sc->sc_stats.ast_rx_agg++;
#endif /* AH_SUPPORT_AR5416 */

	if (ni != NULL) {
		/*
		 * Only punt packets for ampdu reorder processing for
		 * 11n nodes; net80211 enforces that M_AMPDU is only
		 * set for 11n nodes.
		 */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			m->m_flags |= M_AMPDU;

		/*
		 * Sending station is known, dispatch directly.
		 */
		type = ieee80211_input(ni, m, rs->rs_rssi, nf);
		ieee80211_free_node(ni);
		/*
		 * Arrange to update the last rx timestamp only for
		 * frames from our ap when operating in station mode.
		 * This assumes the rx key is always setup when
		 * associated.
		 */
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    rs->rs_keyix != HAL_RXKEYIX_INVALID)
			is_good = 1;
	} else {
		type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
	}
	/*
	 * Track rx rssi and do any rx antenna management.
	 */
	ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
	if (sc->sc_diversity) {
		/*
		 * When using fast diversity, change the default rx
		 * antenna if diversity chooses the other antenna 3
		 * times in a row.
		 */
		if (sc->sc_defant != rs->rs_antenna) {
			if (++sc->sc_rxotherant >= 3)
				ath_setdefantenna(sc, rs->rs_antenna);
		} else
			sc->sc_rxotherant = 0;
	}

	/* Newer school diversity - kite specific for now */
	/* XXX perhaps migrate the normal diversity code to this? */
	if ((ah)->ah_rxAntCombDiversity)
		(*(ah)->ah_rxAntCombDiversity)(ah, rs, ticks, hz);

	if (sc->sc_softled) {
		/*
		 * Blink for any data frame.  Otherwise do a
		 * heartbeat-style blink when idle.  The latter
		 * is mainly for station mode where we depend on
		 * periodic beacon frames to trigger the poll event.
		 */
		if (type == IEEE80211_FC0_TYPE_DATA) {
			const HAL_RATE_TABLE *rt = sc->sc_currates;
			ath_led_event(sc,
			    rt->rateCodeToIndex[rs->rs_rate]);
		} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
			ath_led_event(sc, 0);
	}
rx_next:
	return (is_good);
}
799 
/*
 * Walk the legacy RX descriptor list and hand each completed frame
 * to ath_rx_pkt().
 *
 * 'resched' is non-zero when invoked from the RX tasklet: only then is
 * the DFS tasklet queued, a pending RXEOL ("kickpcu") condition
 * serviced, and TX kicked when the send queue is non-empty.  The flush
 * path passes resched=0 to simply drain completed frames.
 *
 * Must be entered without the ATH lock or PCU lock held; sc_rxproc_cnt
 * is bumped under the PCU lock for the duration of the run.
 */
static void
ath_rx_proc(struct ath_softc *sc, int resched)
{
#define	PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
	struct ath_buf *bf;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
#ifdef IEEE80211_SUPPORT_SUPERG
	struct ieee80211com *ic = ifp->if_l2com;
#endif
	struct ath_desc *ds;
	struct ath_rx_status *rs;
	struct mbuf *m;
	int ngood;
	HAL_STATUS status;
	int16_t nf;
	u_int64_t tsf;
	int npkts = 0;

	/* XXX we must not hold the ATH_LOCK here */
	ATH_UNLOCK_ASSERT(sc);
	ATH_PCU_UNLOCK_ASSERT(sc);

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt++;
	ATH_PCU_UNLOCK(sc);

	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: called\n", __func__);
	ngood = 0;
	/* Sample noise floor and TSF once for the whole batch. */
	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
	sc->sc_stats.ast_rx_noise = nf;
	tsf = ath_hal_gettsf64(ah);
	do {
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		if (sc->sc_rxslink && bf == NULL) {	/* NB: shouldn't happen */
			if_printf(ifp, "%s: no buffer!\n", __func__);
			break;
		} else if (bf == NULL) {
			/*
			 * End of List:
			 * this can happen for non-self-linked RX chains
			 */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		m = bf->bf_m;
		if (m == NULL) {		/* NB: shouldn't happen */
			/*
			 * If mbuf allocation failed previously there
			 * will be no mbuf; try again to re-populate it.
			 */
			/* XXX make debug msg */
			if_printf(ifp, "%s: no mbuf!\n", __func__);
			TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
			goto rx_proc_next;
		}
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			sc->sc_stats.ast_rx_hitqueueend++;
			break;
		}
		/* XXX sync descriptor memory */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		rs = &bf->bf_status.ds_rxstat;
		status = ath_hal_rxprocdesc(ah, ds,
				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
#ifdef ATH_DEBUG
		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
#endif
		/* Hardware hasn't finished this descriptor yet; stop here. */
		if (status == HAL_EINPROGRESS)
			break;

		TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list);
		npkts++;

		/*
		 * Process a single frame.
		 */
		if (ath_rx_pkt(sc, rs, status, tsf, nf, HAL_RX_QUEUE_HP, bf))
			ngood++;
rx_proc_next:
		/* Re-queue the buffer and keep going while re-init succeeds. */
		TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
	} while (ath_rxbuf_init(sc, bf) == 0);

	/* rx signal state monitoring */
	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
	if (ngood)
		sc->sc_lastrx = tsf;

	CTR2(ATH_KTR_INTR, "ath_rx_proc: npkts=%d, ngood=%d", npkts, ngood);
	/* Queue DFS tasklet if needed */
	if (resched && ath_dfs_tasklet_needed(sc, sc->sc_curchan))
		taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask);

	/*
	 * Now that all the RX frames were handled that
	 * need to be handled, kick the PCU if there's
	 * been an RXEOL condition.
	 */
	ATH_PCU_LOCK(sc);
	if (resched && sc->sc_kickpcu) {
		CTR0(ATH_KTR_ERR, "ath_rx_proc: kickpcu");
		device_printf(sc->sc_dev, "%s: kickpcu; handled %d packets\n",
		    __func__, npkts);

		/* XXX rxslink? */
		/*
		 * XXX can we hold the PCU lock here?
		 * Are there any net80211 buffer calls involved?
		 */
		bf = TAILQ_FIRST(&sc->sc_rxbuf);
		ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
		ath_hal_rxena(ah);		/* enable recv descriptors */
		ath_mode_init(sc);		/* set filters, etc. */
		ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */

		ath_hal_intrset(ah, sc->sc_imask);
		sc->sc_kickpcu = 0;
	}
	ATH_PCU_UNLOCK(sc);

	/* XXX check this inside of IF_LOCK? */
	if (resched && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
		ieee80211_ff_age_all(ic, 100);
#endif
		if (!IFQ_IS_EMPTY(&ifp->if_snd))
			ath_tx_kick(sc);
	}
#undef PA2DESC

	ATH_PCU_LOCK(sc);
	sc->sc_rxproc_cnt--;
	ATH_PCU_UNLOCK(sc);
}
949 
950 /*
951  * Only run the RX proc if it's not already running.
952  * Since this may get run as part of the reset/flush path,
953  * the task can't clash with an existing, running tasklet.
954  */
955 static void
956 ath_legacy_rx_tasklet(void *arg, int npending)
957 {
958 	struct ath_softc *sc = arg;
959 
960 	CTR1(ATH_KTR_INTR, "ath_rx_proc: pending=%d", npending);
961 	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
962 	ATH_PCU_LOCK(sc);
963 	if (sc->sc_inreset_cnt > 0) {
964 		device_printf(sc->sc_dev,
965 		    "%s: sc_inreset_cnt > 0; skipping\n", __func__);
966 		ATH_PCU_UNLOCK(sc);
967 		return;
968 	}
969 	ATH_PCU_UNLOCK(sc);
970 
971 	ath_rx_proc(sc, 1);
972 }
973 
/*
 * Flush any pending RX frames.
 *
 * Runs the RX processing path with resched=0, which (per the tail of
 * ath_rx_proc) suppresses the DFS tasklet enqueue, the kickpcu PCU
 * restart and the TX kick — frames are consumed but no further work
 * is scheduled.  Used from the reset/flush path.
 */
static void
ath_legacy_flushrecv(struct ath_softc *sc)
{

	ath_rx_proc(sc, 0);
}
980 
981 /*
982  * Disable the receive h/w in preparation for a reset.
983  */
984 static void
985 ath_legacy_stoprecv(struct ath_softc *sc, int dodelay)
986 {
987 #define	PA2DESC(_sc, _pa) \
988 	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
989 		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
990 	struct ath_hal *ah = sc->sc_ah;
991 
992 	ath_hal_stoppcurecv(ah);	/* disable PCU */
993 	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
994 	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
995 	/*
996 	 * TODO: see if this particular DELAY() is required; it may be
997 	 * masking some missing FIFO flush or DMA sync.
998 	 */
999 #if 0
1000 	if (dodelay)
1001 #endif
1002 		DELAY(3000);		/* 3ms is long enough for 1 frame */
1003 #ifdef ATH_DEBUG
1004 	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
1005 		struct ath_buf *bf;
1006 		u_int ix;
1007 
1008 		device_printf(sc->sc_dev,
1009 		    "%s: rx queue %p, link %p\n",
1010 		    __func__,
1011 		    (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah, HAL_RX_QUEUE_HP),
1012 		    sc->sc_rxlink);
1013 		ix = 0;
1014 		TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1015 			struct ath_desc *ds = bf->bf_desc;
1016 			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
1017 			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
1018 				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
1019 			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
1020 				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
1021 			ix++;
1022 		}
1023 	}
1024 #endif
1025 	/*
1026 	 * Free both high/low RX pending, just in case.
1027 	 */
1028 	if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending != NULL) {
1029 		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending);
1030 		sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
1031 	}
1032 	if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending != NULL) {
1033 		m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending);
1034 		sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
1035 	}
1036 	sc->sc_rxlink = NULL;		/* just in case */
1037 #undef PA2DESC
1038 }
1039 
1040 /*
1041  * Enable the receive h/w following a reset.
1042  */
1043 static int
1044 ath_legacy_startrecv(struct ath_softc *sc)
1045 {
1046 	struct ath_hal *ah = sc->sc_ah;
1047 	struct ath_buf *bf;
1048 
1049 	sc->sc_rxlink = NULL;
1050 	sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL;
1051 	sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL;
1052 	TAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
1053 		int error = ath_rxbuf_init(sc, bf);
1054 		if (error != 0) {
1055 			DPRINTF(sc, ATH_DEBUG_RECV,
1056 				"%s: ath_rxbuf_init failed %d\n",
1057 				__func__, error);
1058 			return error;
1059 		}
1060 	}
1061 
1062 	bf = TAILQ_FIRST(&sc->sc_rxbuf);
1063 	ath_hal_putrxbuf(ah, bf->bf_daddr, HAL_RX_QUEUE_HP);
1064 	ath_hal_rxena(ah);		/* enable recv descriptors */
1065 	ath_mode_init(sc);		/* set filters, etc. */
1066 	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
1067 	return 0;
1068 }
1069 
1070 static int
1071 ath_legacy_dma_rxsetup(struct ath_softc *sc)
1072 {
1073 	int error;
1074 
1075 	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
1076 	    "rx", sizeof(struct ath_desc), ath_rxbuf, 1);
1077 	if (error != 0)
1078 		return (error);
1079 
1080 	return (0);
1081 }
1082 
1083 static int
1084 ath_legacy_dma_rxteardown(struct ath_softc *sc)
1085 {
1086 
1087 	if (sc->sc_rxdma.dd_desc_len != 0)
1088 		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
1089 	return (0);
1090 }
1091 
1092 void
1093 ath_recv_setup_legacy(struct ath_softc *sc)
1094 {
1095 
1096 	/* Sensible legacy defaults */
1097 	sc->sc_rx_statuslen = 0;
1098 
1099 	sc->sc_rx.recv_start = ath_legacy_startrecv;
1100 	sc->sc_rx.recv_stop = ath_legacy_stoprecv;
1101 	sc->sc_rx.recv_flush = ath_legacy_flushrecv;
1102 	sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet;
1103 	sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init;
1104 
1105 	sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup;
1106 	sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown;
1107 }
1108