/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signal.h>
#include <sys/stream.h>
#include <sys/termio.h>
#include <sys/errno.h>
#include <sys/file.h>
#include <sys/cmn_err.h>
#include <sys/stropts.h>
#include <sys/strsubr.h>
#include <sys/strtty.h>
#include <sys/kbio.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/consdev.h>
#include <sys/kmem.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/pci.h>
#include <sys/mac_provider.h>
#include <sys/dlpi.h>
#include <sys/ethernet.h>
#include <sys/list.h>
#include <sys/byteorder.h>
#include <sys/strsun.h>
#include <sys/policy.h>
#include <inet/common.h>
#include <inet/nd.h>
#include <inet/mi.h>
#include <inet/wifi_ioctl.h>
#include <sys/mac_wifi.h>

#include "arn_core.h"

#define	BITS_PER_BYTE		8
#define	OFDM_PLCP_BITS		22
#define	HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
#define	HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
#define	L_STF			8
#define	L_LTF			8
#define	L_SIG			4
#define	HT_SIG			8
#define	HT_STF			4
#define	HT_LTF(_ns)		(4 * (_ns))
#define	SYMBOL_TIME(_ns)	((_ns) << 2) /* ns * 4 us */
#define	SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)  /* ns * 3.6 us */
#define	NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define	NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define	OFDM_SIFS_TIME	16

#define	IS_HT_RATE(_rate)	((_rate) & 0x80)
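
/*
 * Example: at the full 4 us OFDM symbol time, SYMBOL_TIME(10) yields
 * 40 us for 10 symbols; at the 3.6 us half-GI symbol time,
 * SYMBOL_TIME_HALFGI(10) yields (10 * 18 + 4) / 5 = 36 us (integer
 * math, rounded down).
 */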

static void
arn_get_beaconconfig(struct arn_softc *sc, struct ath_beacon_config *conf)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_node *in = ic->ic_bss;

	/* fill in beacon config data */

	conf->beacon_interval = in->in_intval ?
	    in->in_intval : ATH_DEFAULT_BINTVAL;
	conf->listen_interval = 100;
	conf->dtim_count = 1;
	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
}

static void
arn_tx_stopdma(struct arn_softc *sc, struct ath_txq *txq)
{
	struct ath_hal *ah = sc->sc_ah;

	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);

	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_stopdma(): "
	    "tx queue [%u] %x, link %p\n",
	    txq->axq_qnum,
	    ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link));
}

/* Drain only the data queues */
/* ARGSUSED */
static void
arn_drain_txdataq(struct arn_softc *sc, boolean_t retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int i, status, npend = 0;

	if (!(sc->sc_flags & SC_OP_INVALID)) {
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ARN_TXQ_SETUP(sc, i)) {
				arn_tx_stopdma(sc, &sc->sc_txq[i]);
				/*
				 * The TxDMA may not really be stopped.
				 * Double check the hal tx pending count.
				 */
				npend += ath9k_hw_numtxpending(ah,
				    sc->sc_txq[i].axq_qnum);
			}
		}
	}

	if (npend) {
		/* TxDMA not stopped, reset the hal */
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_drain_txdataq(): "
		    "Unable to stop TxDMA. Reset HAL!\n"));

		if (!ath9k_hw_reset(ah,
		    sc->sc_ah->ah_curchan,
		    sc->tx_chan_width,
		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
		    sc->sc_ht_extprotspacing, B_TRUE, &status)) {
			ARN_DBG((ARN_DBG_FATAL, "arn: arn_drain_txdataq(): "
			    "unable to reset hardware; hal status %u\n",
			    status));
		}
	}

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i))
			arn_tx_draintxq(sc, &sc->sc_txq[i]);
	}
}

/* Setup a h/w transmit queue */
struct ath_txq *
arn_txq_setup(struct arn_softc *sc, int qtype, int subtype)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	int qnum;

	(void) memset(&qi, 0, sizeof (qi));
	qi.tqi_subtype = subtype;
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load, and it only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	if (qtype == ATH9K_TX_QUEUE_UAPSD)
		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	else
		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
		    TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return (NULL);
	}
	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: arn_txq_setup(): "
		    "hal qnum %u out of range, max %u!\n",
		    qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)));
		(void) ath9k_hw_releasetxqueue(ah, qnum);
		return (NULL);
	}
	if (!ARN_TXQ_SETUP(sc, qnum)) {
		struct ath_txq *txq = &sc->sc_txq[qnum];

		txq->axq_qnum = qnum;
		txq->axq_intrcnt = 0;
		txq->axq_link = NULL;

		list_create(&txq->axq_list, sizeof (struct ath_buf),
		    offsetof(struct ath_buf, bf_node));
		mutex_init(&txq->axq_lock, NULL, MUTEX_DRIVER, NULL);

		txq->axq_depth = 0;
		txq->axq_aggr_depth = 0;
		txq->axq_totalqueued = 0;
		/* txq->axq_linkbuf = NULL; */
		sc->sc_txqsetup |= 1<<qnum;
	}
	return (&sc->sc_txq[qnum]);
}

/* Reclaim resources for a setup queue */

void
arn_tx_cleanupq(struct arn_softc *sc, struct ath_txq *txq)
{
	(void) ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
}

/*
 * Set up a hardware data transmit queue for the specified
 * access category.  The hal may not support all requested
 * queues, in which case it will return a reference to a
 * previously set up queue.  We record the mapping from ac's
 * to h/w queues for use by arn_tx_start and also track
 * the set of h/w queues being used to optimize work in the
 * transmit interrupt handler and related routines.
 */

int
arn_tx_setup(struct arn_softc *sc, int haltype)
{
	struct ath_txq *txq;

	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
		ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_setup(): "
		    "HAL AC %u out of range, max %zu!\n",
		    haltype, ARRAY_SIZE(sc->sc_haltype2q)));
		return (0);
	}
	txq = arn_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
	if (txq != NULL) {
		sc->sc_haltype2q[haltype] = txq->axq_qnum;
		return (1);
	} else
		return (0);
}

int
arn_tx_get_qnum(struct arn_softc *sc, int qtype, int haltype)
{
	int qnum;

	switch (qtype) {
	case ATH9K_TX_QUEUE_DATA:
		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
			ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_get_qnum(): "
			    "HAL AC %u out of range, max %zu!\n",
			    haltype, ARRAY_SIZE(sc->sc_haltype2q)));
			return (-1);
		}
		qnum = sc->sc_haltype2q[haltype];
		break;
	case ATH9K_TX_QUEUE_BEACON:
		qnum = sc->sc_beaconq;
		break;
	case ATH9K_TX_QUEUE_CAB:
		qnum = sc->sc_cabq->axq_qnum;
		break;
	default:
		qnum = -1;
	}
	return (qnum);
}

void
arn_tx_draintxq(struct arn_softc *sc, struct ath_txq *txq)
{
	struct ath_buf *bf;

	/*
	 * This assumes output has been stopped.
	 */
	for (;;) {
		mutex_enter(&txq->axq_lock);
		bf = list_head(&txq->axq_list);
		if (bf == NULL) {
			txq->axq_link = NULL;
			mutex_exit(&txq->axq_lock);
			break;
		}
		list_remove(&txq->axq_list, bf);
		mutex_exit(&txq->axq_lock);
		bf->bf_in = NULL;
		mutex_enter(&sc->sc_txbuflock);
		list_insert_tail(&sc->sc_txbuf_list, bf);
		mutex_exit(&sc->sc_txbuflock);
	}
}

/* Drain the transmit queues and reclaim resources */

void
arn_draintxq(struct arn_softc *sc, boolean_t retry_tx)
{
	/*
	 * Stop the beacon queue. The beacon will be freed when
	 * we go to INIT state.
	 */
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_beaconq);
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_draintxq(): "
		    "beacon queue %x\n",
		    ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_beaconq)));
	}

	arn_drain_txdataq(sc, retry_tx);
}

uint32_t
arn_txq_depth(struct arn_softc *sc, int qnum)
{
	return (sc->sc_txq[qnum].axq_depth);
}

uint32_t
arn_txq_aggr_depth(struct arn_softc *sc, int qnum)
{
	return (sc->sc_txq[qnum].axq_aggr_depth);
}

/* Update parameters for a transmit queue */
int
arn_txq_update(struct arn_softc *sc, int qnum,
    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal *ah = sc->sc_ah;
	int error = 0;
	struct ath9k_tx_queue_info qi;

	if (qnum == sc->sc_beaconq) {
		/*
		 * XXX: for beacon queue, we just save the parameter.
		 * It will be picked up by arn_beaconq_config() when
		 * it's necessary.
		 */
		sc->sc_beacon_qi = *qinfo;
		return (0);
	}

	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);

	(void) ath9k_hw_get_txq_props(ah, qnum, &qi);
	qi.tqi_aifs = qinfo->tqi_aifs;
	qi.tqi_cwmin = qinfo->tqi_cwmin;
	qi.tqi_cwmax = qinfo->tqi_cwmax;
	qi.tqi_burstTime = qinfo->tqi_burstTime;
	qi.tqi_readyTime = qinfo->tqi_readyTime;

	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
		ARN_DBG((ARN_DBG_FATAL,
		    "Unable to update hardware queue %u!\n", qnum));
		error = -EIO;
	} else {
		(void) ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
	}

	return (error);
}

int
ath_cabq_update(struct arn_softc *sc)
{
	struct ath9k_tx_queue_info qi;
	int qnum = sc->sc_cabq->axq_qnum;
	struct ath_beacon_config conf;

	(void) ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
	/*
	 * Ensure the readytime % is within the bounds.
	 */
	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;

	arn_get_beaconconfig(sc, &conf);
	qi.tqi_readyTime =
	    (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
	(void) arn_txq_update(sc, qnum, &qi);

	return (0);
}

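/*
 * Map a net80211 cipher type to the ath9k hal key type written into
 * the tx descriptor; unsupported ciphers fall back to
 * ATH9K_KEY_TYPE_CLEAR (no hardware crypto).
 */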
static uint32_t
arn_tx_get_keytype(const struct ieee80211_cipher *cip)
{
	uint32_t index;
	static const uint8_t ciphermap[] = {
	    ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
	    ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
	    ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
	    ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
	    ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
	    ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};

	ASSERT(cip->ic_cipher < ARRAY_SIZE(ciphermap));
	index = cip->ic_cipher;

	if (ciphermap[index] == ATH9K_CIPHER_WEP)
		return (ATH9K_KEY_TYPE_WEP);
	else if (ciphermap[index] == ATH9K_CIPHER_TKIP)
		return (ATH9K_KEY_TYPE_TKIP);
	else if (ciphermap[index] == ATH9K_CIPHER_AES_CCM)
		return (ATH9K_KEY_TYPE_AES);

	return (ATH9K_KEY_TYPE_CLEAR);
}

/*
 * The input parameter mp carries the following assumptions:
 * For data packets, the GLDv3 mac_wifi plugin allocates and fills the
 * ieee80211 header. For management packets, net80211 allocates and
 * fills the ieee80211 header. In both cases, enough space is left in
 * the header for the encryption option.
 */
static int32_t
arn_tx_start(struct arn_softc *sc, struct ieee80211_node *in,
    struct ath_buf *bf, mblk_t *mp)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_frame *wh;
	struct ath_hal *ah = sc->sc_ah;
	uint32_t flags;
	uint32_t subtype, ctsduration;
	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen;
	/* LINTED E_FUNC_SET_NOT_USED */
	int32_t try0;
	uint8_t rix, cix, txrate, ctsrate;
	struct ath_desc *ds;
	struct ath_txq *txq;
	enum ath9k_pkt_type atype;
	struct ath_rate_table *rt;
	boolean_t shortPreamble;
	boolean_t is_pspoll = B_FALSE;
	struct ath_node *an;
	caddr_t dest;
	uint32_t keytype = ATH9K_KEY_TYPE_CLEAR;

	/*
	 * The CRC is added by the hardware, not encapsulated by the
	 * driver, but we must count it in the packet length.
	 */
	pktlen = IEEE80211_CRC_LEN;

	wh = (struct ieee80211_frame *)mp->b_rptr;
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	keyix = ATH9K_TXKEYIX_INVALID;
	hdrlen = sizeof (struct ieee80211_frame);
	if (iswep != 0) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, mp);
		if (k == NULL) {
			ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start "
			    "crypto_encap failed\n"));
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (EIO);
		}
		cip = k->wk_cipher;

		keytype = arn_tx_get_keytype(cip);

		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had the mic
		 * added to it prior to entry, so the payload length
		 * computed below already accounts for it. Otherwise we
		 * need to add it to the packet length.
		 */
		hdrlen += cip->ic_header;
		pktlen += cip->ic_trailer;
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;

		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)mp->b_rptr;
	}

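	/*
	 * Copy the (possibly chained) mblk payload into the contiguous
	 * DMA buffer backing this tx buffer, then derive the on-air
	 * payload length from the number of bytes copied.
	 */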
	dest = bf->bf_dma.mem_va;
	for (; mp != NULL; mp = mp->b_cont) {
		mblen = MBLKL(mp);
		bcopy(mp->b_rptr, dest, mblen);
		dest += mblen;
	}
	mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
	pktlen += mbslen;

	bf->bf_in = in;

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	ASSERT(rt != NULL);

	/*
	 * The 802.11 layer marks whether or not we should
	 * use short preamble based on the current mode and
	 * negotiated parameters.
	 */
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		shortPreamble = B_TRUE;
		sc->sc_stats.ast_tx_shortpre++;
	} else {
		shortPreamble = B_FALSE;
	}

	an = (struct ath_node *)(in);

	/*
	 * Calculate the Atheros packet type from the IEEE80211
	 * packet header and set up for rate calculations.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = ATH9K_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = ATH9K_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = ATH9K_PKT_TYPE_ATIM;
		else
			atype = ATH9K_PKT_TYPE_NORMAL;
		rix = 0;	/* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble) {
			txrate = an->an_tx_mgtratesp;
		} else {
			txrate = an->an_tx_mgtrate;
		}
		/* force all mgmt frames to highest queue */
		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = ATH9K_PKT_TYPE_PSPOLL;
		is_pspoll = B_TRUE;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		rix = 0; /* lowest rate */
		try0 = ATH_TXMAXTRY;
		if (shortPreamble)
			txrate = an->an_tx_mgtratesp;
		else
			txrate = an->an_tx_mgtrate;
		/* force all ctl frames to highest queue */
		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
		break;
	case IEEE80211_FC0_TYPE_DATA:
		atype = ATH9K_PKT_TYPE_NORMAL;
		rix = an->an_tx_rix0;
		try0 = an->an_tx_try0;
		if (shortPreamble)
			txrate = an->an_tx_rate0sp;
		else
			txrate = an->an_tx_rate0;
		/* always use the background queue */
		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_BK, sc)];
		break;
	default:
		/* Unknown 802.11 frame */
		sc->sc_stats.ast_tx_invalid++;
		return (1);
	}
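
	/*
	 * NB: try0 is computed above but currently unused (note the
	 * LINTED declaration); retry behavior is programmed through
	 * the rate series passed to the hal below.
	 */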

	/*
	 * Calculate miscellaneous flags.
	 */
	flags = ATH9K_TXDESC_CLRDMASK;
	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= ATH9K_TXDESC_NOACK;	/* no ack on broad/multicast */
		sc->sc_stats.ast_tx_noack++;
	} else if (pktlen > ic->ic_rtsthreshold) {
		flags |= ATH9K_TXDESC_RTSENA;	/* RTS based on frame length */
		sc->sc_stats.ast_tx_rts++;
	}

	/*
	 * Calculate duration.  This logically belongs in the 802.11
	 * layer but it lacks sufficient information to calculate it.
	 */
	if ((flags & ATH9K_TXDESC_NOACK) == 0 &&
	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
	    IEEE80211_FC0_TYPE_CTL) {
		uint16_t dur;
		dur = ath9k_hw_computetxtime(ah, rt, IEEE80211_ACK_SIZE,
		    rix, shortPreamble);
		/* LINTED E_BAD_PTR_CAST_ALIGN */
		*(uint16_t *)wh->i_dur = LE_16(dur);
	}

	/*
	 * Calculate RTS/CTS rate and duration if needed.
	 */
	ctsduration = 0;
	if (flags & (ATH9K_TXDESC_RTSENA|ATH9K_TXDESC_CTSENA)) {
		/*
		 * CTS transmit rate is derived from the transmit rate
		 * by looking in the h/w rate table.  We must also factor
		 * in whether or not a short preamble is to be used.
		 */
		cix = rt->info[rix].ctrl_rate;
		ctsrate = rt->info[cix].ratecode;
		if (shortPreamble)
			ctsrate |= rt->info[cix].short_preamble;
		/*
		 * Compute the transmit duration based on the size
		 * of an ACK frame.  We call into the HAL to do the
		 * computation since it depends on the characteristics
		 * of the actual PHY being used.
		 */
		if (flags & ATH9K_TXDESC_RTSENA) {	/* SIFS + CTS */
			ctsduration += ath9k_hw_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
		/* SIFS + data */
		ctsduration += ath9k_hw_computetxtime(ah,
		    rt, pktlen, rix, shortPreamble);
		if ((flags & ATH9K_TXDESC_NOACK) == 0) {  /* SIFS + ACK */
			ctsduration += ath9k_hw_computetxtime(ah,
			    rt, IEEE80211_ACK_SIZE, cix, shortPreamble);
		}
	} else
		ctsrate = 0;

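	/*
	 * Interrupt mitigation: request a tx descriptor interrupt for
	 * roughly every fifth frame queued here; frames completed in
	 * between are reaped in the same pass.
	 */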
	if (++txq->axq_intrcnt >= 5) {
		flags |= ATH9K_TXDESC_INTREQ;
		txq->axq_intrcnt = 0;
	}

	/* setup descriptor */
	ds->ds_link = 0;
	ds->ds_data = bf->bf_dma.cookie.dmac_address;

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ath9k_hw_set11n_txdesc(ah, ds,
	    pktlen, /* packet length */
	    atype, /* Atheros packet type */
	    MAX_RATE_POWER /* MAX_RATE_POWER */,
	    keyix /* ATH9K_TXKEYIX_INVALID */,
	    keytype /* ATH9K_KEY_TYPE_CLEAR */,
	    flags /* flags */);
	bf->bf_flags = (uint16_t)flags; /* LINT */

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start(): to %s totlen=%d "
	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
	    "qnum=%d rix=%d sht=%d dur = %d\n",
	    ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
	    an->an_tx_rate2sp, an->an_tx_rate3sp,
	    txq->axq_qnum, rix, shortPreamble, *(uint16_t *)wh->i_dur));

	(void) ath9k_hw_filltxdesc(ah, ds,
	    mbslen,		/* segment length */
	    B_TRUE,		/* first segment */
	    B_TRUE,		/* last segment */
	    ds);		/* first descriptor */

	/* set rate related fields in tx descriptor */
	struct ath9k_11n_rate_series series[4];
	(void) memset(series, 0, sizeof (struct ath9k_11n_rate_series) * 4);

#ifdef MULTIRATE_RETRY
	int i;
	for (i = 1; i < 4; i++) {
		series[i].Tries = 2; /* ??? */
		series[i].ChSel = sc->sc_tx_chainmask;

		series[i].RateFlags &= ~ATH9K_RATESERIES_RTS_CTS;
		series[i].RateFlags &= ~ATH9K_RATESERIES_2040;
		series[i].RateFlags &= ~ATH9K_RATESERIES_HALFGI;

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
		    rt, pktlen, rix, shortPreamble);
	}
#endif

	/* main rate */
	series[0].Rate = txrate;
	series[0].Tries = ATH_TXMAXTRY;
	series[0].RateFlags &= ~ATH9K_RATESERIES_RTS_CTS;
	series[0].RateFlags &= ~ATH9K_RATESERIES_2040;
	series[0].RateFlags &= ~ATH9K_RATESERIES_HALFGI;
	series[0].ChSel = sc->sc_tx_chainmask;
	series[0].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, rt, pktlen,
	    rix, shortPreamble);

#ifdef MULTIRATE_RETRY
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
		series[1].Rate = an->an_tx_rate1sp;
		series[2].Rate = an->an_tx_rate2sp;
		series[3].Rate = an->an_tx_rate3sp;
	} else {
		series[1].Rate = an->an_tx_rate1;
		series[2].Rate = an->an_tx_rate2;
		series[3].Rate = an->an_tx_rate3;
	}
#endif

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, ds,
	    ds, !is_pspoll, ctsrate, 0, series, 4, flags);

	ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);

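	/*
	 * Chain the buffer onto the tx queue: hand it straight to the
	 * hardware when the queue is idle, otherwise link it in behind
	 * the last queued descriptor via ds_link.
	 */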
	mutex_enter(&txq->axq_lock);
	list_insert_tail(&txq->axq_list, bf);
	if (txq->axq_link == NULL) {
		(void) ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
	} else {
		*txq->axq_link = bf->bf_daddr;
	}
	txq->axq_link = &ds->ds_link;
	mutex_exit(&txq->axq_lock);

	(void) ath9k_hw_txstart(ah, txq->axq_qnum);

	ic->ic_stats.is_tx_frags++;
	ic->ic_stats.is_tx_bytes += pktlen;

	return (0);
}

/*
 * Transmit a frame.  Management frames come directly from the
 * 802.11 layer and do not honor the send-queue flow control.
 * Upon failure the caller should free mp.
 */
int
arn_tx(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *in = NULL;
	struct ath_buf *bf = NULL;
	struct ieee80211_frame *wh;
	int error = 0;

	ASSERT(mp->b_next == NULL);
	/* should check later */
	if (sc->sc_flags & SC_OP_INVALID) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		return (ENXIO);
	}

	/* Grab a TX buffer */
	mutex_enter(&sc->sc_txbuflock);
	bf = list_head(&sc->sc_txbuf_list);
	/* Check if a tx buffer is available */
	if (bf != NULL)
		list_remove(&sc->sc_txbuf_list, bf);
	if (list_empty(&sc->sc_txbuf_list)) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): "
		    "stop queue\n"));
		sc->sc_stats.ast_tx_qstop++;
	}
	mutex_exit(&sc->sc_txbuflock);
	if (bf == NULL) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): discard, "
		    "no xmit buf\n"));
		ic->ic_stats.is_tx_nobuf++;
		if ((type & IEEE80211_FC0_TYPE_MASK) ==
		    IEEE80211_FC0_TYPE_DATA) {
			sc->sc_stats.ast_tx_nobuf++;
			mutex_enter(&sc->sc_resched_lock);
			sc->sc_resched_needed = B_TRUE;
			mutex_exit(&sc->sc_resched_lock);
		} else {
			sc->sc_stats.ast_tx_nobufmgt++;
			freemsg(mp);
		}
		return (ENOMEM);
	}

	wh = (struct ieee80211_frame *)mp->b_rptr;

	/* Locate node */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (in == NULL) {
		error = EIO;
		goto bad;
	}

	in->in_inact = 0;
	switch (type & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_DATA:
		(void) ieee80211_encap(ic, mp, in);
		break;
	default:
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			/* fill time stamp */
			uint64_t tsf;
			uint32_t *tstamp;

			tsf = ath9k_hw_gettsf64(ah);
			/* adjust 100us delay to xmit */
			tsf += 100;
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			tstamp = (uint32_t *)&wh[1];
			tstamp[0] = LE_32(tsf & 0xffffffff);
			tstamp[1] = LE_32(tsf >> 32);
		}
		sc->sc_stats.ast_tx_mgmt++;
		break;
	}

	error = arn_tx_start(sc, in, bf, mp);

	if (error != 0) {
bad:
		ic->ic_stats.is_tx_failed++;
		if (bf != NULL) {
			mutex_enter(&sc->sc_txbuflock);
			list_insert_tail(&sc->sc_txbuf_list, bf);
			mutex_exit(&sc->sc_txbuflock);
		}
	}
	if (in != NULL)
		ieee80211_free_node(in);
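	/*
	 * mp is freed here on success and for non-data frames; on a
	 * failed data frame it is left to the caller, per the note
	 * above this function.
	 */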
	if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
	    error == 0) {
		freemsg(mp);
	}

	return (error);
}

static void
arn_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds = bf->bf_desc;
	const struct ath_tx_status *ts = &ds->ds_txstat;

	ARN_DBG((ARN_DBG_XMIT, "arn: T(%p %p) %08x %08x %08x %08x %08x"
	    " %08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
	    !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
}

/* Process completed xmit descriptors from the specified queue */

static int
arn_tx_processq(struct arn_softc *sc, struct ath_txq *txq)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ieee80211_node *in;
	int32_t sr, lr, nacked = 0;
	struct ath_tx_status *ts;
	int status;
	struct ath_node *an;

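	/*
	 * Reap completed descriptors from the head of the queue,
	 * stopping at the first one the hardware is still working on.
	 */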
	for (;;) {
		mutex_enter(&txq->axq_lock);
		bf = list_head(&txq->axq_list);
		if (bf == NULL) {
			txq->axq_link = NULL;
			mutex_exit(&txq->axq_lock);
			break;
		}
		ds = bf->bf_desc;	/* last descriptor */
		ts = &ds->ds_txstat;
		status = ath9k_hw_txprocdesc(ah, ds);

#ifdef DEBUG
		arn_printtxbuf(bf, status == 0);
#endif

		if (status == EINPROGRESS) {
			mutex_exit(&txq->axq_lock);
			break;
		}
		list_remove(&txq->axq_list, bf);
		mutex_exit(&txq->axq_lock);
		in = bf->bf_in;
		if (in != NULL) {
			an = ATH_NODE(in);
			/* Successful transmission */
			if (ts->ts_status == 0) {
				an->an_tx_ok++;
				an->an_tx_antenna = ts->ts_antenna;
				sc->sc_stats.ast_tx_rssidelta =
				    ts->ts_rssi - sc->sc_stats.ast_tx_rssi;
				sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
			} else {
				an->an_tx_err++;
				if (ts->ts_status & ATH9K_TXERR_XRETRY) {
					sc->sc_stats.ast_tx_xretries++;
				}
				if (ts->ts_status & ATH9K_TXERR_FIFO) {
					sc->sc_stats.ast_tx_fifoerr++;
				}
				if (ts->ts_status & ATH9K_TXERR_FILT) {
					sc->sc_stats.ast_tx_filtered++;
				}
				an->an_tx_antenna = 0;	/* invalidate */
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
			    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
				/*
				 * If the frame was ack'd, update the last rx
				 * time used to work around phantom bmiss
				 * interrupts.
				 */
				if (ts->ts_status == 0) {
					nacked++;
					an->an_tx_ok++;
				} else {
					an->an_tx_err++;
				}
				an->an_tx_retr += sr + lr;
			}
		}
		bf->bf_in = NULL;
		mutex_enter(&sc->sc_txbuflock);
		list_insert_tail(&sc->sc_txbuf_list, bf);
		mutex_exit(&sc->sc_txbuflock);

		/*
		 * Reschedule stalled outbound packets
		 */
		mutex_enter(&sc->sc_resched_lock);
		if (sc->sc_resched_needed) {
			sc->sc_resched_needed = B_FALSE;
			mac_tx_update(ic->ic_mach);
		}
		mutex_exit(&sc->sc_resched_lock);
	}

	return (nacked);
}

static void
arn_tx_handler(struct arn_softc *sc)
{
	int i;
	int nacked = 0;
	uint32_t qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
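
	/* Narrow the mask to the queues the hal says raised tx interrupts. */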
	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) {
			nacked += arn_tx_processq(sc, &sc->sc_txq[i]);
		}
	}

	if (nacked)
		sc->sc_lastrx = ath9k_hw_gettsf64(sc->sc_ah);
}

/* Deferred processing of transmit interrupt */

void
arn_tx_int_proc(void *arg)
{
	struct arn_softc *sc = arg;
	arn_tx_handler(sc);
}
1017