1 /*
2 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
3 * Use is subject to license terms.
4 */
5
6 /*
7 * Copyright (c) 2008 Atheros Communications Inc.
8 *
9 * Permission to use, copy, modify, and/or distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21 #include <sys/param.h>
22 #include <sys/types.h>
23 #include <sys/signal.h>
24 #include <sys/stream.h>
25 #include <sys/termio.h>
26 #include <sys/errno.h>
27 #include <sys/file.h>
28 #include <sys/cmn_err.h>
29 #include <sys/stropts.h>
30 #include <sys/strsubr.h>
31 #include <sys/strtty.h>
32 #include <sys/kbio.h>
33 #include <sys/cred.h>
34 #include <sys/stat.h>
35 #include <sys/consdev.h>
36 #include <sys/kmem.h>
37 #include <sys/modctl.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/pci.h>
41 #include <sys/errno.h>
42 #include <sys/mac_provider.h>
43 #include <sys/dlpi.h>
44 #include <sys/ethernet.h>
45 #include <sys/list.h>
46 #include <sys/byteorder.h>
47 #include <sys/strsun.h>
48 #include <sys/policy.h>
49 #include <inet/common.h>
50 #include <inet/nd.h>
51 #include <inet/mi.h>
52 #include <inet/wifi_ioctl.h>
53 #include <sys/mac_wifi.h>
54
55 #include "arn_core.h"
56
57 #define BITS_PER_BYTE 8
58 #define OFDM_PLCP_BITS 22
59 #define HT_RC_2_MCS(_rc) ((_rc) & 0x0f)
60 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
61 #define L_STF 8
62 #define L_LTF 8
63 #define L_SIG 4
64 #define HT_SIG 8
65 #define HT_STF 4
66 #define HT_LTF(_ns) (4 * (_ns))
67 #define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
68 #define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
69 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
70 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
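/*
 * Worked example (illustrative, not from the original source): _ns
 * above counts OFDM symbols and _usec counts microseconds. With the
 * full GI a symbol lasts 4 us, so SYMBOL_TIME(10) = 40 us; with the
 * half GI it lasts 3.6 us, so SYMBOL_TIME_HALFGI(10) =
 * (10 * 18 + 4) / 5 = 36 us, the "+ 4" making the integer division
 * round up. In the other direction, NUM_SYMBOLS_PER_USEC(8) = 2 and
 * NUM_SYMBOLS_PER_USEC_HALFGI(8) = (8 * 5 - 4) / 18 = 2.
 */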
71
72 #define OFDM_SIFS_TIME 16
73
74 static uint32_t bits_per_symbol[][2] = {
75 /* 20MHz 40MHz */
76 { 26, 54 }, /* 0: BPSK */
77 { 52, 108 }, /* 1: QPSK 1/2 */
78 { 78, 162 }, /* 2: QPSK 3/4 */
79 { 104, 216 }, /* 3: 16-QAM 1/2 */
80 { 156, 324 }, /* 4: 16-QAM 3/4 */
81 { 208, 432 }, /* 5: 64-QAM 2/3 */
82 { 234, 486 }, /* 6: 64-QAM 3/4 */
83 { 260, 540 }, /* 7: 64-QAM 5/6 */
84 { 52, 108 }, /* 8: BPSK */
85 { 104, 216 }, /* 9: QPSK 1/2 */
86 { 156, 324 }, /* 10: QPSK 3/4 */
87 { 208, 432 }, /* 11: 16-QAM 1/2 */
88 { 312, 648 }, /* 12: 16-QAM 3/4 */
89 { 416, 864 }, /* 13: 64-QAM 2/3 */
90 { 468, 972 }, /* 14: 64-QAM 3/4 */
91 { 520, 1080 }, /* 15: 64-QAM 5/6 */
92 };
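/*
 * A quick sanity check on the table above (illustrative): MCS 15 at
 * 40 MHz carries 1080 bits per symbol; at 4 us per symbol that is
 * 1080 / 4 = 270 Mbit/s, and with the 3.6 us half-GI symbol it is
 * 1080 / 3.6 = 300 Mbit/s -- the familiar two-stream 802.11n peak
 * rates.
 */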
93
94 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
95
96 #ifdef ARN_TX_AGGREGATION
97 static void arn_tx_send_ht_normal(struct arn_softc *sc, struct ath_txq *txq,
98 struct ath_atx_tid *tid, list_t *bf_list);
99 static void arn_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
100 list_t *bf_q, int txok, int sendbar);
101 static void arn_tx_txqaddbuf(struct arn_softc *sc, struct ath_txq *txq,
102 list_t *buf_list);
103 static void arn_buf_set_rate(struct arn_softc *sc, struct ath_buf *bf);
104 static int arn_tx_num_badfrms(struct arn_softc *sc,
105 struct ath_buf *bf, int txok);
106 static void arn_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
107 int nbad, int txok, boolean_t update_rc);
108 #endif
109
110 static void
111 arn_get_beaconconfig(struct arn_softc *sc, struct ath_beacon_config *conf)
112 {
113 ieee80211com_t *ic = (ieee80211com_t *)sc;
114 struct ieee80211_node *in = ic->ic_bss;
115
116 /* fill in beacon config data */
117
118 conf->beacon_interval = in->in_intval ?
119 in->in_intval : ATH_DEFAULT_BINTVAL;
120 conf->listen_interval = 100;
121 conf->dtim_count = 1;
122 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
123 }
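/*
 * For example, with the defaults filled in above, and assuming
 * ATH_DEFAULT_BMISS_LIMIT is 10, conf->bmiss_timeout works out to
 * 10 * 100 = 1000, i.e. ten listen intervals' worth of time without
 * a beacon before a beacon miss is declared.
 */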
124
125 /* Aggregation logic */
126
127 #ifdef ARN_TX_AGGREGATION
128
129 /* Check if it's okay to send out aggregates */
130 static int
131 arn_aggr_query(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
132 {
133 struct ath_atx_tid *tid;
134 tid = ATH_AN_2_TID(an, tidno);
135
136 if (tid->state & AGGR_ADDBA_COMPLETE ||
137 tid->state & AGGR_ADDBA_PROGRESS)
138 return (1);
139 else
140 return (0);
141 }
142
143 /*
144 * queue up a dest/ac pair for tx scheduling
145 * NB: must be called with txq lock held
146 */
147 static void
148 arn_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
149 {
150 struct ath_atx_ac *ac = tid->ac;
151
152 /* if tid is paused, hold off */
153 if (tid->paused)
154 return;
155
156 /* add tid to ac at most once */
157 if (tid->sched)
158 return;
159
160 tid->sched = B_TRUE;
161 list_insert_tail(&ac->tid_q, &tid->list);
162
163 /* add node ac to txq at most once */
164 if (ac->sched)
165 return;
166
167 ac->sched = B_TRUE;
168 list_insert_tail(&txq->axq_acq, &ac->list);
169 }
170
171 /* pause a tid */
172 static void
173 arn_tx_pause_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
174 {
175 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
176
177 mutex_enter(&txq->axq_lock);
178
179 tid->paused++;
180
181 mutex_exit(&txq->axq_lock);
182 }
183
184 /* resume a tid and schedule aggregate */
185 void
186 arn_tx_resume_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
187 {
188 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
189
190 ASSERT(tid->paused > 0);
191 mutex_enter(&txq->axq_lock);
192
193 tid->paused--;
194
195 if (tid->paused > 0)
196 goto unlock;
197
198 if (list_empty(&tid->buf_q))
199 goto unlock;
200
201 /*
202 * Add this TID to scheduler and try to send out aggregates
203 */
204 arn_tx_queue_tid(txq, tid);
205 arn_txq_schedule(sc, txq);
206 unlock:
207 mutex_exit(&txq->axq_lock);
208 }
209
210 /* flush a tid's software queue and send frames as non-AMPDUs */
211 static void
212 arn_tx_flush_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
213 {
214 struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
215 struct ath_buf *bf;
216
217 list_t list;
218 list_create(&list, sizeof (struct ath_buf),
219 offsetof(struct ath_buf, bf_node));
220
221 ASSERT(tid->paused > 0);
222 mutex_enter(&txq->axq_lock);
223
224 tid->paused--;
225
226 if (tid->paused > 0) {
227 mutex_exit(&txq->axq_lock);
228 return;
229 }
230
231 while (!list_empty(&tid->buf_q)) {
232 bf = list_head(&tid->buf_q);
233 ASSERT(!bf_isretried(bf));
234 list_remove(&tid->buf_q, bf);
235 list_insert_tail(&list, bf);
236 arn_tx_send_ht_normal(sc, txq, tid, &list);
237 }
238
239 mutex_exit(&txq->axq_lock);
240 }
241
242 /* Update block ack window */
243 static void
244 arn_tx_update_baw(struct arn_softc *sc, struct ath_atx_tid *tid, int seqno)
245 {
246 int index, cindex;
247
248 index = ATH_BA_INDEX(tid->seq_start, seqno);
249 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
250
251 tid->tx_buf[cindex] = NULL;
252
253 while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
254 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
255 INCR(tid->baw_head, ATH_TID_MAX_BUFS);
256 }
257 }
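/*
 * Illustrative walk-through (assumed values): with tid->seq_start at
 * 100, a completion for seqno 100 clears the slot at baw_head, and the
 * while loop then slides the window forward over every leading NULL
 * slot; if seqno 101 had completed earlier, seq_start jumps straight
 * to 102. A completion for seqno 102 alone only clears its slot --
 * the window stays put until the frame at the head completes.
 */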
258
259 /* Add a sub-frame to block ack window */
260 static void
261 arn_tx_addto_baw(struct arn_softc *sc, struct ath_atx_tid *tid,
262 struct ath_buf *bf)
263 {
264 int index, cindex;
265
266 if (bf_isretried(bf))
267 return;
268
269 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
270 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
271
272 ASSERT(tid->tx_buf[cindex] == NULL);
273 tid->tx_buf[cindex] = bf;
274
275 if (index >= ((tid->baw_tail - tid->baw_head) &
276 (ATH_TID_MAX_BUFS - 1))) {
277 tid->baw_tail = cindex;
278 INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
279 }
280 }
281
282 /*
283 * TODO: For frame(s) that are in the retry state, we will reuse the
284 * sequence number(s) without setting the retry bit. The
285 * alternative is to give up on these and BAR the receiver's window
286 * forward.
287 */
288 static void
289 arn_tid_drain(struct arn_softc *sc,
290 struct ath_txq *txq,
291 struct ath_atx_tid *tid)
292
293 {
294 struct ath_buf *bf;
295
296 list_t list;
297 list_create(&list, sizeof (struct ath_buf),
298 offsetof(struct ath_buf, bf_node));
299
300 for (;;) {
301 if (list_empty(&tid->buf_q))
302 break;
303
304 bf = list_head(&tid->buf_q);
305 list_remove(&tid->buf_q, bf);
306 list_insert_tail(&list, bf);
307
308 if (bf_isretried(bf))
309 arn_tx_update_baw(sc, tid, bf->bf_seqno);
310
311 mutex_enter(&txq->axq_lock);
312 arn_tx_complete_buf(sc, bf, &list, 0, 0);
313 mutex_exit(&txq->axq_lock);
314 }
315
316 tid->seq_next = tid->seq_start;
317 tid->baw_tail = tid->baw_head;
318 }
319
320 static void
321 arn_tx_set_retry(struct arn_softc *sc, struct ath_buf *bf)
322 {
323 struct ieee80211_frame *wh;
324 wh = (struct ieee80211_frame *)bf->bf_dma.mem_va;
325
326 bf->bf_state.bf_type |= BUF_RETRY;
327 bf->bf_retries++;
328
329 *(uint16_t *)&wh->i_seq[0] |= LE_16(0x0800); /* ??? */
330 }
331
332 static struct ath_buf *
333 arn_clone_txbuf(struct arn_softc *sc, struct ath_buf *bf)
334 {
335 struct ath_buf *tbf;
336
337 mutex_enter(&sc->sc_txbuflock);
338 ASSERT(!list_empty((&sc->sc_txbuf_list)));
339
340 tbf = list_head(&sc->sc_txbuf_list);
341 list_remove(&sc->sc_txbuf_list, tbf);
342 mutex_exit(&sc->sc_txbuflock);
343
344 ATH_TXBUF_RESET(tbf);
345
346 tbf->bf_daddr = bf->bf_daddr; /* physical addr of desc */
347 tbf->bf_dma = bf->bf_dma; /* dma area for buf */
348 *(tbf->bf_desc) = *(bf->bf_desc); /* virtual addr of desc */
349 tbf->bf_state = bf->bf_state; /* buffer state */
350
351 return (tbf);
352 }
353
354 static void
355 arn_tx_complete_aggr(struct arn_softc *sc, struct ath_txq *txq,
356 struct ath_buf *bf, list_t *bf_q, int txok)
357 {
358 struct ieee80211_node *in;
359 struct ath_node *an = NULL;
360 struct ath_atx_tid *tid = NULL;
361 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
362 struct ath_desc *ds = bf_last->bf_desc;
363
364 list_t list, list_pending;
365 uint16_t seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
366 uint32_t ba[WME_BA_BMP_SIZE >> 5];
367 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
368 boolean_t rc_update = B_TRUE;
369
370 in = bf->bf_in; an = ATH_NODE(in); /* bf_in set at queue time; must not be NULL */
371 tid = ATH_AN_2_TID(an, bf->bf_tidno);
372
373 isaggr = bf_isaggr(bf);
374 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
375
376 if (isaggr && txok) {
377 if (ATH_DS_TX_BA(ds)) {
378 seq_st = ATH_DS_BA_SEQ(ds);
379 memcpy(ba, ATH_DS_BA_BITMAP(ds),
380 WME_BA_BMP_SIZE >> 3);
381 } else {
382 /*
383 * AR5416 can become deaf/mute when BA
384 * issue happens. Chip needs to be reset.
385 * But AP code may have synchronization issues
386 * when performing an internal reset in this routine.
387 * Only enable reset in STA mode for now.
388 */
389 if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
390 needreset = 1;
391 }
392 }
393
394 list_create(&list_pending, sizeof (struct ath_buf),
395 offsetof(struct ath_buf, bf_node));
396 list_create(&list, sizeof (struct ath_buf),
397 offsetof(struct ath_buf, bf_node));
398
399 nbad = arn_tx_num_badfrms(sc, bf, txok);
400 while (bf) {
401 txfail = txpending = 0;
402 bf_next = bf->bf_next;
403
404 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
405 /*
406 * transmit completion, subframe is
407 * acked by block ack
408 */
409 acked_cnt++;
410 } else if (!isaggr && txok) {
411 /* transmit completion */
412 acked_cnt++;
413 } else {
414 if (!(tid->state & AGGR_CLEANUP) &&
415 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
416 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
417 arn_tx_set_retry(sc, bf);
418 txpending = 1;
419 } else {
420 bf->bf_state.bf_type |= BUF_XRETRY;
421 txfail = 1;
422 sendbar = 1;
423 txfail_cnt++;
424 }
425 } else {
426 /*
427 * cleanup in progress, just fail
428 * the un-acked sub-frames
429 */
430 txfail = 1;
431 }
432 }
433
434 if (bf_next == NULL) {
435 /* INIT_LIST_HEAD */
436 list_create(&list, sizeof (struct ath_buf),
437 offsetof(struct ath_buf, bf_node));
438 } else {
439 ASSERT(!list_empty(bf_q));
440 list_remove(bf_q, bf);
441 list_insert_tail(&list, bf);
442 }
443
444 if (!txpending) {
445 /*
446 * complete the acked-ones/xretried ones; update
447 * block-ack window
448 */
449 mutex_enter(&txq->axq_lock);
450 arn_tx_update_baw(sc, tid, bf->bf_seqno);
451 mutex_exit(&txq->axq_lock);
452
453 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
454 arn_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
455 rc_update = B_FALSE;
456 } else {
457 arn_tx_rc_status(bf, ds, nbad, txok, B_FALSE);
458 }
459 
460 arn_tx_complete_buf(sc, bf, &list, !txfail, sendbar);
461 } else {
462 /* retry the un-acked ones */
463 if (bf->bf_next == NULL &&
464 bf_last->bf_status & ATH_BUFSTATUS_STALE) {
465 struct ath_buf *tbf;
466
467 tbf = arn_clone_txbuf(sc, bf_last);
468 ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
469 list_insert_tail(&list, tbf);
470 } else {
471 /*
472 * Clear descriptor status words for
473 * software retry
474 */
475 ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
476 }
477
478 /*
479 * Put this buffer to the temporary pending
480 * queue to retain ordering
481 */
482 list_splice_tail_init(&list, &list_pending);
483 /*
484 * Insert src list after dst list.
485 * Empty src list thereafter
486 */
487 list_move_tail(&list_pending, &list);
488 /* should re-initialize list here??? */
489 }
490
491 bf = bf_next;
492 }
493
494 if (tid->state & AGGR_CLEANUP) {
495 if (tid->baw_head == tid->baw_tail) {
496 tid->state &= ~AGGR_ADDBA_COMPLETE;
497 tid->addba_exchangeattempts = 0;
498 tid->state &= ~AGGR_CLEANUP;
499
500 /* send buffered frames as singles */
501 arn_tx_flush_tid(sc, tid);
502 }
503 return;
504 }
505
506 /*
507 * prepend un-acked frames to the beginning of
508 * the pending frame queue
509 */
510
511 if (!list_empty(&list_pending)) {
512 mutex_enter(&txq->axq_lock);
513 list_move_tail(&list_pending, &tid->buf_q);
514 arn_tx_queue_tid(txq, tid);
515 mutex_exit(&txq->axq_lock);
516 }
517 }
518
519 static uint32_t
520 arn_lookup_rate(struct arn_softc *sc, struct ath_buf *bf,
521 struct ath_atx_tid *tid)
522 {
523 struct ath_rate_table *rate_table = sc->sc_currates;
524 struct ath9k_tx_rate *rates;
525 struct ath_tx_info_priv *tx_info_priv;
526 uint32_t max_4ms_framelen, frmlen;
527 uint16_t aggr_limit, legacy = 0, maxampdu;
528 int i;
529
530 /* ??? */
531 rates = (struct ath9k_tx_rate *)bf->rates;
532 tx_info_priv = (struct ath_tx_info_priv *)&bf->tx_info_priv;
533
534 /*
535 * Find the lowest frame length among the rate series that will have a
536 * 4ms transmit duration.
537 * TODO - TXOP limit needs to be considered.
538 */
539 max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
540
541 for (i = 0; i < 4; i++) {
542 if (rates[i].count) {
543 if (!WLAN_RC_PHY_HT
544 (rate_table->info[rates[i].idx].phy)) {
545 legacy = 1;
546 break;
547 }
548
549 frmlen =
550 rate_table->info[rates[i].idx].max_4ms_framelen;
551 max_4ms_framelen = min(max_4ms_framelen, frmlen);
552 }
553 }
554
555 /*
556 * limit aggregate size by the minimum rate if the selected rate is
557 * not a probe rate; if the selected rate is a probe rate, then
558 * avoid aggregation of this packet.
559 */
560 if (legacy)
561 return (0);
562
563 aggr_limit = min(max_4ms_framelen, (uint32_t)ATH_AMPDU_LIMIT_DEFAULT);
564
565 /*
566 * h/w can accept aggregates up to 16-bit lengths (65535).
567 * The IE, however, can hold up to 65536, which shows up here
568 * as zero. Ignore 65536, since we are constrained by the h/w.
569 */
570 maxampdu = tid->an->maxampdu;
571 if (maxampdu)
572 aggr_limit = min(aggr_limit, maxampdu);
573
574 return (aggr_limit);
575 }
576
577 /*
578 * Returns the number of delimiters to be added to
579 * meet the minimum required mpdudensity.
580 * The caller should make sure that the rate is an HT rate.
581 */
582 static int
583 arn_compute_num_delims(struct arn_softc *sc, struct ath_atx_tid *tid,
584 struct ath_buf *bf, uint16_t frmlen)
585 {
586 struct ath_rate_table *rt = sc->sc_currates;
587 struct ath9k_tx_rate *rates = (struct ath9k_tx_rate *)bf->rates;
588 uint32_t nsymbits, nsymbols, mpdudensity;
589 uint16_t minlen;
590 uint8_t rc, flags, rix;
591 int width, half_gi, ndelim, mindelim;
592
593 /* Select standard number of delimiters based on frame length alone */
594 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
595
596 /*
597 * If encryption is enabled, the hardware requires some more padding between
598 * subframes.
599 * TODO - this could be improved to be dependent on the rate.
600 * The hardware can keep up at lower rates, but not higher rates
601 */
602 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
603 ndelim += ATH_AGGR_ENCRYPTDELIM;
604
605 /*
606 * Convert the desired mpdu density from microseconds to bytes based
607 * on the highest rate in the rate series (i.e. the first rate) to
608 * determine the required minimum length for a subframe. Take into
609 * account whether the high rate is 20 or 40 MHz and half or full GI.
610 */
611 mpdudensity = tid->an->mpdudensity;
612
613 /*
614 * If there is no mpdu density restriction, no further calculation
615 * is needed.
616 */
617 if (mpdudensity == 0)
618 return (ndelim);
619
620 rix = rates[0].idx;
621 flags = rates[0].flags;
622 rc = rt->info[rix].ratecode;
623 width = (flags & ATH9K_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
624 half_gi = (flags & ATH9K_TX_RC_SHORT_GI) ? 1 : 0;
625
626 if (half_gi)
627 nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
628 else
629 nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
630
631 if (nsymbols == 0)
632 nsymbols = 1;
633
634 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
635 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
636
637 if (frmlen < minlen) {
638 mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
639 ndelim = max(mindelim, ndelim);
640 }
641
642 return (ndelim);
643 }
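/*
 * Worked example (values assumed for illustration): for an mpdu
 * density of 8 us, full GI and MCS 7 at 20 MHz (260 bits per symbol),
 * nsymbols = 8 >> 2 = 2 and minlen = (2 * 260) / 8 = 65 bytes. A
 * 40-byte subframe then falls 25 bytes short and, assuming
 * ATH_AGGR_DELIM_SZ is 4, needs mindelim = 25 / 4 = 6 extra
 * delimiters on top of the standard count.
 */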
644
645 static enum ATH_AGGR_STATUS
646 arn_tx_form_aggr(struct arn_softc *sc, struct ath_atx_tid *tid,
647 list_t *bf_q)
648 {
649 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
650 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
651 int rl = 0, nframes = 0, ndelim, prev_al = 0;
652 uint16_t aggr_limit = 0, al = 0, bpad = 0,
653 al_delta, h_baw = tid->baw_size / 2;
654 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
655
656 bf_first = list_head(&tid->buf_q);
657
658 do {
659 bf = list_head(&tid->buf_q);
660
661 /* do not step over block-ack window */
662 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
663 status = ATH_AGGR_BAW_CLOSED;
664 break;
665 }
666
667 if (!rl) {
668 aggr_limit = arn_lookup_rate(sc, bf, tid);
669 rl = 1;
670 }
671
672 /* do not exceed aggregation limit */
673 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
674
675 if (nframes &&
676 (aggr_limit < (al + bpad + al_delta + prev_al))) {
677 status = ATH_AGGR_LIMITED;
678 break;
679 }
680
681 /* do not exceed subframe limit */
682 if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
683 status = ATH_AGGR_LIMITED;
684 break;
685 }
686 nframes++;
687
688 /* add padding for previous frame to aggregation length */
689 al += bpad + al_delta;
690
691 /*
692 * Get the delimiters needed to meet the MPDU
693 * density for this node.
694 */
695 ndelim =
696 arn_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
697 bpad = PADBYTES(al_delta) + (ndelim << 2);
698
699 bf->bf_next = NULL;
700 bf->bf_desc->ds_link = 0;
701
702 /* link buffers of this frame to the aggregate */
703 arn_tx_addto_baw(sc, tid, bf);
704 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
705 list_remove(&tid->buf_q, bf);
706 list_insert_tail(bf_q, bf);
707 if (bf_prev) {
708 bf_prev->bf_next = bf;
709 bf_prev->bf_desc->ds_link = bf->bf_daddr;
710 }
711 bf_prev = bf;
712 } while (!list_empty(&tid->buf_q));
713
714 bf_first->bf_al = al;
715 bf_first->bf_nframes = nframes;
716
717 return (status);
718 #undef PADBYTES
719 }
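/*
 * A note on the accounting above (illustrative): PADBYTES() rounds a
 * subframe up to the next 4-byte boundary, e.g. PADBYTES(1537) = 3,
 * and each al_delta already includes ATH_AGGR_DELIM_SZ for the
 * subframe's own delimiter, so 'al' tracks the on-air length of the
 * aggregate as subframes are appended.
 */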
720
721 static void
722 arn_tx_sched_aggr(struct arn_softc *sc, struct ath_txq *txq,
723 struct ath_atx_tid *tid)
724 {
725 struct ath_buf *bf;
726 enum ATH_AGGR_STATUS status;
727 list_t bf_q;
728
729 do {
730 if (list_empty(&tid->buf_q))
731 return;
732
733 /* INIT_LIST_HEAD */
734 list_create(&bf_q, sizeof (struct ath_buf),
735 offsetof(struct ath_buf, bf_node));
736
737 status = arn_tx_form_aggr(sc, tid, &bf_q);
738
739 /*
740 * no frames picked up to be aggregated;
741 * block-ack window is not open.
742 */
743 if (list_empty(&bf_q))
744 break;
745
746 bf = list_head(&bf_q);
747 bf->bf_lastbf = list_object(&bf_q, bf->bf_node.list_prev);
748
749 /* if only one frame, send as non-aggregate */
750 if (bf->bf_nframes == 1) {
751 bf->bf_state.bf_type &= ~BUF_AGGR;
752 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
753 arn_buf_set_rate(sc, bf);
754 arn_tx_txqaddbuf(sc, txq, &bf_q);
755 continue;
756 }
757
758 /* setup first desc of aggregate */
759 bf->bf_state.bf_type |= BUF_AGGR;
760 arn_buf_set_rate(sc, bf);
761 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
762
763 /* anchor last desc of aggregate */
764 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
765
766 txq->axq_aggr_depth++;
767 arn_tx_txqaddbuf(sc, txq, &bf_q);
768
769 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
770 status != ATH_AGGR_BAW_CLOSED);
771 }
772
773 int
774 arn_tx_aggr_start(struct arn_softc *sc, struct ieee80211_node *in,
775 uint16_t tid, uint16_t *ssn)
776 {
777 struct ath_atx_tid *txtid;
778 struct ath_node *an;
779
780 an = ATH_NODE(in);
781
782 if (sc->sc_flags & SC_OP_TXAGGR) {
783 txtid = ATH_AN_2_TID(an, tid);
784 txtid->state |= AGGR_ADDBA_PROGRESS;
785 arn_tx_pause_tid(sc, txtid);
786 *ssn = txtid->seq_start;
787 }
788
789 return (0);
790 }
791
792 int
793 arn_tx_aggr_stop(struct arn_softc *sc, struct ieee80211_node *in, uint16_t tid)
794 {
795 struct ath_node *an = ATH_NODE(in);
796 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
797 struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
798 struct ath_buf *bf;
799
800 list_t list;
801 list_create(&list, sizeof (struct ath_buf),
802 offsetof(struct ath_buf, bf_node));
803
804 if (txtid->state & AGGR_CLEANUP)
805 return (0);
806
807 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
808 txtid->addba_exchangeattempts = 0;
809 return (0);
810 }
811
812 arn_tx_pause_tid(sc, txtid);
813
814 /* drop all software retried frames and mark this TID */
815 mutex_enter(&txq->axq_lock);
816 while (!list_empty(&txtid->buf_q)) {
817 /* list_first_entry */
818 bf = list_head(&txtid->buf_q);
819 if (!bf_isretried(bf)) {
820 /*
821 * NB: it's based on the assumption that a
822 * software-retried frame will always stay
823 * at the head of the software queue.
824 */
825 break;
826 }
827 list_remove(&txtid->buf_q, bf);
828 list_insert_tail(&list, bf);
829 arn_tx_update_baw(sc, txtid, bf->bf_seqno);
830 // ath_tx_complete_buf(sc, bf, &list, 0, 0); /* to do */
831 }
832 mutex_exit(&txq->axq_lock);
833
834 if (txtid->baw_head != txtid->baw_tail) {
835 txtid->state |= AGGR_CLEANUP;
836 } else {
837 txtid->state &= ~AGGR_ADDBA_COMPLETE;
838 txtid->addba_exchangeattempts = 0;
839 arn_tx_flush_tid(sc, txtid);
840 }
841
842 return (0);
843 }
844
845 void
846 arn_tx_aggr_resume(struct arn_softc *sc,
847 struct ieee80211_node *in,
848 uint16_t tid)
849 {
850 struct ath_atx_tid *txtid;
851 struct ath_node *an;
852
853 an = ATH_NODE(in);
854
855 if (sc->sc_flags & SC_OP_TXAGGR) {
856 txtid = ATH_AN_2_TID(an, tid);
857 txtid->baw_size = (0x8) << sc->sc_ht_conf.ampdu_factor;
858 txtid->state |= AGGR_ADDBA_COMPLETE;
859 txtid->state &= ~AGGR_ADDBA_PROGRESS;
860 arn_tx_resume_tid(sc, txtid);
861 }
862 }
863
864 boolean_t
865 arn_tx_aggr_check(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
866 {
867 struct ath_atx_tid *txtid;
868
869 if (!(sc->sc_flags & SC_OP_TXAGGR))
870 return (B_FALSE);
871
872 txtid = ATH_AN_2_TID(an, tidno);
873
874 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
875 if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
876 (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
877 txtid->addba_exchangeattempts++;
878 return (B_TRUE);
879 }
880 }
881
882 return (B_FALSE);
883 }
884
885 /* Queue Management */
886
887 static void
888 arn_txq_drain_pending_buffers(struct arn_softc *sc, struct ath_txq *txq)
889 {
890 struct ath_atx_ac *ac, *ac_tmp;
891 struct ath_atx_tid *tid, *tid_tmp;
892
893 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
894 list_remove(&txq->axq_acq, ac);
895 ac->sched = B_FALSE;
896 list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q) {
897 list_remove(&ac->tid_q, tid);
898 tid->sched = B_FALSE;
899 arn_tid_drain(sc, txq, tid);
900 }
901 }
902 }
903
904 int
905 arn_tx_get_qnum(struct arn_softc *sc, int qtype, int haltype)
906 {
907 int qnum;
908
909 switch (qtype) {
910 case ATH9K_TX_QUEUE_DATA:
911 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
912 ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_get_qnum(): "
913 "HAL AC %u out of range, max %zu!\n",
914 haltype, ARRAY_SIZE(sc->sc_haltype2q)));
915 return (-1);
916 }
917 qnum = sc->sc_haltype2q[haltype];
918 break;
919 case ATH9K_TX_QUEUE_BEACON:
920 qnum = sc->sc_beaconq;
921 break;
922 case ATH9K_TX_QUEUE_CAB:
923 qnum = sc->sc_cabq->axq_qnum;
924 break;
925 default:
926 qnum = -1;
927 }
928 return (qnum);
929 }
930
931 struct ath_txq *
932 arn_test_get_txq(struct arn_softc *sc, struct ieee80211_node *in,
933 struct ieee80211_frame *wh, uint8_t type)
934 {
935 struct ieee80211_qosframe *qwh = NULL;
936 struct ath_txq *txq = NULL;
937 int tid = -1;
938 int qos_ac = WME_AC_BE; /* default if no QoS mapping applies */
939 int qnum;
940
941 if (in->in_flags & IEEE80211_NODE_QOS) {
942
943 if ((type & IEEE80211_FC0_TYPE_MASK) ==
944 IEEE80211_FC0_TYPE_DATA) {
945
946 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
947 qwh = (struct ieee80211_qosframe *)wh;
948
949 tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
950 switch (tid) {
951 case 1:
952 case 2:
953 qos_ac = WME_AC_BK; break;
954 case 0:
955 case 3:
956 qos_ac = WME_AC_BE; break;
957 case 4:
958 case 5:
959 qos_ac = WME_AC_VI; break;
960 case 6:
961 case 7:
962 default: qos_ac = WME_AC_VO; break;
963 }
964 }
965 } else {
966 qos_ac = WME_AC_VO;
967 }
968 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
969 IEEE80211_FC0_TYPE_MGT) {
970 qos_ac = WME_AC_VO;
971 } else if ((type & IEEE80211_FC0_TYPE_MASK) ==
972 IEEE80211_FC0_TYPE_CTL) {
973 qos_ac = WME_AC_VO;
974 } else {
975 qos_ac = WME_AC_BK;
976 }
977 qnum = arn_get_hal_qnum(qos_ac, sc);
978 txq = &sc->sc_txq[qnum];
979
980 mutex_enter(&txq->axq_lock);
981
982 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
983 ARN_DBG((ARN_DBG_XMIT,
984 "TX queue: %d is full, depth: %d\n",
985 qnum, txq->axq_depth));
986 /* stop the queue */
987 sc->sc_resched_needed = B_TRUE;
988 txq->stopped = 1;
989 mutex_exit(&txq->axq_lock);
990 return (NULL);
991 }
992
993 mutex_exit(&txq->axq_lock);
994
995 return (txq);
996 }
997
998 /* Called only when tx aggregation is enabled and HT is supported */
999 static void
1000 assign_aggr_tid_seqno(struct arn_softc *sc,
1001 struct ath_buf *bf,
1002 struct ieee80211_frame *wh)
1003 {
1004 struct ath_node *an;
1005 struct ath_atx_tid *tid;
1006 struct ieee80211_node *in;
1007 struct ieee80211_qosframe *qwh = NULL;
1008 ieee80211com_t *ic = (ieee80211com_t *)sc;
1009
1010 in = ieee80211_find_txnode(ic, wh->i_addr1);
1011 if (in == NULL) {
1012 arn_problem("assign_aggr_tid_seqno():"
1013 "failed to find tx node\n");
1014 return;
1015 }
1016 an = ATH_NODE(in);
1017
1018 /* Get tidno */
1019 if (in->in_flags & IEEE80211_NODE_QOS) {
1020 if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
1021 qwh = (struct ieee80211_qosframe *)wh;
1022 bf->bf_tidno = qwh->i_qos[0] & IEEE80211_QOS_TID;
1023 }
1024 }
1025
1026 /* Get seqno */
1027 /*
1028 * For HT capable stations, we save tidno for later use.
1029 * We also override seqno set by upper layer with the one
1030 * in tx aggregation state.
1031 *
1032 * If fragmentation is on, the sequence number is
1033 * not overridden, since it has been
1034 * incremented by the fragmentation routine.
1035 *
1036 * FIXME: check if the fragmentation threshold exceeds
1037 * IEEE80211 max.
1038 */
1039 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1040
1041 *(uint16_t *)&wh->i_seq[0] =
1042 LE_16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1043 bf->bf_seqno = tid->seq_next;
1044 /* LINTED E_CONSTANT_CONDITION */
1045 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1046
1047 /* release node */
1048 ieee80211_free_node(in);
1049 }
1050
1051 /* Compute the number of bad frames */
1052 /* ARGSUSED */
1053 static int
1054 arn_tx_num_badfrms(struct arn_softc *sc, struct ath_buf *bf, int txok)
1055 {
1056 struct ath_buf *bf_last = bf->bf_lastbf;
1057 struct ath_desc *ds = bf_last->bf_desc;
1058 uint16_t seq_st = 0;
1059 uint32_t ba[WME_BA_BMP_SIZE >> 5];
1060 int ba_index;
1061 int nbad = 0;
1062 int isaggr = 0;
1063
1064 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
1065 return (0);
1066
1067 isaggr = bf_isaggr(bf);
1068 if (isaggr) {
1069 seq_st = ATH_DS_BA_SEQ(ds);
1070 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
1071 }
1072
1073 while (bf) {
1074 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1075 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1076 nbad++;
1077
1078 bf = bf->bf_next;
1079 }
1080
1081 return (nbad);
1082 }
1083
1084 static void
1085 arn_tx_send_ht_normal(struct arn_softc *sc,
1086 struct ath_txq *txq,
1087 struct ath_atx_tid *tid,
1088 list_t *list)
1089 {
1090 struct ath_buf *bf;
1091
1092 bf = list_head(list);
1093 bf->bf_state.bf_type &= ~BUF_AMPDU;
1094
1095 /* update starting sequence number for subsequent ADDBA request */
1096 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1097
1098 bf->bf_nframes = 1;
1099 bf->bf_lastbf = bf;
1100 arn_buf_set_rate(sc, bf);
1101 arn_tx_txqaddbuf(sc, txq, list);
1102 }
1103
1104 /*
1105 * Insert a chain of ath_buf (descriptors) on a txq and
1106 * assume the descriptors are already chained together by the caller.
1107 */
1108 static void
1109 arn_tx_txqaddbuf(struct arn_softc *sc,
1110 struct ath_txq *txq,
1111 list_t *list)
1112 {
1113 struct ath_buf *bf;
1114
1115 /*
1116 * Insert the frame on the outbound list and
1117 * pass it on to the hardware.
1118 */
1119
1120 if (list_empty(list))
1121 return;
1122
1123 bf = list_head(list);
1124
1125 list_splice_tail_init(list, &txq->axq_q);
1126
1127 txq->axq_depth++;
1128 txq->axq_totalqueued++;
1129 txq->axq_linkbuf = list_object(list, txq->axq_q.prev);
1130
1131 ARN_DBG((ARN_DBG_QUEUE,
1132 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth));
1133
1134 if (txq->axq_link == NULL) {
1135 ath9k_hw_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
1136 ARN_DBG((ARN_DBG_XMIT,
1137 "TXDP[%u] = %llx (%p)\n",
1138 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc));
1139 } else {
1140 *txq->axq_link = bf->bf_daddr;
1141 ARN_DBG((ARN_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1142 txq->axq_qnum, txq->axq_link,
1143 ito64(bf->bf_daddr), bf->bf_desc));
1144 }
1145 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1146 ath9k_hw_txstart(sc->sc_ah, txq->axq_qnum);
1147 }
1148 #endif /* ARN_TX_AGGREGATION */
1149
1150 /*
1151 * arn_pkt_duration - compute packet duration (NB: not NAV)
1152 * rix - rate index
1153 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1154 * width - 0 for 20 MHz, 1 for 40 MHz
1155 * half_gi - to use 4us v/s 3.6 us for symbol time
1156 */
1157
1158 static uint32_t
1159 /* LINTED E_STATIC_UNUSED */
1160 arn_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1161 int width, int half_gi, boolean_t shortPreamble)
1162 {
1163 struct ath_rate_table *rate_table = sc->sc_currates;
1164 uint32_t nbits, nsymbits, duration, nsymbols;
1165 uint8_t rc;
1166 int streams, pktlen;
1167
1168 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1169 rc = rate_table->info[rix].ratecode;
1170
1171 /* for legacy rates, use old function to compute packet duration */
1172 if (!IS_HT_RATE(rc))
1173 return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1174 rix, shortPreamble));
1175
1176 /* find number of symbols: PLCP + data */
1177 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1178 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1179 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1180
1181 if (!half_gi)
1182 duration = SYMBOL_TIME(nsymbols);
1183 else
1184 duration = SYMBOL_TIME_HALFGI(nsymbols);
1185
1186 /* add up duration for legacy/ht training and signal fields */
1187 streams = HT_RC_2_STREAMS(rc);
1188 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1189
1190 return (duration);
1191 }
1192
1193 static struct ath_buf *
1194 arn_tx_get_buffer(struct arn_softc *sc)
1195 {
1196 struct ath_buf *bf = NULL;
1197
1198 mutex_enter(&sc->sc_txbuflock);
1199 bf = list_head(&sc->sc_txbuf_list);
1200 /* Check if a tx buffer is available */
1201 if (bf != NULL)
1202 list_remove(&sc->sc_txbuf_list, bf);
1203 if (list_empty(&sc->sc_txbuf_list)) {
1204 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): "
1205 "stop queue\n"));
1206 sc->sc_stats.ast_tx_qstop++;
1207 }
1208 mutex_exit(&sc->sc_txbuflock);
1209
1210 return (bf);
1211 }
1212
1213 static uint32_t
1214 setup_tx_flags(struct arn_softc *sc,
1215 struct ieee80211_frame *wh,
1216 uint32_t pktlen)
1217 {
1218 int flags = 0;
1219 ieee80211com_t *ic = (ieee80211com_t *)sc;
1220
1221 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1222 flags |= ATH9K_TXDESC_INTREQ;
1223
1224 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1225 flags |= ATH9K_TXDESC_NOACK; /* no ack on broad/multicast */
1226 sc->sc_stats.ast_tx_noack++;
1227 }
1228 if (pktlen > ic->ic_rtsthreshold) {
1229 flags |= ATH9K_TXDESC_RTSENA; /* RTS based on frame length */
1230 sc->sc_stats.ast_tx_rts++;
1231 }
1232
1233 return (flags);
1234 }
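/*
 * For instance (illustrative), a multicast data frame longer than
 * ic_rtsthreshold returns CLRDMASK | INTREQ | NOACK | RTSENA here:
 * the two conditions are tested independently, even though RTS
 * protection buys little for a frame that will never be acked.
 */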
1235
1236 static void
1237 ath_tx_setup_buffer(struct arn_softc *sc, struct ath_buf *bf,
1238 struct ieee80211_node *in, struct ieee80211_frame *wh,
1239 uint32_t pktlen, uint32_t keytype)
1240 {
1241 ieee80211com_t *ic = (ieee80211com_t *)sc;
1242 int i;
1243
1244 /* Buf reset */
1245 ATH_TXBUF_RESET(bf);
1246 for (i = 0; i < 4; i++) {
1247 bf->rates[i].idx = -1;
1248 bf->rates[i].flags = 0;
1249 bf->rates[i].count = 1;
1250 }
1251
1252 bf->bf_in = in;
1253 /* LINTED E_ASSIGN_NARROW_CONV */
1254 bf->bf_frmlen = pktlen;
1255
1256 /* Frame type */
1257 IEEE80211_IS_DATA(wh) ?
1258 (bf->bf_state.bf_type |= BUF_DATA) :
1259 (bf->bf_state.bf_type &= ~BUF_DATA);
1260 IEEE80211_IS_BACK_REQ(wh) ?
1261 (bf->bf_state.bf_type |= BUF_BAR) :
1262 (bf->bf_state.bf_type &= ~BUF_BAR);
1263 IEEE80211_IS_PSPOLL(wh) ?
1264 (bf->bf_state.bf_type |= BUF_PSPOLL) :
1265 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1266 /*
1267 * The 802.11 layer marks whether or not we should
1268 * use short preamble based on the current mode and
1269 * negotiated parameters.
1270 */
1271 ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1272 (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) ?
1273 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1274 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1275
1276 bf->bf_flags = setup_tx_flags(sc, wh, pktlen);
1277
1278 /* Crypto */
1279 bf->bf_keytype = keytype;
1280
1281 /* Assign seqno, tidno for tx aggregation */
1282
1283 #ifdef ARN_TX_AGGREGATION
1284 if (ieee80211_is_data_qos(wh) && (sc->sc_flags & SC_OP_TXAGGR))
1285 assign_aggr_tid_seqno(sc, bf, wh);
1286 #endif /* ARN_TX_AGGREGATION */
1287
1288 }
1289
1290 /*
1291 * ath_pkt_duration - compute packet duration (NB: not NAV)
1292 *
1293 * rix - rate index
1294 * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1295 * width - 0 for 20 MHz, 1 for 40 MHz
1296 * half_gi - to use 4us v/s 3.6 us for symbol time
1297 */
1298 static uint32_t
1299 ath_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1300 int width, int half_gi, boolean_t shortPreamble)
1301 {
1302 struct ath_rate_table *rate_table = sc->sc_currates;
1303 uint32_t nbits, nsymbits, duration, nsymbols;
1304 uint8_t rc;
1305 int streams, pktlen;
1306
1307 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1308 rc = rate_table->info[rix].ratecode;
1309
1310 /* for legacy rates, use old function to compute packet duration */
1311 if (!IS_HT_RATE(rc))
1312 return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1313 rix, shortPreamble));
1314
1315 /* find number of symbols: PLCP + data */
1316 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1317 nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1318 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1319
1320 if (!half_gi)
1321 duration = SYMBOL_TIME(nsymbols);
1322 else
1323 duration = SYMBOL_TIME_HALFGI(nsymbols);
1324
1325 /* addup duration for legacy/ht training and signal fields */
1326 streams = HT_RC_2_STREAMS(rc);
1327 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1328
1329 return (duration);
1330 }
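/*
 * Worked example (illustrative): a 1500-byte MPDU at MCS 7, 20 MHz,
 * full GI gives nbits = 1500 * 8 + 22 = 12022 and nsymbits = 260, so
 * nsymbols = ceil(12022 / 260) = 47 and the data portion lasts
 * 47 * 4 = 188 us. Adding the one-stream training/signal overhead
 * (8 + 8 + 4 + 8 + 4 + 4 = 36 us) gives 224 us in total.
 */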
1331
1332 /* Rate module function to set rate related fields in tx descriptor */
1333 static void
1334 ath_buf_set_rate(struct arn_softc *sc,
1335 struct ath_buf *bf,
1336 struct ieee80211_frame *wh)
1337 {
1338 struct ath_hal *ah = sc->sc_ah;
1339 struct ath_rate_table *rt;
1340 struct ath_desc *ds = bf->bf_desc;
1341 struct ath_desc *lastds = bf->bf_desc; /* temporary workaround */
1342 struct ath9k_11n_rate_series series[4];
1343 struct ath9k_tx_rate *rates;
1344 int i, flags, rtsctsena = 0;
1345 uint32_t ctsduration = 0;
1346 uint8_t rix = 0, cix, ctsrate = 0;
1347
1348 (void) memset(series, 0, sizeof (struct ath9k_11n_rate_series) * 4);
1349
1350 rates = bf->rates;
1351
1352 if (IEEE80211_HAS_MOREFRAGS(wh) ||
1353 wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK) {
1354 rates[1].count = rates[2].count = rates[3].count = 0;
1355 rates[1].idx = rates[2].idx = rates[3].idx = 0;
1356 rates[0].count = ATH_TXMAXTRY;
1357 }
1358
1359 /* get the cix for the lowest valid rix */
1360 rt = sc->sc_currates;
1361 for (i = 3; i >= 0; i--) {
1362 if (rates[i].count && (rates[i].idx >= 0)) {
1363 rix = rates[i].idx;
1364 break;
1365 }
1366 }
1367
1368 flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
1369 cix = rt->info[rix].ctrl_rate;
1370
1371 /*
1372 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
1373 * just CTS. Note that this is only done for OFDM/HT unicast frames.
1374 */
1375 if (sc->sc_protmode != PROT_M_NONE &&
1376 !(bf->bf_flags & ATH9K_TXDESC_NOACK) &&
1377 (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
1378 WLAN_RC_PHY_HT(rt->info[rix].phy))) {
1379 if (sc->sc_protmode == PROT_M_RTSCTS)
1380 flags = ATH9K_TXDESC_RTSENA;
1381 else if (sc->sc_protmode == PROT_M_CTSONLY)
1382 flags = ATH9K_TXDESC_CTSENA;
1383
1384 cix = rt->info[sc->sc_protrix].ctrl_rate;
1385 rtsctsena = 1;
1386 }
1387
1388 /*
1389 * For 11n, the default behavior is to enable RTS for hw retried frames.
1390 * We enable the global flag here and let rate series flags determine
1391 * which rates will actually use RTS.
1392 */
1393 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
1394 /* 802.11g protection not needed, use our default behavior */
1395 if (!rtsctsena)
1396 flags = ATH9K_TXDESC_RTSENA;
1397 }
1398
1399 /* Set protection if aggregate protection on */
1400 if (sc->sc_config.ath_aggr_prot &&
1401 (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
1402 flags = ATH9K_TXDESC_RTSENA;
1403 cix = rt->info[sc->sc_protrix].ctrl_rate;
1404 rtsctsena = 1;
1405 }
1406
1407 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1408 if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
1409 flags &= ~(ATH9K_TXDESC_RTSENA);
1410
1411 /*
1412 * CTS transmit rate is derived from the transmit rate by looking in the
1413 * h/w rate table. We must also factor in whether or not a short
1414 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
1415 */
1416 ctsrate = rt->info[cix].ratecode |
1417 (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
1418
1419 for (i = 0; i < 4; i++) {
1420 if (!rates[i].count || (rates[i].idx < 0))
1421 continue;
1422
1423 rix = rates[i].idx;
1424
1425 series[i].Rate = rt->info[rix].ratecode |
1426 (bf_isshpreamble(bf) ?
1427 rt->info[rix].short_preamble : 0);
1428
1429 series[i].Tries = rates[i].count;
1430
1431 series[i].RateFlags =
1432 ((rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS) ?
1433 ATH9K_RATESERIES_RTS_CTS : 0) |
1434 ((rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) ?
1435 ATH9K_RATESERIES_2040 : 0) |
1436 ((rates[i].flags & ATH9K_TX_RC_SHORT_GI) ?
1437 ATH9K_RATESERIES_HALFGI : 0);
1438
1439 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1440 (rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) != 0,
1441 (rates[i].flags & ATH9K_TX_RC_SHORT_GI),
1442 bf_isshpreamble(bf));
1443
1444 series[i].ChSel = sc->sc_tx_chainmask;
1445
1446 if (rtsctsena)
1447 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1448
1449 ARN_DBG((ARN_DBG_RATE,
1450 "series[%d]--flags & ATH9K_TX_RC_USE_RTS_CTS = %08x"
1451 "--flags & ATH9K_TX_RC_40_MHZ_WIDTH = %08x"
1452 "--flags & ATH9K_TX_RC_SHORT_GI = %08x\n",
1453 rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS,
1454 rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH,
1455 rates[i].flags & ATH9K_TX_RC_SHORT_GI));
1456
1457 ARN_DBG((ARN_DBG_RATE,
1458 "series[%d]:"
1459 "dot11rate:%d"
1460 "index:%d"
1461 "retry count:%d\n",
1462 i,
1463 (rt->info[rates[i].idx].ratekbps)/1000,
1464 rates[i].idx,
1465 rates[i].count));
1466 }
1467
1468 /* set dur_update_en for l-sig computation except for PS-Poll frames */
1469 ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
1470 ctsrate, ctsduration,
1471 series, 4, flags);
1472
1473 if (sc->sc_config.ath_aggr_prot && flags)
1474 ath9k_hw_set11n_burstduration(ah, ds, 8192);
1475 }
1476
1477 static void
1478 ath_tx_complete(struct arn_softc *sc, struct ath_buf *bf,
1479 struct ath_xmit_status *tx_status)
1480 {
1481 boolean_t is_data = bf_isdata(bf);
1482
1483 ARN_DBG((ARN_DBG_XMIT, "TX complete\n"));
1484
1485 if (tx_status->flags & ATH_TX_BAR)
1486 tx_status->flags &= ~ATH_TX_BAR;
1487
1488 bf->rates[0].count = tx_status->retries + 1;
1489
1490 arn_tx_status(sc, bf, is_data);
1491 }
1492
1493 /* To complete a chain of buffers associated with a frame */
1494 static void
1495 ath_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
1496 int txok, int sendbar)
1497 {
1498 struct ath_xmit_status tx_status;
1499
1500 /*
1501 * Set retry information.
1502 * NB: Don't use the information in the descriptor, because the frame
1503 * could be software retried.
1504 */
1505 tx_status.retries = bf->bf_retries;
1506 tx_status.flags = 0;
1507
1508 if (sendbar)
1509 tx_status.flags = ATH_TX_BAR;
1510
1511 if (!txok) {
1512 tx_status.flags |= ATH_TX_ERROR;
1513
1514 if (bf_isxretried(bf))
1515 tx_status.flags |= ATH_TX_XRETRY;
1516 }
1517
1518 /* complete this frame */
1519 ath_tx_complete(sc, bf, &tx_status);
1520
1521 /*
1522 * Return the list of ath_buf of this mpdu to free queue
1523 */
1524 }
1525
1526 static void
1527 arn_tx_stopdma(struct arn_softc *sc, struct ath_txq *txq)
1528 {
1529 struct ath_hal *ah = sc->sc_ah;
1530
1531 (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1532
1533 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_stopdma(): "
1534 "tx queue [%u] %x, link %p\n",
1535 txq->axq_qnum,
1536 ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link));
1537
1538 }
1539
1540 /* Drain only the data queues */
1541 /* ARGSUSED */
1542 static void
1543 arn_drain_txdataq(struct arn_softc *sc, boolean_t retry_tx)
1544 {
1545 struct ath_hal *ah = sc->sc_ah;
1546 int i, status, npend = 0;
1547
1548 if (!(sc->sc_flags & SC_OP_INVALID)) {
1549 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1550 if (ARN_TXQ_SETUP(sc, i)) {
1551 arn_tx_stopdma(sc, &sc->sc_txq[i]);
1552 /*
1553 * The TxDMA may not really be stopped.
1554 * Double check the hal tx pending count
1555 */
1556 npend += ath9k_hw_numtxpending(ah,
1557 sc->sc_txq[i].axq_qnum);
1558 }
1559 }
1560 }
1561
1562 if (npend) {
1563 /* TxDMA not stopped, reset the hal */
1564 ARN_DBG((ARN_DBG_XMIT, "arn: arn_drain_txdataq(): "
1565 "Unable to stop TxDMA. Reset HAL!\n"));
1566
1567 if (!ath9k_hw_reset(ah,
1568 sc->sc_ah->ah_curchan,
1569 sc->tx_chan_width,
1570 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1571 sc->sc_ht_extprotspacing, B_TRUE, &status)) {
1572 ARN_DBG((ARN_DBG_FATAL, "arn: arn_drain_txdataq(): "
1573 "unable to reset hardware; hal status %u\n",
1574 status));
1575 }
1576 }
1577
1578 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1579 if (ARN_TXQ_SETUP(sc, i))
1580 arn_tx_draintxq(sc, &sc->sc_txq[i]);
1581 }
1582 }
1583
1584 /* Setup a h/w transmit queue */
1585 struct ath_txq *
1586 arn_txq_setup(struct arn_softc *sc, int qtype, int subtype)
1587 {
1588 struct ath_hal *ah = sc->sc_ah;
1589 struct ath9k_tx_queue_info qi;
1590 int qnum;
1591
1592 (void) memset(&qi, 0, sizeof (qi));
1593 qi.tqi_subtype = subtype;
1594 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1595 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1596 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1597 qi.tqi_physCompBuf = 0;
1598
1599 /*
1600 * Enable interrupts only for EOL and DESC conditions.
1601 * We mark tx descriptors to receive a DESC interrupt
1602 * when a tx queue gets deep; otherwise we wait for the
1603 * EOL to reap descriptors. Note that this is done to
1604 * reduce interrupt load and this only defers reaping
1605 * descriptors, never transmitting frames. Aside from
1606 * reducing interrupts this also permits more concurrency.
1607 * The only potential downside is if the tx queue backs
1608 * up, in which case the top half of the kernel may back up
1609 * due to a lack of tx descriptors.
1610 *
1611 * The UAPSD queue is an exception, since we take a desc-
1612 * based intr on the EOSP frames.
1613 */
1614 if (qtype == ATH9K_TX_QUEUE_UAPSD)
1615 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1616 else
1617 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1618 TXQ_FLAG_TXDESCINT_ENABLE;
1619 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1620 if (qnum == -1) {
1621 /*
1622 * NB: don't print a message, this happens
1623 * normally on parts with too few tx queues
1624 */
1625 return (NULL);
1626 }
1627 if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
1628 ARN_DBG((ARN_DBG_FATAL, "arn: arn_txq_setup(): "
1629 "hal qnum %u out of range, max %u!\n",
1630 qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)));
1631 (void) ath9k_hw_releasetxqueue(ah, qnum);
1632 return (NULL);
1633 }
1634 if (!ARN_TXQ_SETUP(sc, qnum)) {
1635 struct ath_txq *txq = &sc->sc_txq[qnum];
1636
1637 txq->axq_qnum = qnum;
1638 txq->axq_intrcnt = 0; /* legacy */
1639 txq->axq_link = NULL;
1640
1641 list_create(&txq->axq_list, sizeof (struct ath_buf),
1642 offsetof(struct ath_buf, bf_node));
1643 list_create(&txq->axq_acq, sizeof (struct ath_buf),
1644 offsetof(struct ath_buf, bf_node));
1645 mutex_init(&txq->axq_lock, NULL, MUTEX_DRIVER, NULL);
1646
1647 txq->axq_depth = 0;
1648 txq->axq_aggr_depth = 0;
1649 txq->axq_totalqueued = 0;
1650 txq->axq_linkbuf = NULL;
1651 sc->sc_txqsetup |= 1<<qnum;
1652 }
1653 return (&sc->sc_txq[qnum]);
1654 }
1655
1656 /* Reclaim resources for a setup queue */
1657
1658 void
1659 arn_tx_cleanupq(struct arn_softc *sc, struct ath_txq *txq)
1660 {
1661 (void) ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1662 sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1663 }
1664
1665 /*
1666 * Setup a hardware data transmit queue for the specified
1667 * access category. The hal may not support all requested
1668 * queues, in which case it will return a reference to a
1669 * previously setup queue. We record the mapping from ac's
1670 * to h/w queues for use by arn_tx_start and also track
1671 * the set of h/w queues being used to optimize work in the
1672 * transmit interrupt handler and related routines.
1673 */
1674
1675 int
1676 arn_tx_setup(struct arn_softc *sc, int haltype)
1677 {
1678 struct ath_txq *txq;
1679
1680 if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1681 ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_setup(): "
1682 "HAL AC %u out of range, max %zu!\n",
1683 haltype, ARRAY_SIZE(sc->sc_haltype2q)));
1684 return (0);
1685 }
1686 txq = arn_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1687 if (txq != NULL) {
1688 sc->sc_haltype2q[haltype] = txq->axq_qnum;
1689 return (1);
1690 } else
1691 return (0);
1692 }
1693
1694 void
1695 arn_tx_draintxq(struct arn_softc *sc, struct ath_txq *txq)
1696 {
1697 struct ath_buf *bf;
1698
1699 /*
1700 * This assumes output has been stopped.
1701 */
1702 for (;;) {
1703 mutex_enter(&txq->axq_lock);
1704 bf = list_head(&txq->axq_list);
1705 if (bf == NULL) {
1706 txq->axq_link = NULL;
1707 mutex_exit(&txq->axq_lock);
1708 break;
1709 }
1710 list_remove(&txq->axq_list, bf);
1711 mutex_exit(&txq->axq_lock);
1712 bf->bf_in = NULL;
1713 mutex_enter(&sc->sc_txbuflock);
1714 list_insert_tail(&sc->sc_txbuf_list, bf);
1715 mutex_exit(&sc->sc_txbuflock);
1716 }
1717 }
1718
1719 /* Drain the transmit queues and reclaim resources */
1720
1721 void
1722 arn_draintxq(struct arn_softc *sc, boolean_t retry_tx)
1723 {
1724 /*
1725 * stop beacon queue. The beacon will be freed when
1726 * we go to INIT state
1727 */
1728 if (!(sc->sc_flags & SC_OP_INVALID)) {
1729 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_beaconq);
1730 ARN_DBG((ARN_DBG_XMIT, "arn: arn_draintxq(): "
1731 "beacon queue %x\n",
1732 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_beaconq)));
1733 }
1734
1735 arn_drain_txdataq(sc, retry_tx);
1736 }
1737
1738 uint32_t
1739 arn_txq_depth(struct arn_softc *sc, int qnum)
1740 {
1741 return (sc->sc_txq[qnum].axq_depth);
1742 }
1743
1744 uint32_t
1745 arn_txq_aggr_depth(struct arn_softc *sc, int qnum)
1746 {
1747 return (sc->sc_txq[qnum].axq_aggr_depth);
1748 }
1749
1750 /* Update parameters for a transmit queue */
1751 int
1752 arn_txq_update(struct arn_softc *sc, int qnum,
1753 struct ath9k_tx_queue_info *qinfo)
1754 {
1755 struct ath_hal *ah = sc->sc_ah;
1756 int error = 0;
1757 struct ath9k_tx_queue_info qi;
1758
1759 if (qnum == sc->sc_beaconq) {
1760 /*
1761 * XXX: for beacon queue, we just save the parameter.
1762 * It will be picked up by arn_beaconq_config() when
1763 * it's necessary.
1764 */
1765 sc->sc_beacon_qi = *qinfo;
1766 return (0);
1767 }
1768
1769 ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
1770
1771 (void) ath9k_hw_get_txq_props(ah, qnum, &qi);
1772 qi.tqi_aifs = qinfo->tqi_aifs;
1773 qi.tqi_cwmin = qinfo->tqi_cwmin;
1774 qi.tqi_cwmax = qinfo->tqi_cwmax;
1775 qi.tqi_burstTime = qinfo->tqi_burstTime;
1776 qi.tqi_readyTime = qinfo->tqi_readyTime;
1777
1778 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1779 ARN_DBG((ARN_DBG_FATAL,
1780 "Unable to update hardware queue %u!\n", qnum));
1781 error = -EIO;
1782 } else {
1783 (void) ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
1784 }
1785
1786 return (error);
1787 }
1788
1789 int
1790 ath_cabq_update(struct arn_softc *sc)
1791 {
1792 struct ath9k_tx_queue_info qi;
1793 int qnum = sc->sc_cabq->axq_qnum;
1794 struct ath_beacon_config conf;
1795
1796 (void) ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1797 /*
1798 * Ensure the readytime % is within the bounds.
1799 */
1800 if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1801 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1802 else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1803 sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1804
1805 arn_get_beaconconfig(sc, &conf);
1806 qi.tqi_readyTime =
1807 (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
1808 (void) arn_txq_update(sc, qnum, &qi);
1809
1810 return (0);
1811 }
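/*
 * Example (illustrative): with a beacon interval of 100 and
 * cabqReadytime clamped to, say, 80 (i.e. 80%), tqi_readyTime becomes
 * (100 * 80) / 100 = 80, so the CAB queue may drain for at most 80%
 * of each beacon interval.
 */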
1812
1813 static uint32_t
1814 arn_tx_get_keytype(const struct ieee80211_cipher *cip)
1815 {
1816 uint32_t index;
1817 static const uint8_t ciphermap[] = {
1818 ATH9K_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */
1819 ATH9K_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */
1820 ATH9K_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */
1821 ATH9K_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */
1822 ATH9K_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */
1823 ATH9K_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */
1824 };
1825
1826 ASSERT(cip->ic_cipher < ARRAY_SIZE(ciphermap));
1827 index = cip->ic_cipher;
1828
1829 if (ciphermap[index] == ATH9K_CIPHER_WEP)
1830 return (ATH9K_KEY_TYPE_WEP);
1831 else if (ciphermap[index] == ATH9K_CIPHER_TKIP)
1832 return (ATH9K_KEY_TYPE_TKIP);
1833 else if (ciphermap[index] == ATH9K_CIPHER_AES_CCM)
1834 return (ATH9K_KEY_TYPE_AES);
1835
1836 return (ATH9K_KEY_TYPE_CLEAR);
1837
1838 }
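/*
 * Example: IEEE80211_CIPHER_WEP maps to ATH9K_KEY_TYPE_WEP,
 * IEEE80211_CIPHER_TKIP to ATH9K_KEY_TYPE_TKIP and
 * IEEE80211_CIPHER_AES_CCM to ATH9K_KEY_TYPE_AES; everything else
 * (AES-OCB, CKIP, none) falls through to ATH9K_KEY_TYPE_CLEAR.
 */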
1839
1840 /* Display buffer */
1841 void
1842 arn_dump_line(unsigned char *p, uint32_t len, boolean_t isaddress,
1843 uint32_t group)
1844 {
1845 char *pnumeric = "0123456789ABCDEF";
1846 char hex[((2 + 1) * 16) + 1];
1847 char *phex = hex;
1848 char ascii[16 + 1];
1849 char *pascii = ascii;
1850 uint32_t grouped = 0;
1851
1852 if (isaddress) {
1853 arn_problem("arn: %p: ", (void *)p);
1854 } else {
1855 arn_problem("arn: ");
1856 }
1857
1858 while (len) {
1859 *phex++ = pnumeric[((uint8_t)*p) / 16];
1860 *phex++ = pnumeric[((uint8_t)*p) % 16];
1861 if (++grouped >= group) {
1862 *phex++ = ' ';
1863 grouped = 0;
1864 }
1865
1866 *pascii++ = (*p >= 32 && *p < 128) ? *p : '.';
1867
1868 ++p;
1869 --len;
1870 }
1871
1872 *phex = '\0';
1873 *pascii = '\0';
1874
1875 arn_problem("%-*s|%-*s|\n", (2 * 16) +
1876 (16 / group), hex, 16, ascii);
1877 }
1878
1879 void
1880 arn_dump_pkg(unsigned char *p, uint32_t len, boolean_t isaddress,
1881 uint32_t group)
1882 {
1883 uint32_t perline;
1884 while (len) {
1885 perline = (len < 16) ? len : 16;
1886 arn_dump_line(p, perline, isaddress, group);
1887 len -= perline;
1888 p += perline;
1889 }
1890 }
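/*
 * Usage sketch (hypothetical buffer `buf`): dumping 20 bytes grouped
 * per byte,
 *	arn_dump_pkg(buf, 20, B_FALSE, 1);
 * emits one full 16-byte line followed by a 4-byte line, each printed
 * as "arn: <hex bytes> |<printable ASCII, '.' otherwise>|".
 */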
1891
1892 /*
1893  * The input parameter mp comes with the following assumptions:
1894  * for data packets, the GLDv3 mac_wifi plugin allocates and fills in
1895  * the ieee80211 header; for management packets, net80211 allocates
1896  * and fills it in. In both cases, enough space is left in the header
1897  * for the encryption option.
1898 */
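/*
 * Illustration (assumed layout): an mblk chain such as
 *	mp (802.11 header [+ crypto header]) -> b_cont (payload) -> ...
 * is flattened below by walking b_cont and bcopy()ing each fragment
 * into the pre-mapped DMA buffer at bf->bf_dma.mem_va.
 */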
1899 static int32_t
1900 arn_tx_start(struct arn_softc *sc, struct ieee80211_node *in,
1901 struct ath_buf *bf, mblk_t *mp)
1902 {
1903 ieee80211com_t *ic = (ieee80211com_t *)sc;
1904 struct ieee80211_frame *wh = (struct ieee80211_frame *)mp->b_rptr;
1905 struct ath_hal *ah = sc->sc_ah;
1906 struct ath_node *an;
1907 struct ath_desc *ds;
1908 struct ath_txq *txq;
1909 struct ath_rate_table *rt;
1910 enum ath9k_pkt_type atype;
1911 	boolean_t shortPreamble = B_FALSE, is_padding = B_FALSE;
1912 uint32_t subtype, keytype = ATH9K_KEY_TYPE_CLEAR;
1913 int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen;
1914 caddr_t dest;
1915
1916 /*
1917 	 * The CRC is appended by the hardware, not encapsulated by the
1918 	 * driver, but it must still be counted in the packet length.
1919 */
1920 pktlen = IEEE80211_CRC_LEN;
1921 iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
1922 keyix = ATH9K_TXKEYIX_INVALID;
1923 hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
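	/*
	 * A 28-byte header length presumably indicates a QoS data frame
	 * whose 26-byte header was padded by two bytes for 32-bit
	 * alignment; the pad is subtracted from the on-air length below.
	 */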
1924 if (hdrlen == 28)
1925 is_padding = B_TRUE;
1926
1927 if (iswep != 0) {
1928 const struct ieee80211_cipher *cip;
1929 struct ieee80211_key *k;
1930
1931 /*
1932 * Construct the 802.11 header+trailer for an encrypted
1933 * frame. The only reason this can fail is because of an
1934 * unknown or unsupported cipher/key type.
1935 */
1936 k = ieee80211_crypto_encap(ic, mp);
1937 if (k == NULL) {
1938 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start "
1939 "crypto_encap failed\n"));
1940 /*
1941 * This can happen when the key is yanked after the
1942 * frame was queued. Just discard the frame; the
1943 * 802.11 layer counts failures and provides
1944 * debugging/diagnostics.
1945 */
1946 return (EIO);
1947 }
1948 cip = k->wk_cipher;
1949
1950 keytype = arn_tx_get_keytype(cip);
1951
1952 /*
1953 * Adjust the packet + header lengths for the crypto
1954 * additions and calculate the h/w key index. When
1955 		 * a s/w MIC is used, the frame will already have had the
1956 		 * MIC appended before entry, so the length computed from
1957 		 * the mblk chain below accounts for it. Otherwise we need
1958 		 * to add it to the packet length.
1959 */
1960 hdrlen += cip->ic_header;
1961 pktlen += cip->ic_trailer;
1962 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
1963 pktlen += cip->ic_miclen;
1964
1965 keyix = k->wk_keyix;
1966
1967 /* packet header may have moved, reset our local pointer */
1968 wh = (struct ieee80211_frame *)mp->b_rptr;
1969 }
1970
1971 dest = bf->bf_dma.mem_va;
1972 for (; mp != NULL; mp = mp->b_cont) {
1973 mblen = MBLKL(mp);
1974 bcopy(mp->b_rptr, dest, mblen);
1975 dest += mblen;
1976 }
1977 mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
1978 pktlen += mbslen;
1979 if (is_padding && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
1980 IEEE80211_FC0_TYPE_DATA)
1981 		pktlen -= 2; /* real packet length */
1982
1983 /* buf setup */
1984 ath_tx_setup_buffer(sc, bf, in, wh, pktlen, keytype);
1985
1986 /* setup descriptors */
1987 ds = bf->bf_desc;
1988 rt = sc->sc_currates;
1989 ASSERT(rt != NULL);
1990
1991 arn_get_rate(sc, bf, wh);
1992 an = (struct ath_node *)(in);
1993
1994 /*
1995 * Calculate Atheros packet type from IEEE80211 packet header
1996 * and setup for rate calculations.
1997 */
1998 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
1999 case IEEE80211_FC0_TYPE_MGT:
2000 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2001 if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
2002 atype = ATH9K_PKT_TYPE_BEACON;
2003 else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2004 atype = ATH9K_PKT_TYPE_PROBE_RESP;
2005 else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
2006 atype = ATH9K_PKT_TYPE_ATIM;
2007 else
2008 atype = ATH9K_PKT_TYPE_NORMAL;
2009
2010 		/* force all mgmt frames to highest queue */
2011 txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
2012 break;
2013 case IEEE80211_FC0_TYPE_CTL:
2014 atype = ATH9K_PKT_TYPE_PSPOLL;
2015 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2016
2017 /* force all ctl frames to highest queue */
2018 txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
2019 break;
2020 case IEEE80211_FC0_TYPE_DATA:
2021 // arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
2022 // pktlen, 1, 1);
2023 atype = ATH9K_PKT_TYPE_NORMAL;
2024
2025 /* Always use background queue */
2026 txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_BE, sc)];
2027 break;
2028 default:
2029 /* Unknown 802.11 frame */
2030 sc->sc_stats.ast_tx_invalid++;
2031 return (1);
2032 }
2033
2034 /* setup descriptor */
2035 ds->ds_link = 0;
2036 ds->ds_data = bf->bf_dma.cookie.dmac_address;
2037
2038 /*
2039 * Formulate first tx descriptor with tx controls.
2040 */
2041 ath9k_hw_set11n_txdesc(ah, ds,
2042 (pktlen), /* packet length */
2043 atype, /* Atheros packet type */
2044 MAX_RATE_POWER /* MAX_RATE_POWER */,
2045 keyix /* ATH9K_TXKEYIX_INVALID */,
2046 keytype /* ATH9K_KEY_TYPE_CLEAR */,
2047 bf->bf_flags /* flags */);
2048
2049 /* LINTED E_BAD_PTR_CAST_ALIGN */
2050 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start(): to %s totlen=%d "
2051 "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
2052 "qnum=%d sht=%d dur = %d\n",
2053 ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
2054 an->an_tx_rate2sp, an->an_tx_rate3sp,
2055 txq->axq_qnum, shortPreamble, *(uint16_t *)wh->i_dur));
2056
2057 (void) ath9k_hw_filltxdesc(ah, ds,
2058 mbslen, /* segment length */
2059 B_TRUE, /* first segment */
2060 B_TRUE, /* last segment */
2061 ds); /* first descriptor */
2062
2063 /* set rate related fields in tx descriptor */
2064 ath_buf_set_rate(sc, bf, wh);
2065
2066 ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);
2067
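	/*
	 * Append the frame to the queue's hardware DMA chain: if the
	 * queue is idle, hand the descriptor address straight to the
	 * hardware; otherwise patch the previous descriptor's ds_link to
	 * point at it, then remember where this descriptor's link word
	 * lives for the next append.
	 */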
2068 mutex_enter(&txq->axq_lock);
2069 list_insert_tail(&txq->axq_list, bf);
2070 if (txq->axq_link == NULL) {
2071 (void) ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
2072 } else {
2073 *txq->axq_link = bf->bf_daddr;
2074 }
2075 txq->axq_link = &ds->ds_link;
2076 mutex_exit(&txq->axq_lock);
2077
2078 // arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va, pktlen, 1, 1);
2079
2080 (void) ath9k_hw_txstart(ah, txq->axq_qnum);
2081
2082 ic->ic_stats.is_tx_frags++;
2083 ic->ic_stats.is_tx_bytes += pktlen;
2084
2085 return (0);
2086 }
2087
2088 /*
2089 * Transmit a management frame.
2090 * Note that management frames come directly from the 802.11 layer
2091 * and do not honor the send queue flow control.
2092 */
2093 /* Upon failure the caller should free mp (data frames only) */
2094 int
2095 arn_tx(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
2096 {
2097 struct arn_softc *sc = (struct arn_softc *)ic;
2098 struct ath_hal *ah = sc->sc_ah;
2099 struct ieee80211_node *in = NULL;
2100 struct ath_buf *bf = NULL;
2101 struct ieee80211_frame *wh;
2102 int error = 0;
2103
2104 ASSERT(mp->b_next == NULL);
2105 /* should check later */
2106 if (sc->sc_flags & SC_OP_INVALID) {
2107 if ((type & IEEE80211_FC0_TYPE_MASK) !=
2108 IEEE80211_FC0_TYPE_DATA) {
2109 freemsg(mp);
2110 }
2111 return (ENXIO);
2112 }
2113
2114 /* Grab a TX buffer */
2115 bf = arn_tx_get_buffer(sc);
2116 if (bf == NULL) {
2117 ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): discard, "
2118 "no xmit buf\n"));
2119 ic->ic_stats.is_tx_nobuf++;
2120 if ((type & IEEE80211_FC0_TYPE_MASK) ==
2121 IEEE80211_FC0_TYPE_DATA) {
2122 sc->sc_stats.ast_tx_nobuf++;
2123 mutex_enter(&sc->sc_resched_lock);
2124 sc->sc_resched_needed = B_TRUE;
2125 mutex_exit(&sc->sc_resched_lock);
2126 } else {
2127 sc->sc_stats.ast_tx_nobufmgt++;
2128 freemsg(mp);
2129 }
2130 return (ENOMEM);
2131 }
2132
2133 wh = (struct ieee80211_frame *)mp->b_rptr;
2134
2135 /* Locate node */
2136 in = ieee80211_find_txnode(ic, wh->i_addr1);
2137 if (in == NULL) {
2138 error = EIO;
2139 goto bad;
2140 }
2141
2142 in->in_inact = 0;
2143 switch (type & IEEE80211_FC0_TYPE_MASK) {
2144 case IEEE80211_FC0_TYPE_DATA:
2145 (void) ieee80211_encap(ic, mp, in);
2146 break;
2147 default:
2148 if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
2149 IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
2150 /* fill time stamp */
2151 uint64_t tsf;
2152 uint32_t *tstamp;
2153
2154 tsf = ath9k_hw_gettsf64(ah);
2155 /* adjust 100us delay to xmit */
2156 tsf += 100;
2157 /* LINTED E_BAD_PTR_CAST_ALIGN */
2158 tstamp = (uint32_t *)&wh[1];
2159 tstamp[0] = LE_32(tsf & 0xffffffff);
2160 tstamp[1] = LE_32(tsf >> 32);
2161 }
2162 sc->sc_stats.ast_tx_mgmt++;
2163 break;
2164 }
2165
2166 error = arn_tx_start(sc, in, bf, mp);
2167
2168 if (error != 0) {
2169 bad:
2170 ic->ic_stats.is_tx_failed++;
2171 if (bf != NULL) {
2172 mutex_enter(&sc->sc_txbuflock);
2173 list_insert_tail(&sc->sc_txbuf_list, bf);
2174 mutex_exit(&sc->sc_txbuflock);
2175 }
2176 }
2177 if (in != NULL)
2178 ieee80211_free_node(in);
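	/*
	 * The payload has already been copied into the DMA buffer, so mp
	 * can be freed once transmission is set up; on error, data frames
	 * are left for the caller to free (see the note above) so they
	 * can be requeued.
	 */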
2179 if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
2180 error == 0) {
2181 freemsg(mp);
2182 }
2183
2184 return (error);
2185 }
2186
2187 static void
2188 arn_printtxbuf(struct ath_buf *bf, int done)
2189 {
2190 struct ath_desc *ds = bf->bf_desc;
2191 const struct ath_tx_status *ts = &ds->ds_txstat;
2192
2193 ARN_DBG((ARN_DBG_XMIT, "arn: T(%p %p) %08x %08x %08x %08x %08x"
2194 " %08x %08x %08x %c\n",
2195 ds, bf->bf_daddr,
2196 ds->ds_link, ds->ds_data,
2197 ds->ds_ctl0, ds->ds_ctl1,
2198 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
2199 !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
2200 }
2201
2202 /* ARGSUSED */
2203 static void
2204 ath_tx_rc_status(struct ath_buf *bf,
2205 struct ath_desc *ds,
2206 int nbad,
2207 int txok,
2208 boolean_t update_rc)
2209 {
2210 struct ath_tx_info_priv *tx_info_priv =
2211 (struct ath_tx_info_priv *)&bf->tx_info_priv;
2212
2213 tx_info_priv->update_rc = B_FALSE;
2214
2215 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
2216 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
2217 if (bf_isdata(bf)) {
2218 (void) memcpy(&tx_info_priv->tx, &ds->ds_txstat,
2219 sizeof (tx_info_priv->tx));
2220 tx_info_priv->n_frames = bf->bf_nframes;
2221 tx_info_priv->n_bad_frames = nbad;
2222 tx_info_priv->update_rc = B_TRUE;
2223 }
2224 }
2225 }
2226
2227 /* Process completed xmit descriptors from the specified queue */
2228 static int
2229 arn_tx_processq(struct arn_softc *sc, struct ath_txq *txq)
2230 {
2231 ieee80211com_t *ic = (ieee80211com_t *)sc;
2232 struct ath_hal *ah = sc->sc_ah;
2233 struct ath_buf *bf;
2234 struct ath_desc *ds;
2235 struct ieee80211_node *in;
2236 struct ath_tx_status *ts;
2237 struct ath_node *an;
2238 int32_t sr, lr, nacked = 0;
2239 int txok, nbad = 0;
2240 int status;
2241
2242 for (;;) {
2243 mutex_enter(&txq->axq_lock);
2244 bf = list_head(&txq->axq_list);
2245 if (bf == NULL) {
2246 txq->axq_link = NULL;
2247 /* txq->axq_linkbuf = NULL; */
2248 mutex_exit(&txq->axq_lock);
2249 break;
2250 }
2251 		ds = bf->bf_desc; /* last descriptor */
2252 ts = &ds->ds_txstat;
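		/*
		 * ath9k_hw_txprocdesc() returns EINPROGRESS while the
		 * hardware still owns this descriptor; processing stops at
		 * the first incomplete entry, which stays on the queue.
		 */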
2253 status = ath9k_hw_txprocdesc(ah, ds);
2254
2255 #ifdef DEBUG
2256 arn_printtxbuf(bf, status == 0);
2257 #endif
2258
2259 if (status == EINPROGRESS) {
2260 mutex_exit(&txq->axq_lock);
2261 break;
2262 }
2263 list_remove(&txq->axq_list, bf);
2264 mutex_exit(&txq->axq_lock);
2265 in = bf->bf_in;
2266 if (in != NULL) {
2267 an = ATH_NODE(in);
2268 			/* Successful transmission */
2269 if (ts->ts_status == 0) {
2270 an->an_tx_ok++;
2271 an->an_tx_antenna = ts->ts_antenna;
2272 sc->sc_stats.ast_tx_rssidelta =
2273 ts->ts_rssi - sc->sc_stats.ast_tx_rssi;
2274 sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
2275 } else {
2276 an->an_tx_err++;
2277 if (ts->ts_status & ATH9K_TXERR_XRETRY) {
2278 sc->sc_stats.ast_tx_xretries++;
2279 }
2280 if (ts->ts_status & ATH9K_TXERR_FIFO) {
2281 sc->sc_stats.ast_tx_fifoerr++;
2282 }
2283 if (ts->ts_status & ATH9K_TXERR_FILT) {
2284 sc->sc_stats.ast_tx_filtered++;
2285 }
2286 an->an_tx_antenna = 0; /* invalidate */
2287 }
2288 sr = ts->ts_shortretry;
2289 lr = ts->ts_longretry;
2290 sc->sc_stats.ast_tx_shortretry += sr;
2291 sc->sc_stats.ast_tx_longretry += lr;
2292 /*
2293 * Hand the descriptor to the rate control algorithm.
2294 */
2295 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2296 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
2297 /*
2298 				 * If the frame was ACK'd, update the last rx time
2299 				 * used to work around phantom bmiss interrupts.
2300 */
2301 if (ts->ts_status == 0) {
2302 nacked++;
2303 an->an_tx_ok++;
2304 } else {
2305 an->an_tx_err++;
2306 }
2307 an->an_tx_retr += sr + lr;
2308 }
2309 }
2310
2311 txok = (ds->ds_txstat.ts_status == 0);
2312 if (!bf_isampdu(bf)) {
2313 /*
2314 * This frame is sent out as a single frame.
2315 * Use hardware retry status for this frame.
2316 */
2317 bf->bf_retries = ds->ds_txstat.ts_longretry;
2318 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
2319 bf->bf_state.bf_type |= BUF_XRETRY;
2320 nbad = 0;
2321 }
2322 		ath_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
2323
2324 ath_tx_complete_buf(sc, bf, txok, 0);
2325
2326 // arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
2327 // bf->bf_frmlen, 1, 1);
2328
2329 bf->bf_in = NULL;
2330 mutex_enter(&sc->sc_txbuflock);
2331 list_insert_tail(&sc->sc_txbuf_list, bf);
2332 mutex_exit(&sc->sc_txbuflock);
2333
2334 /*
2335 * Reschedule stalled outbound packets
2336 */
2337 mutex_enter(&sc->sc_resched_lock);
2338 if (sc->sc_resched_needed) {
2339 sc->sc_resched_needed = B_FALSE;
2340 mac_tx_update(ic->ic_mach);
2341 }
2342 mutex_exit(&sc->sc_resched_lock);
2343 }
2344
2345 return (nacked);
2346 }
2347
2348 static void
2349 arn_tx_handler(struct arn_softc *sc)
2350 {
2351 int i;
2352 int nacked = 0;
2353 uint32_t qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
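	/*
	 * Start with a mask covering every queue; the HAL narrows it to
	 * the queues that actually raised a TX interrupt.
	 */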
2354 ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
2355
2356 /*
2357 * Process each active queue.
2358 */
2359 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2360 if (ARN_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) {
2361 nacked += arn_tx_processq(sc, &sc->sc_txq[i]);
2362 }
2363 }
2364
2365 if (nacked)
2366 sc->sc_lastrx = ath9k_hw_gettsf64(sc->sc_ah);
2367 }
2368
2369 /* Deferred processing of transmit interrupt */
2370
2371 void
2372 arn_tx_int_proc(void *arg)
2373 {
2374 struct arn_softc *sc = arg;
2375 arn_tx_handler(sc);
2376 }
2377
2378 /* Node init & cleanup functions */
2379
2380 #ifdef ARN_TX_AGGREGATION
2381 void
2382 arn_tx_node_init(struct arn_softc *sc, struct ath_node *an)
2383 {
2384 struct ath_atx_tid *tid;
2385 struct ath_atx_ac *ac;
2386 int tidno, acno;
2387
2388 for (tidno = 0, tid = &an->tid[tidno]; tidno < WME_NUM_TID;
2389 tidno++, tid++) {
2390 tid->an = an;
2391 tid->tidno = tidno;
2392 tid->seq_start = tid->seq_next = 0;
2393 tid->baw_size = WME_MAX_BA;
2394 tid->baw_head = tid->baw_tail = 0;
2395 tid->sched = B_FALSE;
2396 tid->paused = B_FALSE;
2397 tid->state &= ~AGGR_CLEANUP;
2398 list_create(&tid->buf_q, sizeof (struct ath_buf),
2399 offsetof(struct ath_buf, bf_node));
2400 acno = TID_TO_WME_AC(tidno);
2401 tid->ac = &an->ac[acno];
2402 tid->state &= ~AGGR_ADDBA_COMPLETE;
2403 tid->state &= ~AGGR_ADDBA_PROGRESS;
2404 tid->addba_exchangeattempts = 0;
2405 }
2406
2407 for (acno = 0, ac = &an->ac[acno]; acno < WME_NUM_AC; acno++, ac++) {
2408 ac->sched = B_FALSE;
2409 list_create(&ac->tid_q, sizeof (struct ath_atx_tid),
2410 offsetof(struct ath_atx_tid, list));
2411
2412 switch (acno) {
2413 case WME_AC_BE:
2414 ac->qnum = arn_tx_get_qnum(sc,
2415 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
2416 break;
2417 case WME_AC_BK:
2418 ac->qnum = arn_tx_get_qnum(sc,
2419 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
2420 break;
2421 case WME_AC_VI:
2422 ac->qnum = arn_tx_get_qnum(sc,
2423 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
2424 break;
2425 case WME_AC_VO:
2426 ac->qnum = arn_tx_get_qnum(sc,
2427 ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
2428 break;
2429 }
2430 }
2431 }
2432
2433 void
2434 arn_tx_node_cleanup(struct arn_softc *sc, struct ieee80211_node *in)
2435 {
2436 int i;
2437 struct ath_atx_ac *ac, *ac_tmp;
2438 struct ath_atx_tid *tid, *tid_tmp;
2439 	struct ath_txq *txq;
	struct ath_buf *bf;
2440 struct ath_node *an = ATH_NODE(in);
2441
2442 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2443 if (ARN_TXQ_SETUP(sc, i)) {
2444 txq = &sc->sc_txq[i];
2445
2446 mutex_enter(&txq->axq_lock);
2447
2448 list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
2449 tid = list_head(&ac->tid_q);
2450 if (tid && tid->an != an)
2451 continue;
2452 list_remove(&txq->axq_acq, ac);
2453 ac->sched = B_FALSE;
2454
2455 list_for_each_entry_safe(tid, tid_tmp,
2456 &ac->tid_q) {
2457 list_remove(&ac->tid_q, tid);
2458 					bf = list_head(&tid->buf_q);
2459 					while (bf != NULL) {
2460 						if (bf->bf_in == in)
2461 							bf->bf_in = NULL;
2462 						bf = list_next(&tid->buf_q, bf);
2463 					}
2464 tid->sched = B_FALSE;
2465 arn_tid_drain(sc, txq, tid);
2466 tid->state &= ~AGGR_ADDBA_COMPLETE;
2467 tid->addba_exchangeattempts = 0;
2468 tid->state &= ~AGGR_CLEANUP;
2469 }
2470 }
2471
2472 mutex_exit(&txq->axq_lock);
2473 }
2474 }
2475 }
2476 #endif /* ARN_TX_AGGREGATION */
2477