/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 * $FreeBSD$
 */

/*
 * Definitions for the Atheros Wireless LAN controller driver.
 */
#ifndef _DEV_ATH_ATHVAR_H
#define	_DEV_ATH_ATHVAR_H

#include <machine/atomic.h>

#include <dev/ath/ath_hal/ah.h>
#include <dev/ath/ath_hal/ah_desc.h>
#include <net80211/ieee80211_radiotap.h>
#include <dev/ath/if_athioctl.h>
#include <dev/ath/if_athrate.h>
#ifdef ATH_DEBUG_ALQ
#include <dev/ath/if_ath_alq.h>
#endif

#define	ATH_TIMEOUT	1000

/*
 * There is a separate TX ath_buf pool for management frames.
 * This ensures that management frames such as probe responses
 * and BAR frames can be transmitted during periods of high
 * TX activity.
 */
#define	ATH_MGMT_TXBUF	32

/*
 * 802.11n requires more TX and RX buffers to do AMPDU.
 */
#ifdef	ATH_ENABLE_11N
#define	ATH_TXBUF	512
#define	ATH_RXBUF	512
#endif

#ifndef ATH_RXBUF
#define	ATH_RXBUF	40	/* number of RX buffers */
#endif
#ifndef	ATH_TXBUF
#define	ATH_TXBUF	200	/* number of TX buffers */
#endif
#define	ATH_BCBUF	4	/* number of beacon buffers */

#define	ATH_TXDESC	10	/* number of descriptors per buffer */
#define	ATH_TXMAXTRY	11	/* max number of transmit attempts */
#define	ATH_TXMGTTRY	4	/* xmit attempts for mgt/ctl frames */
#define	ATH_TXINTR_PERIOD 5	/* max number of batched tx descriptors */

#define	ATH_BEACON_AIFS_DEFAULT	 1	/* default aifs for ap beacon q */
#define	ATH_BEACON_CWMIN_DEFAULT 0	/* default cwmin for ap beacon q */
#define	ATH_BEACON_CWMAX_DEFAULT 0	/* default cwmax for ap beacon q */

/*
 * The key cache is used for h/w cipher state and also for
 * tracking station state such as the current tx antenna.
 * We also setup a mapping table between key cache slot indices
 * and station state to short-circuit node lookups on rx.
 * Different parts have different size key caches.  We handle
 * up to ATH_KEYMAX entries (could dynamically allocate state).
 */
#define	ATH_KEYMAX	128	/* max key cache size we handle */
#define	ATH_KEYBYTES	(ATH_KEYMAX/NBBY)	/* storage space in bytes */
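/*
 * Illustrative sketch (not a driver API): sc_keymap, the key-cache
 * usage bitmap sized by ATH_KEYBYTES (see struct ath_softc below),
 * is the kind of bitmap normally manipulated with the setbit()/
 * clrbit()/isset() macros from <sys/param.h>.  The "keymap" and
 * "keyix" names here are hypothetical.
 *
 *	u_int8_t keymap[ATH_KEYBYTES];
 *	u_int keyix = 4;
 *
 *	if (!isset(keymap, keyix))
 *		setbit(keymap, keyix);
 */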

struct taskqueue;
struct kthread;
struct ath_buf;

#define	ATH_TID_MAX_BUFS	(2 * IEEE80211_AGGR_BAWMAX)

/*
 * Per-TID state
 *
 * Note that TID 16 (WME_NUM_TID+1) is for handling non-QoS frames.
 */
struct ath_tid {
	TAILQ_HEAD(,ath_buf)	tid_q;		/* pending buffers */
	struct ath_node		*an;		/* pointer to parent */
	int			tid;		/* tid */
	int			ac;		/* which AC gets this traffic */
	int			hwq_depth;	/* how many buffers are on HW */
	u_int			axq_depth;	/* SW queue depth */

	struct {
		TAILQ_HEAD(,ath_buf)	tid_q;		/* filtered queue */
		u_int			axq_depth;	/* SW queue depth */
	} filtq;

	/*
	 * Entry on the ath_txq; when there's traffic
	 * to send
	 */
	TAILQ_ENTRY(ath_tid)	axq_qelem;
	int			sched;
	int			paused;	/* >0 if the TID has been paused */

	/*
	 * These are flags - perhaps later collapse
	 * down to a single uint32_t ?
	 */
	int			addba_tx_pending;	/* TX ADDBA pending */
	int			bar_wait;	/* waiting for BAR */
	int			bar_tx;		/* BAR TXed */
	int			isfiltered;	/* is this node currently filtered */

	/*
	 * Is the TID being cleaned up after a transition
	 * from aggregation to non-aggregation?
	 * When this is set to 1, this TID will be paused
	 * and no further traffic will be queued until all
	 * the hardware packets pending for this TID have been
	 * TXed/completed; at which point (non-aggregation)
	 * traffic will resume being TXed.
	 */
	int			cleanup_inprogress;
	/*
	 * How many hardware-queued packets are
	 * waiting to be cleaned up.
	 * This is only valid if cleanup_inprogress is 1.
	 */
	int			incomp;

	/*
	 * The following implements a ring representing
	 * the frames in the current BAW.
	 * To avoid copying the array content each time
	 * the BAW is moved, the baw_head/baw_tail point
	 * to the current BAW begin/end; when the BAW is
	 * shifted the head/tail of the array are also
	 * appropriately shifted.
	 */
	/* active tx buffers, beginning at current BAW */
	struct ath_buf		*tx_buf[ATH_TID_MAX_BUFS];
	/* where the baw head is in the array */
	int			baw_head;
	/* where the BAW tail is in the array */
	int			baw_tail;
};
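
/*
 * Illustrative sketch (hypothetical variable names): tx_buf[] above is
 * treated as a ring, so the slot for a frame is computed relative to
 * baw_head modulo ATH_TID_MAX_BUFS rather than by shuffling array
 * entries whenever the BAW slides forward.
 *
 *	int slot;
 *
 *	slot = (tid->baw_head + offset_in_baw) % ATH_TID_MAX_BUFS;
 *	tid->tx_buf[slot] = bf;
 */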

/* driver-specific node state */
struct ath_node {
	struct ieee80211_node	an_node;	/* base class */
	u_int8_t	an_mgmtrix;	/* min h/w rate index */
	u_int8_t	an_mcastrix;	/* mcast h/w rate index */
	uint32_t	an_is_powersave;	/* node is sleeping */
	uint32_t	an_stack_psq;	/* net80211 psq isn't empty */
	uint32_t	an_tim_set;	/* TIM has been set */
	struct ath_buf	*an_ff_buf[WME_NUM_AC];	/* ff staging area */
	struct ath_tid	an_tid[IEEE80211_TID_SIZE];	/* per-TID state */
	char		an_name[32];	/* eg "wlan0_a1" */
	struct mtx	an_mtx;		/* protecting the rate control state */
	uint32_t	an_swq_depth;	/* how many SWQ packets for this node */
	int		clrdmask;	/* has clrdmask been set */
	uint32_t	an_leak_count;	/* How many frames to leak during pause */
	/* variable-length rate control state follows */
};
#define	ATH_NODE(ni)		((struct ath_node *)(ni))
#define	ATH_NODE_CONST(ni)	((const struct ath_node *)(ni))

#define	ATH_RSSI_LPF_LEN	10
#define	ATH_RSSI_DUMMY_MARKER	0x127
#define	ATH_EP_MUL(x, mul)	((x) * (mul))
#define	ATH_RSSI_IN(x)		(ATH_EP_MUL((x), HAL_RSSI_EP_MULTIPLIER))
#define	ATH_LPF_RSSI(x, y, len) \
	((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
#define	ATH_RSSI_LPF(x, y) do {						\
	if ((y) >= -20)							\
		x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
} while (0)
#define	ATH_EP_RND(x,mul) \
	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
#define	ATH_RSSI(x)		ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
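
/*
 * Worked example of the EP (extended precision) RSSI macros above,
 * assuming HAL_RSSI_EP_MULTIPLIER is 128: a raw RSSI of 40 is scaled
 * by ATH_RSSI_IN() to 40 * 128 = 5120 before filtering, each
 * ATH_RSSI_LPF() step then computes (x * 9 + y) / 10 in that scaled
 * domain (and only for y >= -20), and ATH_RSSI() converts back with
 * round-to-nearest, e.g. ATH_EP_RND(5200, 128) == 41.
 */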

typedef enum {
	ATH_BUFTYPE_NORMAL	= 0,
	ATH_BUFTYPE_MGMT	= 1,
} ath_buf_type_t;

struct ath_buf {
	TAILQ_ENTRY(ath_buf)	bf_list;
	struct ath_buf		*bf_next;	/* next buffer in the aggregate */
	int			bf_nseg;
	HAL_STATUS		bf_rxstatus;
	uint16_t		bf_flags;	/* status flags (below) */
	uint16_t		bf_descid;	/* 16 bit descriptor ID */
	struct ath_desc		*bf_desc;	/* virtual addr of desc */
	struct ath_desc_status	bf_status;	/* tx/rx status */
	bus_addr_t		bf_daddr;	/* physical addr of desc */
	bus_dmamap_t		bf_dmamap;	/* DMA map for mbuf chain */
	struct mbuf		*bf_m;		/* mbuf for buf */
	struct ieee80211_node	*bf_node;	/* pointer to the node */
	struct ath_desc		*bf_lastds;	/* last descriptor for comp status */
	struct ath_buf		*bf_last;	/* last buffer in aggregate, or self for non-aggregate */
	bus_size_t		bf_mapsize;
#define	ATH_MAX_SCATTER		ATH_TXDESC	/* max(tx,rx,beacon) desc's */
	bus_dma_segment_t	bf_segs[ATH_MAX_SCATTER];
	uint32_t		bf_nextfraglen;	/* length of next fragment */

	/* Completion function to call on TX complete (fail or not) */
	/*
	 * "fail" here is set to 1 if the queue entries were removed
	 * through a call to ath_tx_draintxq().
	 */
	void	(*bf_comp)(struct ath_softc *sc, struct ath_buf *bf, int fail);

	/* This state is kept to support software retries and aggregation */
	struct {
		uint16_t bfs_seqno;	/* sequence number of this packet */
		uint16_t bfs_ndelim;	/* number of delims for padding */

		uint8_t bfs_retries;	/* retry count */
		uint8_t bfs_tid;	/* packet TID (or TID_MAX for no QoS) */
		uint8_t bfs_nframes;	/* number of frames in aggregate */
		uint8_t bfs_pri;	/* packet AC priority */
		uint8_t bfs_tx_queue;	/* destination hardware TX queue */

		u_int32_t bfs_aggr:1,		/* part of aggregate? */
			bfs_aggrburst:1,	/* part of aggregate burst? */
			bfs_isretried:1,	/* retried frame? */
			bfs_dobaw:1,		/* actually check against BAW? */
			bfs_addedbaw:1,		/* has been added to the BAW */
			bfs_shpream:1,		/* use short preamble */
			bfs_istxfrag:1,		/* is fragmented */
			bfs_ismrr:1,		/* do multi-rate TX retry */
			bfs_doprot:1,		/* do RTS/CTS based protection */
			bfs_doratelookup:1;	/* do rate lookup before each TX */

		/*
		 * These fields are passed into the
		 * descriptor setup functions.
		 */

		/* Make this an 8 bit value? */
		HAL_PKT_TYPE bfs_atype;	/* packet type */

		uint32_t bfs_pktlen;	/* length of this packet */

		uint16_t bfs_hdrlen;	/* length of this packet header */
		uint16_t bfs_al;	/* length of aggregate */

		uint16_t bfs_txflags;	/* HAL (tx) descriptor flags */
		uint8_t bfs_txrate0;	/* first TX rate */
		uint8_t bfs_try0;	/* first try count */

		uint16_t bfs_txpower;	/* tx power */
		uint8_t bfs_ctsrate0;	/* Non-zero - use this as ctsrate */
		uint8_t bfs_ctsrate;	/* CTS rate */

		/* 16 bit? */
		int32_t bfs_keyix;	/* crypto key index */
		int32_t bfs_txantenna;	/* TX antenna config */

		/* Make this an 8 bit value? */
		enum ieee80211_protmode bfs_protmode;

		/* 16 bit? */
		uint32_t bfs_ctsduration;	/* CTS duration (pre-11n NICs) */
		struct ath_rc_series bfs_rc[ATH_RC_NUM];	/* non-11n TX series */
	} bf_state;
};
typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;

#define	ATH_BUF_MGMT	0x00000001	/* (tx) desc is a mgmt desc */
#define	ATH_BUF_BUSY	0x00000002	/* (tx) desc owned by h/w */
#define	ATH_BUF_FIFOEND	0x00000004
#define	ATH_BUF_FIFOPTR	0x00000008

#define	ATH_BUF_FLAGS_CLONE	(ATH_BUF_MGMT)
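
/*
 * Illustrative sketch (the handler name is hypothetical): TX paths
 * that need per-buffer completion work attach a handler matching the
 * bf_comp signature above; "fail" is non-zero when the buffer is
 * being reaped (e.g. via ath_tx_draintxq()) rather than completing
 * normally.
 *
 *	static void
 *	example_tx_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
 *	{
 *
 *		if (fail)
 *			return;
 *	}
 *
 *	bf->bf_comp = example_tx_comp;
 */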

/*
 * DMA state for tx/rx descriptors.
 */
struct ath_descdma {
	const char		*dd_name;
	struct ath_desc		*dd_desc;	/* descriptors */
	int			dd_descsize;	/* size of single descriptor */
	bus_addr_t		dd_desc_paddr;	/* physical addr of dd_desc */
	bus_size_t		dd_desc_len;	/* size of dd_desc */
	bus_dma_segment_t	dd_dseg;
	bus_dma_tag_t		dd_dmat;	/* bus DMA tag */
	bus_dmamap_t		dd_dmamap;	/* DMA map for descriptors */
	struct ath_buf		*dd_bufptr;	/* associated buffers */
};

/*
 * Data transmit queue state.  One of these exists for each
 * hardware transmit queue.  Packets sent to us from above
 * are assigned to queues based on their priority.  Not all
 * devices support a complete set of hardware transmit queues.
 * For those devices the array sc_ac2q will map multiple
 * priorities to fewer hardware queues (typically all to one
 * hardware queue).
 */
struct ath_txq {
	struct ath_softc	*axq_softc;	/* Needed for scheduling */
	u_int			axq_qnum;	/* hardware q number */
#define	ATH_TXQ_SWQ	(HAL_NUM_TX_QUEUES+1)	/* qnum for s/w only queue */
	u_int			axq_ac;		/* WME AC */
	u_int			axq_flags;
//#define	ATH_TXQ_PUTPENDING	0x0001	/* ath_hal_puttxbuf pending */
#define	ATH_TXQ_PUTRUNNING	0x0002	/* ath_hal_puttxbuf has been called */
	u_int			axq_depth;	/* queue depth (stat only) */
	u_int			axq_aggr_depth;	/* how many aggregates are queued */
	u_int			axq_intrcnt;	/* interrupt count */
	u_int32_t		*axq_link;	/* link ptr in last TX desc */
	TAILQ_HEAD(axq_q_s, ath_buf)	axq_q;	/* transmit queue */
	struct mtx		axq_lock;	/* lock on q and link */

	/*
	 * This is the FIFO staging buffer when doing EDMA.
	 *
	 * For legacy chips, we just push the head pointer to
	 * the hardware and we ignore this list.
	 *
	 * For EDMA, the staging buffer is treated as normal;
	 * when it's time to push a list of frames to the hardware
	 * we move that list here and we stamp buffers with
	 * flags to identify the beginning/end of that particular
	 * FIFO entry.
	 */
	struct {
		TAILQ_HEAD(axq_q_f_s, ath_buf)	axq_q;
		u_int				axq_depth;
	} fifo;
	u_int			axq_fifo_depth;	/* depth of FIFO frames */

	/*
	 * XXX the holdingbf field is protected by the TXBUF lock
	 * for now, NOT the TXQ lock.
	 *
	 * Architecturally, it would likely be better to move
	 * the holdingbf field to a separate array in ath_softc
	 * just to highlight that it's not protected by the normal
	 * TX path lock.
	 */
	struct ath_buf		*axq_holdingbf;	/* holding TX buffer */
	char			axq_name[12];	/* e.g. "ath0_txq4" */

	/* Per-TID traffic queue for software -> hardware TX */
	/*
	 * This is protected by the general TX path lock, not (for now)
	 * by the TXQ lock.
	 */
	TAILQ_HEAD(axq_t_s,ath_tid)	axq_tidq;
};

#define	ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
	snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
	    device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
	mtx_init(&(_tq)->axq_lock, (_tq)->axq_name, NULL, MTX_DEF); \
} while (0)
#define	ATH_TXQ_LOCK_DESTROY(_tq)	mtx_destroy(&(_tq)->axq_lock)
#define	ATH_TXQ_LOCK(_tq)		mtx_lock(&(_tq)->axq_lock)
#define	ATH_TXQ_UNLOCK(_tq)		mtx_unlock(&(_tq)->axq_lock)
#define	ATH_TXQ_LOCK_ASSERT(_tq)	mtx_assert(&(_tq)->axq_lock, MA_OWNED)
#define	ATH_TXQ_UNLOCK_ASSERT(_tq)	mtx_assert(&(_tq)->axq_lock, \
					    MA_NOTOWNED)

#define	ATH_NODE_LOCK(_an)		mtx_lock(&(_an)->an_mtx)
#define	ATH_NODE_UNLOCK(_an)		mtx_unlock(&(_an)->an_mtx)
#define	ATH_NODE_LOCK_ASSERT(_an)	mtx_assert(&(_an)->an_mtx, MA_OWNED)
#define	ATH_NODE_UNLOCK_ASSERT(_an)	mtx_assert(&(_an)->an_mtx, \
					    MA_NOTOWNED)

/*
 * These are for the hardware queue.
 */
#define	ATH_TXQ_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
} while (0)
#define	ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
} while (0)
#define	ATH_TXQ_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->axq_q, _elm, _field); \
	(_tq)->axq_depth--; \
} while (0)
#define	ATH_TXQ_FIRST(_tq)		TAILQ_FIRST(&(_tq)->axq_q)
#define	ATH_TXQ_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->axq_q, _field)
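
/*
 * Illustrative sketch: queueing a buffer to a hardware TXQ with the
 * macros above, under the per-queue lock.  Only the locking and
 * depth-accounting pattern is shown; the real callers live elsewhere
 * in the driver.
 *
 *	ATH_TXQ_LOCK(txq);
 *	ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
 *	ATH_TXQ_UNLOCK(txq);
 */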

/*
 * These are for the TID software queue.
 */
#define	ATH_TID_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define	ATH_TID_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define	ATH_TID_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->tid_q, _elm, _field); \
	(_tq)->axq_depth--; \
	(_tq)->an->an_swq_depth--; \
} while (0)
#define	ATH_TID_FIRST(_tq)		TAILQ_FIRST(&(_tq)->tid_q)
#define	ATH_TID_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->tid_q, _field)

/*
 * These are for the TID filtered frame queue
 */
#define	ATH_TID_FILT_INSERT_HEAD(_tq, _elm, _field) do { \
	TAILQ_INSERT_HEAD(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define	ATH_TID_FILT_INSERT_TAIL(_tq, _elm, _field) do { \
	TAILQ_INSERT_TAIL(&(_tq)->filtq.tid_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->an->an_swq_depth++; \
} while (0)
#define	ATH_TID_FILT_REMOVE(_tq, _elm, _field) do { \
	TAILQ_REMOVE(&(_tq)->filtq.tid_q, _elm, _field); \
	(_tq)->axq_depth--; \
	(_tq)->an->an_swq_depth--; \
} while (0)
#define	ATH_TID_FILT_FIRST(_tq)		TAILQ_FIRST(&(_tq)->filtq.tid_q)
#define	ATH_TID_FILT_LAST(_tq, _field)	TAILQ_LAST(&(_tq)->filtq.tid_q,_field)

struct ath_vap {
	struct ieee80211vap av_vap;		/* base class */
	int		av_bslot;		/* beacon slot index */
	struct ath_buf	*av_bcbuf;		/* beacon buffer */
	struct ieee80211_beacon_offsets av_boff; /* dynamic update state */
	struct ath_txq	av_mcastq;		/* buffered mcast s/w queue */

	void		(*av_recv_mgmt)(struct ieee80211_node *,
				struct mbuf *, int, int, int);
	int		(*av_newstate)(struct ieee80211vap *,
				enum ieee80211_state, int);
	void		(*av_bmiss)(struct ieee80211vap *);
	void		(*av_node_ps)(struct ieee80211_node *, int);
	int		(*av_set_tim)(struct ieee80211_node *, int);
	void		(*av_recv_pspoll)(struct ieee80211_node *,
				struct mbuf *);
};
#define	ATH_VAP(vap)	((struct ath_vap *)(vap))
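
/*
 * Illustrative sketch (hypothetical function name): the av_* members
 * above follow the usual net80211 pattern of saving the methods that
 * net80211 installed on the vap so the driver can interpose its own
 * and chain to the originals, e.g. for state transitions:
 *
 *	avp->av_newstate = vap->iv_newstate;
 *	vap->iv_newstate = example_newstate;
 *
 * where example_newstate() does its driver-specific work and then
 * calls avp->av_newstate(vap, nstate, arg).
 */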

struct taskqueue;
struct ath_tx99;

/*
 * Whether to reset the TX/RX queue with or without
 * a queue flush.
 */
typedef enum {
	ATH_RESET_DEFAULT = 0,
	ATH_RESET_NOLOSS = 1,
	ATH_RESET_FULL = 2,
} ATH_RESET_TYPE;

struct ath_rx_methods {
	void		(*recv_sched_queue)(struct ath_softc *sc,
			    HAL_RX_QUEUE q, int dosched);
	void		(*recv_sched)(struct ath_softc *sc, int dosched);
	void		(*recv_stop)(struct ath_softc *sc, int dodelay);
	int		(*recv_start)(struct ath_softc *sc);
	void		(*recv_flush)(struct ath_softc *sc);
	void		(*recv_tasklet)(void *arg, int npending);
	int		(*recv_rxbuf_init)(struct ath_softc *sc,
			    struct ath_buf *bf);
	int		(*recv_setup)(struct ath_softc *sc);
	int		(*recv_teardown)(struct ath_softc *sc);
};

/*
 * Represent the current state of the RX FIFO.
 */
struct ath_rx_edma {
	struct ath_buf	**m_fifo;
	int		m_fifolen;
	int		m_fifo_head;
	int		m_fifo_tail;
	int		m_fifo_depth;
	struct mbuf	*m_rxpending;
};

struct ath_tx_edma_fifo {
	struct ath_buf	**m_fifo;
	int		m_fifolen;
	int		m_fifo_head;
	int		m_fifo_tail;
	int		m_fifo_depth;
};

struct ath_tx_methods {
	int		(*xmit_setup)(struct ath_softc *sc);
	int		(*xmit_teardown)(struct ath_softc *sc);
	void		(*xmit_attach_comp_func)(struct ath_softc *sc);

	void		(*xmit_dma_restart)(struct ath_softc *sc,
			    struct ath_txq *txq);
	void		(*xmit_handoff)(struct ath_softc *sc,
			    struct ath_txq *txq, struct ath_buf *bf);
	void		(*xmit_drain)(struct ath_softc *sc,
			    ATH_RESET_TYPE reset_type);
};
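
/*
 * Illustrative sketch: sc_rx/sc_tx in struct ath_softc (below) hold
 * one of these method tables, presumably selected at attach time based
 * on whether the chip is EDMA-capable, so common code can dispatch
 * without knowing the descriptor format, e.g.:
 *
 *	sc->sc_tx.xmit_handoff(sc, txq, bf);
 */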

struct ath_softc {
	struct ifnet		*sc_ifp;	/* interface common */
	struct ath_stats	sc_stats;	/* interface statistics */
	struct ath_tx_aggr_stats	sc_aggr_stats;
	struct ath_intr_stats	sc_intr_stats;
	uint64_t		sc_debug;
	uint64_t		sc_ktrdebug;
	int			sc_nvaps;	/* # vaps */
	int			sc_nstavaps;	/* # station vaps */
	int			sc_nmeshvaps;	/* # mbss vaps */
	u_int8_t		sc_hwbssidmask[IEEE80211_ADDR_LEN];
	u_int8_t		sc_nbssid0;	/* # vap's using base mac */
	uint32_t		sc_bssidmask;	/* bssid mask */

	struct ath_rx_methods	sc_rx;
	struct ath_rx_edma	sc_rxedma[HAL_NUM_RX_QUEUES];	/* HP/LP queues */
	ath_bufhead		sc_rx_rxlist[HAL_NUM_RX_QUEUES];	/* deferred RX completion */
	struct ath_tx_methods	sc_tx;
	struct ath_tx_edma_fifo	sc_txedma[HAL_NUM_TX_QUEUES];

	/*
	 * This is (currently) protected by the TX queue lock;
	 * it should migrate to a separate lock later
	 * so as to minimise contention.
	 */
	ath_bufhead		sc_txbuf_list;

	int			sc_rx_statuslen;
	int			sc_tx_desclen;
	int			sc_tx_statuslen;
	int			sc_tx_nmaps;	/* Number of TX maps */
	int			sc_edma_bufsize;

	void			(*sc_node_cleanup)(struct ieee80211_node *);
	void			(*sc_node_free)(struct ieee80211_node *);
	device_t		sc_dev;
	HAL_BUS_TAG		sc_st;		/* bus space tag */
	HAL_BUS_HANDLE		sc_sh;		/* bus space handle */
	bus_dma_tag_t		sc_dmat;	/* bus DMA tag */
	struct mtx		sc_mtx;		/* master lock (recursive) */
	struct mtx		sc_pcu_mtx;	/* PCU access mutex */
	char			sc_pcu_mtx_name[32];
	struct mtx		sc_rx_mtx;	/* RX access mutex */
	char			sc_rx_mtx_name[32];
	struct mtx		sc_tx_mtx;	/* TX handling/comp mutex */
	char			sc_tx_mtx_name[32];
	struct mtx		sc_tx_ic_mtx;	/* TX queue mutex */
	char			sc_tx_ic_mtx_name[32];
	struct taskqueue	*sc_tq;		/* private task queue */
	struct ath_hal		*sc_ah;		/* Atheros HAL */
	struct ath_ratectrl	*sc_rc;		/* tx rate control support */
	struct ath_tx99		*sc_tx99;	/* tx99 adjunct state */
	void			(*sc_setdefantenna)(struct ath_softc *, u_int);

	/*
	 * First set of flags.
	 */
	uint32_t		sc_invalid  : 1,/* disable hardware accesses */
				sc_mrretry  : 1,/* multi-rate retry support */
				sc_mrrprot  : 1,/* MRR + protection support */
				sc_softled  : 1,/* enable LED gpio status */
				sc_hardled  : 1,/* enable MAC LED status */
				sc_splitmic : 1,/* split TKIP MIC keys */
				sc_needmib  : 1,/* enable MIB stats intr */
				sc_diversity: 1,/* enable rx diversity */
				sc_hasveol  : 1,/* tx VEOL support */
				sc_ledstate : 1,/* LED on/off state */
				sc_blinking : 1,/* LED blink operation active */
				sc_mcastkey : 1,/* mcast key cache search */
				sc_scanning : 1,/* scanning active */
				sc_syncbeacon:1,/* sync/resync beacon timers */
				sc_hasclrkey: 1,/* CLR key supported */
				sc_xchanmode: 1,/* extended channel mode */
				sc_outdoor  : 1,/* outdoor operation */
				sc_dturbo   : 1,/* dynamic turbo in use */
				sc_hasbmask : 1,/* bssid mask support */
				sc_hasbmatch: 1,/* bssid match disable support*/
				sc_hastsfadd: 1,/* tsf adjust support */
				sc_beacons  : 1,/* beacons running */
				sc_swbmiss  : 1,/* sta mode using sw bmiss */
				sc_stagbeacons:1,/* use staggered beacons */
				sc_wmetkipmic:1,/* can do WME+TKIP MIC */
				sc_resume_up: 1,/* on resume, start all vaps */
				sc_tdma	    : 1,/* TDMA in use */
				sc_setcca   : 1,/* set/clr CCA with TDMA */
				sc_resetcal : 1,/* reset cal state next trip */
				sc_rxslink  : 1,/* do self-linked final descriptor */
				sc_rxtsf32  : 1,/* RX desc TSF is 32 bits */
				sc_isedma   : 1;/* supports EDMA */

	/*
	 * Second set of flags.
	 */
	u_int32_t		sc_use_ent  : 1,
				sc_rx_stbc  : 1,
				sc_tx_stbc  : 1,
				sc_hasenforcetxop : 1,	/* support enforce TxOP */
				sc_hasdivcomb : 1,	/* RX diversity combining */
				sc_rx_lnamixer : 1;	/* RX using LNA mixing */

	int			sc_cabq_enable;	/* Enable cabq transmission */

	/*
	 * Enterprise mode configuration for AR9380 and later chipsets.
	 */
	uint32_t		sc_ent_cfg;

	uint32_t		sc_eerd;	/* regdomain from EEPROM */
	uint32_t		sc_eecc;	/* country code from EEPROM */
	/* rate tables */
	const HAL_RATE_TABLE	*sc_rates[IEEE80211_MODE_MAX];
	const HAL_RATE_TABLE	*sc_currates;	/* current rate table */
	enum ieee80211_phymode	sc_curmode;	/* current phy mode */
	HAL_OPMODE		sc_opmode;	/* current operating mode */
	u_int16_t		sc_curtxpow;	/* current tx power limit */
	u_int16_t		sc_curaid;	/* current association id */
	struct ieee80211_channel *sc_curchan;	/* current installed channel */
	u_int8_t		sc_curbssid[IEEE80211_ADDR_LEN];
	u_int8_t		sc_rixmap[256];	/* IEEE to h/w rate table ix */
	struct {
		u_int8_t	ieeerate;	/* IEEE rate */
		u_int8_t	rxflags;	/* radiotap rx flags */
		u_int8_t	txflags;	/* radiotap tx flags */
		u_int16_t	ledon;		/* softled on time */
		u_int16_t	ledoff;		/* softled off time */
	} sc_hwmap[32];				/* h/w rate ix mappings */
	u_int8_t		sc_protrix;	/* protection rate index */
	u_int8_t		sc_lastdatarix;	/* last data frame rate index */
	u_int			sc_mcastrate;	/* ieee rate for mcastrateix */
	u_int			sc_fftxqmin;	/* min frames before staging */
	u_int			sc_fftxqmax;	/* max frames before drop */
	u_int			sc_txantenna;	/* tx antenna (fixed or auto) */

	HAL_INT			sc_imask;	/* interrupt mask copy */

	/*
	 * These are modified in the interrupt handler as well as
	 * the task queues and other contexts.  Thus these must be
	 * protected by a mutex, or they could clash.
	 *
	 * For now, access to these is behind the ATH_LOCK,
	 * just to save time.
	 */
	uint32_t		sc_txq_active;	/* bitmap of active TXQs */
	uint32_t		sc_kickpcu;	/* whether to kick the PCU */
	uint32_t		sc_rxproc_cnt;	/* In RX processing */
	uint32_t		sc_txproc_cnt;	/* In TX processing */
	uint32_t		sc_txstart_cnt;	/* In TX output (raw/start) */
	uint32_t		sc_inreset_cnt;	/* In active reset/chanchange */
	uint32_t		sc_txrx_cnt;	/* refcount on stop/start'ing TX */
	uint32_t		sc_intr_cnt;	/* refcount on interrupt handling */

	u_int			sc_keymax;	/* size of key cache */
	u_int8_t		sc_keymap[ATH_KEYBYTES];/* key use bit map */

	/*
	 * Software based LED blinking
	 */
	u_int			sc_ledpin;	/* GPIO pin for driving LED */
	u_int			sc_ledon;	/* pin setting for LED on */
	u_int			sc_ledidle;	/* idle polling interval */
	int			sc_ledevent;	/* time of last LED event */
	u_int8_t		sc_txrix;	/* current tx rate for LED */
	u_int16_t		sc_ledoff;	/* off time for current blink */
	struct callout		sc_ledtimer;	/* led off timer */

	/*
	 * Hardware based LED blinking
	 */
	int			sc_led_pwr_pin;	/* MAC power LED GPIO pin */
	int			sc_led_net_pin;	/* MAC network LED GPIO pin */

	u_int			sc_rfsilentpin;	/* GPIO pin for rfkill int */
	u_int			sc_rfsilentpol;	/* pin setting for rfkill on */

	struct ath_descdma	sc_rxdma;	/* RX descriptors */
	ath_bufhead		sc_rxbuf;	/* receive buffer */
	u_int32_t		*sc_rxlink;	/* link ptr in last RX desc */
	struct task		sc_rxtask;	/* rx int processing */
	u_int8_t		sc_defant;	/* current default antenna */
	u_int8_t		sc_rxotherant;	/* rx's on non-default antenna*/
	u_int64_t		sc_lastrx;	/* tsf at last rx'd frame */
	struct ath_rx_status	*sc_lastrs;	/* h/w status of last rx */
	struct ath_rx_radiotap_header sc_rx_th;
	int			sc_rx_th_len;
	u_int			sc_monpass;	/* frames to pass in mon.mode */

	struct ath_descdma	sc_txdma;	/* TX descriptors */
	uint16_t		sc_txbuf_descid;
	ath_bufhead		sc_txbuf;	/* transmit buffer */
	int			sc_txbuf_cnt;	/* how many buffers avail */
	struct ath_descdma	sc_txdma_mgmt;	/* mgmt TX descriptors */
	ath_bufhead		sc_txbuf_mgmt;	/* mgmt transmit buffer */
	struct ath_descdma	sc_txsdma;	/* EDMA TX status desc's */
	struct mtx		sc_txbuflock;	/* txbuf lock */
	char			sc_txname[12];	/* e.g. "ath0_buf" */
"ath0_buf" */ 732 u_int sc_txqsetup; /* h/w queues setup */ 733 u_int sc_txintrperiod;/* tx interrupt batching */ 734 struct ath_txq sc_txq[HAL_NUM_TX_QUEUES]; 735 struct ath_txq *sc_ac2q[5]; /* WME AC -> h/w q map */ 736 struct task sc_txtask; /* tx int processing */ 737 struct task sc_txqtask; /* tx proc processing */ 738 739 struct ath_descdma sc_txcompdma; /* TX EDMA completion */ 740 struct mtx sc_txcomplock; /* TX EDMA completion lock */ 741 char sc_txcompname[12]; /* eg ath0_txcomp */ 742 743 int sc_wd_timer; /* count down for wd timer */ 744 struct callout sc_wd_ch; /* tx watchdog timer */ 745 struct ath_tx_radiotap_header sc_tx_th; 746 int sc_tx_th_len; 747 748 struct ath_descdma sc_bdma; /* beacon descriptors */ 749 ath_bufhead sc_bbuf; /* beacon buffers */ 750 u_int sc_bhalq; /* HAL q for outgoing beacons */ 751 u_int sc_bmisscount; /* missed beacon transmits */ 752 u_int32_t sc_ant_tx[8]; /* recent tx frames/antenna */ 753 struct ath_txq *sc_cabq; /* tx q for cab frames */ 754 struct task sc_bmisstask; /* bmiss int processing */ 755 struct task sc_bstucktask; /* stuck beacon processing */ 756 struct task sc_resettask; /* interface reset task */ 757 struct task sc_fataltask; /* fatal task */ 758 enum { 759 OK, /* no change needed */ 760 UPDATE, /* update pending */ 761 COMMIT /* beacon sent, commit change */ 762 } sc_updateslot; /* slot time update fsm */ 763 int sc_slotupdate; /* slot to advance fsm */ 764 struct ieee80211vap *sc_bslot[ATH_BCBUF]; 765 int sc_nbcnvaps; /* # vaps with beacons */ 766 767 struct callout sc_cal_ch; /* callout handle for cals */ 768 int sc_lastlongcal; /* last long cal completed */ 769 int sc_lastcalreset;/* last cal reset done */ 770 int sc_lastani; /* last ANI poll */ 771 int sc_lastshortcal; /* last short calibration */ 772 HAL_BOOL sc_doresetcal; /* Yes, we're doing a reset cal atm */ 773 HAL_NODE_STATS sc_halstats; /* station-mode rssi stats */ 774 u_int sc_tdmadbaprep; /* TDMA DBA prep time */ 775 u_int sc_tdmaswbaprep;/* TDMA SWBA prep time */ 776 u_int sc_tdmaswba; /* TDMA SWBA counter */ 777 u_int32_t sc_tdmabintval; /* TDMA beacon interval (TU) */ 778 u_int32_t sc_tdmaguard; /* TDMA guard time (usec) */ 779 u_int sc_tdmaslotlen; /* TDMA slot length (usec) */ 780 u_int32_t sc_avgtsfdeltap;/* TDMA slot adjust (+) */ 781 u_int32_t sc_avgtsfdeltam;/* TDMA slot adjust (-) */ 782 uint16_t *sc_eepromdata; /* Local eeprom data, if AR9100 */ 783 uint32_t sc_txchainmask; /* hardware TX chainmask */ 784 uint32_t sc_rxchainmask; /* hardware RX chainmask */ 785 uint32_t sc_cur_txchainmask; /* currently configured TX chainmask */ 786 uint32_t sc_cur_rxchainmask; /* currently configured RX chainmask */ 787 uint32_t sc_rts_aggr_limit; /* TX limit on RTS aggregates */ 788 int sc_aggr_limit; /* TX limit on all aggregates */ 789 int sc_delim_min_pad; /* Minimum delimiter count */ 790 791 /* Queue limits */ 792 793 /* 794 * To avoid queue starvation in congested conditions, 795 * these parameters tune the maximum number of frames 796 * queued to the data/mcastq before they're dropped. 797 * 798 * This is to prevent: 799 * + a single destination overwhelming everything, including 800 * management/multicast frames; 801 * + multicast frames overwhelming everything (when the 802 * air is sufficiently busy that cabq can't drain.) 803 * + A node in powersave shouldn't be allowed to exhaust 804 * all available mbufs; 805 * 806 * These implement: 807 * + data_minfree is the maximum number of free buffers 808 * overall to successfully allow a data frame. 
	 *
	 * + mcastq_maxdepth is the maximum depth allowed of the cabq.
	 */
	int			sc_txq_node_maxdepth;
	int			sc_txq_data_minfree;
	int			sc_txq_mcastq_maxdepth;
	int			sc_txq_node_psq_maxdepth;

	/*
	 * Software queue twiddles
	 *
	 * hwq_limit_nonaggr:
	 *		when to begin limiting non-aggregate frames to the
	 *		hardware queue, regardless of the TID.
	 * hwq_limit_aggr:
	 *		when to begin limiting A-MPDU frames to the
	 *		hardware queue, regardless of the TID.
	 * tid_hwq_lo:	how low the per-TID hwq count has to be before the
	 *		TID will be scheduled again
	 * tid_hwq_hi:	how many frames to queue to the HWQ before the TID
	 *		stops being scheduled.
	 */
	int			sc_hwq_limit_nonaggr;
	int			sc_hwq_limit_aggr;
	int			sc_tid_hwq_lo;
	int			sc_tid_hwq_hi;

	/* DFS related state */
	void			*sc_dfs;	/* Used by an optional DFS module */
	int			sc_dodfs;	/* Whether to enable DFS rx filter bits */
	struct task		sc_dfstask;	/* DFS processing task */

	/* Spectral related state */
	void			*sc_spectral;
	int			sc_dospectral;

	/* LNA diversity related state */
	void			*sc_lna_div;
	int			sc_dolnadiv;

	/* ALQ */
#ifdef	ATH_DEBUG_ALQ
	struct if_ath_alq	sc_alq;
#endif

	/* TX AMPDU handling */
	int			(*sc_addba_request)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *, int, int, int);
	int			(*sc_addba_response)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *, int, int, int);
	void			(*sc_addba_stop)(struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *);
	void			(*sc_addba_response_timeout)
				    (struct ieee80211_node *,
				    struct ieee80211_tx_ampdu *);
	void			(*sc_bar_response)(struct ieee80211_node *ni,
				    struct ieee80211_tx_ampdu *tap,
				    int status);
};

#define	ATH_LOCK_INIT(_sc) \
	mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
		 NULL, MTX_DEF | MTX_RECURSE)
#define	ATH_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_mtx)
#define	ATH_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define	ATH_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define	ATH_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
#define	ATH_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)

/*
 * The TX lock is non-reentrant and serialises the TX frame send
 * and completion operations.
 */
#define	ATH_TX_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_tx_mtx_name, \
	    sizeof((_sc)->sc_tx_mtx_name), \
	    "%s TX lock", \
	    device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_tx_mtx, (_sc)->sc_tx_mtx_name, \
	    NULL, MTX_DEF); \
} while (0)
#define	ATH_TX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_tx_mtx)
#define	ATH_TX_LOCK(_sc)		mtx_lock(&(_sc)->sc_tx_mtx)
#define	ATH_TX_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_tx_mtx)
#define	ATH_TX_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->sc_tx_mtx, \
					    MA_OWNED)
#define	ATH_TX_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_mtx, \
					    MA_NOTOWNED)
#define	ATH_TX_TRYLOCK(_sc)		(mtx_owned(&(_sc)->sc_tx_mtx) != 0 && \
					    mtx_trylock(&(_sc)->sc_tx_mtx))

/*
 * The IC TX lock is non-reentrant and serialises packet queuing from
 * the upper layers.
 */
#define	ATH_TX_IC_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_tx_ic_mtx_name, \
	    sizeof((_sc)->sc_tx_ic_mtx_name), \
	    "%s IC TX lock", \
	    device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_tx_ic_mtx, (_sc)->sc_tx_ic_mtx_name, \
	    NULL, MTX_DEF); \
} while (0)
#define	ATH_TX_IC_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_tx_ic_mtx)
#define	ATH_TX_IC_LOCK(_sc)		mtx_lock(&(_sc)->sc_tx_ic_mtx)
#define	ATH_TX_IC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_tx_ic_mtx)
#define	ATH_TX_IC_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_ic_mtx, \
					    MA_OWNED)
#define	ATH_TX_IC_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_tx_ic_mtx, \
					    MA_NOTOWNED)

/*
 * The PCU lock is non-recursive and should be treated as a spinlock.
 * Although currently the interrupt code is run in netisr context and
 * doesn't require this, this may change in the future.
 * Please keep this in mind when protecting certain code paths
 * with the PCU lock.
 *
 * The PCU lock is used to serialise access to the PCU so things such
 * as TX, RX, state change (eg channel change), channel reset and updates
 * from interrupt context (eg kickpcu, txqactive bits) do not clash.
 *
 * Although the current single-thread taskqueue mechanism protects the
 * majority of these situations by simply serialising them, there are
 * a few others which occur at the same time.  These include the TX path
 * (which only acquires ATH_LOCK when recycling buffers to the free list),
 * ath_set_channel, the channel scanning API and perhaps quite a bit more.
 */
#define	ATH_PCU_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_pcu_mtx_name, \
	    sizeof((_sc)->sc_pcu_mtx_name), \
	    "%s PCU lock", \
	    device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_pcu_mtx, (_sc)->sc_pcu_mtx_name, \
	    NULL, MTX_DEF); \
} while (0)
#define	ATH_PCU_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_LOCK(_sc)		mtx_lock(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_pcu_mtx)
#define	ATH_PCU_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_pcu_mtx, \
					    MA_OWNED)
#define	ATH_PCU_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_pcu_mtx, \
					    MA_NOTOWNED)
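
/*
 * Illustrative sketch of the intended PCU lock usage: a path that is
 * about to reset the hardware bumps sc_inreset_cnt under the PCU lock
 * so the TX/RX paths can see that a reset is pending, drops the lock
 * while the reset work is carried out, and then decrements the count:
 *
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt++;
 *	ATH_PCU_UNLOCK(sc);
 *
 *	(reset work happens here)
 *
 *	ATH_PCU_LOCK(sc);
 *	sc->sc_inreset_cnt--;
 *	ATH_PCU_UNLOCK(sc);
 */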

/*
 * The RX lock is primarily a(nother) workaround to ensure that the
 * RX FIFO/list isn't modified by various execution paths.
 * Even though RX occurs in a single context (the ath taskqueue), the
 * RX path can be executed via various reset/channel change paths.
 */
#define	ATH_RX_LOCK_INIT(_sc) do {\
	snprintf((_sc)->sc_rx_mtx_name, \
	    sizeof((_sc)->sc_rx_mtx_name), \
	    "%s RX lock", \
	    device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_rx_mtx, (_sc)->sc_rx_mtx_name, \
	    NULL, MTX_DEF); \
} while (0)
#define	ATH_RX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_rx_mtx)
#define	ATH_RX_LOCK(_sc)		mtx_lock(&(_sc)->sc_rx_mtx)
#define	ATH_RX_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_rx_mtx)
#define	ATH_RX_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->sc_rx_mtx, \
					    MA_OWNED)
#define	ATH_RX_UNLOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sc_rx_mtx, \
					    MA_NOTOWNED)

#define	ATH_TXQ_SETUP(sc, i)	((sc)->sc_txqsetup & (1<<i))
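
/*
 * Illustrative sketch: sc_txqsetup is a bitmap of the hardware TX
 * queues that have been allocated, so cleanup-style loops walk every
 * possible queue and test the bit first:
 *
 *	int i;
 *
 *	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
 *		if (ATH_TXQ_SETUP(sc, i))
 *			example_cleanupq(sc, &sc->sc_txq[i]);
 *
 * (example_cleanupq() is a stand-in for whatever per-queue work the
 * caller needs to do.)
 */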

#define	ATH_TXBUF_LOCK_INIT(_sc) do { \
	snprintf((_sc)->sc_txname, sizeof((_sc)->sc_txname), "%s_buf", \
		device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_txbuflock, (_sc)->sc_txname, NULL, MTX_DEF); \
} while (0)
#define	ATH_TXBUF_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_LOCK(_sc)		mtx_lock(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_txbuflock)
#define	ATH_TXBUF_LOCK_ASSERT(_sc) \
	mtx_assert(&(_sc)->sc_txbuflock, MA_OWNED)
#define	ATH_TXBUF_UNLOCK_ASSERT(_sc) \
	mtx_assert(&(_sc)->sc_txbuflock, MA_NOTOWNED)

#define	ATH_TXSTATUS_LOCK_INIT(_sc) do { \
	snprintf((_sc)->sc_txcompname, sizeof((_sc)->sc_txcompname), \
		"%s_buf", \
		device_get_nameunit((_sc)->sc_dev)); \
	mtx_init(&(_sc)->sc_txcomplock, (_sc)->sc_txcompname, NULL, \
		MTX_DEF); \
} while (0)
#define	ATH_TXSTATUS_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_LOCK(_sc)		mtx_lock(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_UNLOCK(_sc)	mtx_unlock(&(_sc)->sc_txcomplock)
#define	ATH_TXSTATUS_LOCK_ASSERT(_sc) \
	mtx_assert(&(_sc)->sc_txcomplock, MA_OWNED)

int	ath_attach(u_int16_t, struct ath_softc *);
int	ath_detach(struct ath_softc *);
void	ath_resume(struct ath_softc *);
void	ath_suspend(struct ath_softc *);
void	ath_shutdown(struct ath_softc *);
void	ath_intr(void *);

/*
 * HAL definitions to comply with local coding convention.
 */
#define	ath_hal_detach(_ah) \
	((*(_ah)->ah_detach)((_ah)))
#define	ath_hal_reset(_ah, _opmode, _chan, _outdoor, _pstatus) \
	((*(_ah)->ah_reset)((_ah), (_opmode), (_chan), (_outdoor), (_pstatus)))
#define	ath_hal_macversion(_ah) \
	(((_ah)->ah_macVersion << 4) | ((_ah)->ah_macRev))
#define	ath_hal_getratetable(_ah, _mode) \
	((*(_ah)->ah_getRateTable)((_ah), (_mode)))
#define	ath_hal_getmac(_ah, _mac) \
	((*(_ah)->ah_getMacAddress)((_ah), (_mac)))
#define	ath_hal_setmac(_ah, _mac) \
	((*(_ah)->ah_setMacAddress)((_ah), (_mac)))
#define	ath_hal_getbssidmask(_ah, _mask) \
	((*(_ah)->ah_getBssIdMask)((_ah), (_mask)))
#define	ath_hal_setbssidmask(_ah, _mask) \
	((*(_ah)->ah_setBssIdMask)((_ah), (_mask)))
#define	ath_hal_intrset(_ah, _mask) \
	((*(_ah)->ah_setInterrupts)((_ah), (_mask)))
#define	ath_hal_intrget(_ah) \
	((*(_ah)->ah_getInterrupts)((_ah)))
#define	ath_hal_intrpend(_ah) \
	((*(_ah)->ah_isInterruptPending)((_ah)))
#define	ath_hal_getisr(_ah, _pmask) \
	((*(_ah)->ah_getPendingInterrupts)((_ah), (_pmask)))
#define	ath_hal_updatetxtriglevel(_ah, _inc) \
	((*(_ah)->ah_updateTxTrigLevel)((_ah), (_inc)))
#define	ath_hal_setpower(_ah, _mode) \
	((*(_ah)->ah_setPowerMode)((_ah), (_mode), AH_TRUE))
#define	ath_hal_keycachesize(_ah) \
	((*(_ah)->ah_getKeyCacheSize)((_ah)))
#define	ath_hal_keyreset(_ah, _ix) \
	((*(_ah)->ah_resetKeyCacheEntry)((_ah), (_ix)))
#define	ath_hal_keyset(_ah, _ix, _pk, _mac) \
	((*(_ah)->ah_setKeyCacheEntry)((_ah), (_ix), (_pk), (_mac), AH_FALSE))
#define	ath_hal_keyisvalid(_ah, _ix) \
	(((*(_ah)->ah_isKeyCacheEntryValid)((_ah), (_ix))))
#define	ath_hal_keysetmac(_ah, _ix, _mac) \
	((*(_ah)->ah_setKeyCacheEntryMac)((_ah), (_ix), (_mac)))
#define	ath_hal_getrxfilter(_ah) \
	((*(_ah)->ah_getRxFilter)((_ah)))
#define	ath_hal_setrxfilter(_ah, _filter) \
	((*(_ah)->ah_setRxFilter)((_ah), (_filter)))
#define	ath_hal_setmcastfilter(_ah, _mfilt0, _mfilt1) \
	((*(_ah)->ah_setMulticastFilter)((_ah), (_mfilt0), (_mfilt1)))
#define	ath_hal_waitforbeacon(_ah, _bf) \
	((*(_ah)->ah_waitForBeaconDone)((_ah), (_bf)->bf_daddr))
#define	ath_hal_putrxbuf(_ah, _bufaddr, _rxq) \
	((*(_ah)->ah_setRxDP)((_ah), (_bufaddr), (_rxq)))
/* NB: common across all chips */
#define	AR_TSF_L32	0x804c	/* MAC local clock lower 32 bits */
#define	ath_hal_gettsf32(_ah) \
	OS_REG_READ(_ah, AR_TSF_L32)
#define	ath_hal_gettsf64(_ah) \
	((*(_ah)->ah_getTsf64)((_ah)))
#define	ath_hal_settsf64(_ah, _val) \
	((*(_ah)->ah_setTsf64)((_ah), (_val)))
#define	ath_hal_resettsf(_ah) \
	((*(_ah)->ah_resetTsf)((_ah)))
#define	ath_hal_rxena(_ah) \
	((*(_ah)->ah_enableReceive)((_ah)))
#define	ath_hal_puttxbuf(_ah, _q, _bufaddr) \
	((*(_ah)->ah_setTxDP)((_ah), (_q), (_bufaddr)))
#define	ath_hal_gettxbuf(_ah, _q) \
	((*(_ah)->ah_getTxDP)((_ah), (_q)))
#define	ath_hal_numtxpending(_ah, _q) \
	((*(_ah)->ah_numTxPending)((_ah), (_q)))
#define	ath_hal_getrxbuf(_ah, _rxq) \
	((*(_ah)->ah_getRxDP)((_ah), (_rxq)))
#define	ath_hal_txstart(_ah, _q) \
	((*(_ah)->ah_startTxDma)((_ah), (_q)))
#define	ath_hal_setchannel(_ah, _chan) \
	((*(_ah)->ah_setChannel)((_ah), (_chan)))
#define	ath_hal_calibrate(_ah, _chan, _iqcal) \
	((*(_ah)->ah_perCalibration)((_ah), (_chan), (_iqcal)))
#define	ath_hal_calibrateN(_ah, _chan, _lcal, _isdone) \
	((*(_ah)->ah_perCalibrationN)((_ah), (_chan), 0x1, (_lcal), (_isdone)))
#define	ath_hal_calreset(_ah, _chan) \
	((*(_ah)->ah_resetCalValid)((_ah), (_chan)))
#define	ath_hal_setledstate(_ah, _state) \
	((*(_ah)->ah_setLedState)((_ah), (_state)))
#define	ath_hal_beaconinit(_ah, _nextb, _bperiod) \
	((*(_ah)->ah_beaconInit)((_ah), (_nextb), (_bperiod)))
#define	ath_hal_beaconreset(_ah) \
	((*(_ah)->ah_resetStationBeaconTimers)((_ah)))
#define	ath_hal_beaconsettimers(_ah, _bt) \
	((*(_ah)->ah_setBeaconTimers)((_ah), (_bt)))
#define	ath_hal_beacontimers(_ah, _bs) \
	((*(_ah)->ah_setStationBeaconTimers)((_ah), (_bs)))
#define	ath_hal_getnexttbtt(_ah) \
	((*(_ah)->ah_getNextTBTT)((_ah)))
#define	ath_hal_setassocid(_ah, _bss, _associd) \
	((*(_ah)->ah_writeAssocid)((_ah), (_bss), (_associd)))
#define	ath_hal_phydisable(_ah) \
	((*(_ah)->ah_phyDisable)((_ah)))
#define	ath_hal_setopmode(_ah) \
	((*(_ah)->ah_setPCUConfig)((_ah)))
#define	ath_hal_stoptxdma(_ah, _qnum) \
	((*(_ah)->ah_stopTxDma)((_ah), (_qnum)))
#define	ath_hal_stoppcurecv(_ah) \
	((*(_ah)->ah_stopPcuReceive)((_ah)))
#define	ath_hal_startpcurecv(_ah) \
	((*(_ah)->ah_startPcuReceive)((_ah)))
#define	ath_hal_stopdmarecv(_ah) \
	((*(_ah)->ah_stopDmaReceive)((_ah)))
#define	ath_hal_getdiagstate(_ah, _id, _indata, _insize, _outdata, _outsize) \
	((*(_ah)->ah_getDiagState)((_ah), (_id), \
		(_indata), (_insize), (_outdata), (_outsize)))
#define	ath_hal_getfatalstate(_ah, _outdata, _outsize) \
	ath_hal_getdiagstate(_ah, 29, NULL, 0, (_outdata), _outsize)
#define	ath_hal_setuptxqueue(_ah, _type, _irq) \
	((*(_ah)->ah_setupTxQueue)((_ah), (_type), (_irq)))
#define	ath_hal_resettxqueue(_ah, _q) \
	((*(_ah)->ah_resetTxQueue)((_ah), (_q)))
#define	ath_hal_releasetxqueue(_ah, _q) \
	((*(_ah)->ah_releaseTxQueue)((_ah), (_q)))
#define	ath_hal_gettxqueueprops(_ah, _q, _qi) \
	((*(_ah)->ah_getTxQueueProps)((_ah), (_q), (_qi)))
#define	ath_hal_settxqueueprops(_ah, _q, _qi) \
	((*(_ah)->ah_setTxQueueProps)((_ah), (_q), (_qi)))
/* NB: common across all chips */
#define	AR_Q_TXE	0x0840	/* MAC Transmit Queue enable */
#define	ath_hal_txqenabled(_ah, _qnum) \
	(OS_REG_READ(_ah, AR_Q_TXE) & (1<<(_qnum)))
#define	ath_hal_getrfgain(_ah) \
	((*(_ah)->ah_getRfGain)((_ah)))
#define	ath_hal_getdefantenna(_ah) \
	((*(_ah)->ah_getDefAntenna)((_ah)))
#define	ath_hal_setdefantenna(_ah, _ant) \
	((*(_ah)->ah_setDefAntenna)((_ah), (_ant)))
#define	ath_hal_rxmonitor(_ah, _arg, _chan) \
	((*(_ah)->ah_rxMonitor)((_ah), (_arg), (_chan)))
#define	ath_hal_ani_poll(_ah, _chan) \
	((*(_ah)->ah_aniPoll)((_ah), (_chan)))
#define	ath_hal_mibevent(_ah, _stats) \
	((*(_ah)->ah_procMibEvent)((_ah), (_stats)))
#define	ath_hal_setslottime(_ah, _us) \
	((*(_ah)->ah_setSlotTime)((_ah), (_us)))
#define	ath_hal_getslottime(_ah) \
	((*(_ah)->ah_getSlotTime)((_ah)))
#define	ath_hal_setacktimeout(_ah, _us) \
	((*(_ah)->ah_setAckTimeout)((_ah), (_us)))
#define	ath_hal_getacktimeout(_ah) \
	((*(_ah)->ah_getAckTimeout)((_ah)))
#define	ath_hal_setctstimeout(_ah, _us) \
	((*(_ah)->ah_setCTSTimeout)((_ah), (_us)))
#define	ath_hal_getctstimeout(_ah) \
	((*(_ah)->ah_getCTSTimeout)((_ah)))
#define	ath_hal_getcapability(_ah, _cap, _param, _result) \
	((*(_ah)->ah_getCapability)((_ah), (_cap), (_param), (_result)))
#define	ath_hal_setcapability(_ah, _cap, _param, _v, _status) \
	((*(_ah)->ah_setCapability)((_ah), (_cap), (_param), (_v), (_status)))
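
/*
 * Illustrative sketch: most of the feature tests that follow are thin
 * wrappers around these two capability calls.  A query passes a
 * capability id plus a sub-parameter and, optionally, a pointer that
 * receives the value, e.g.:
 *
 *	uint32_t rxchainmask;
 *
 *	if (ath_hal_getcapability(ah, HAL_CAP_RX_CHAINMASK, 0,
 *	    &rxchainmask) == HAL_OK)
 *		sc->sc_rxchainmask = rxchainmask;
 */
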
#define	ath_hal_ciphersupported(_ah, _cipher) \
	(ath_hal_getcapability(_ah, HAL_CAP_CIPHER, _cipher, NULL) == HAL_OK)
#define	ath_hal_getregdomain(_ah, _prd) \
	(ath_hal_getcapability(_ah, HAL_CAP_REG_DMN, 0, (_prd)) == HAL_OK)
#define	ath_hal_setregdomain(_ah, _rd) \
	ath_hal_setcapability(_ah, HAL_CAP_REG_DMN, 0, _rd, NULL)
#define	ath_hal_getcountrycode(_ah, _pcc) \
	(*(_pcc) = (_ah)->ah_countryCode)
#define	ath_hal_gettkipmic(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_MIC, 1, NULL) == HAL_OK)
#define	ath_hal_settkipmic(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TKIP_MIC, 1, _v, NULL)
#define	ath_hal_hastkipsplit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 0, NULL) == HAL_OK)
#define	ath_hal_gettkipsplit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, NULL) == HAL_OK)
#define	ath_hal_settkipsplit(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TKIP_SPLIT, 1, _v, NULL)
#define	ath_hal_haswmetkipmic(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_WME_TKIPMIC, 0, NULL) == HAL_OK)
#define	ath_hal_hwphycounters(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_PHYCOUNTERS, 0, NULL) == HAL_OK)
#define	ath_hal_hasdiversity(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 0, NULL) == HAL_OK)
#define	ath_hal_getdiversity(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIVERSITY, 1, NULL) == HAL_OK)
#define	ath_hal_setdiversity(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_DIVERSITY, 1, _v, NULL)
#define	ath_hal_getantennaswitch(_ah) \
	((*(_ah)->ah_getAntennaSwitch)((_ah)))
#define	ath_hal_setantennaswitch(_ah, _v) \
	((*(_ah)->ah_setAntennaSwitch)((_ah), (_v)))
#define	ath_hal_getdiag(_ah, _pv) \
	(ath_hal_getcapability(_ah, HAL_CAP_DIAG, 0, _pv) == HAL_OK)
#define	ath_hal_setdiag(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_DIAG, 0, _v, NULL)
#define	ath_hal_getnumtxqueues(_ah, _pv) \
	(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXQUEUES, 0, _pv) == HAL_OK)
#define	ath_hal_hasveol(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_VEOL, 0, NULL) == HAL_OK)
#define	ath_hal_hastxpowlimit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 0, NULL) == HAL_OK)
#define	ath_hal_settxpowlimit(_ah, _pow) \
	((*(_ah)->ah_setTxPowerLimit)((_ah), (_pow)))
#define	ath_hal_gettxpowlimit(_ah, _ppow) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 1, _ppow) == HAL_OK)
#define	ath_hal_getmaxtxpow(_ah, _ppow) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 2, _ppow) == HAL_OK)
#define	ath_hal_gettpscale(_ah, _scale) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXPOW, 3, _scale) == HAL_OK)
#define	ath_hal_settpscale(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TXPOW, 3, _v, NULL)
#define	ath_hal_hastpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC, 0, NULL) == HAL_OK)
#define	ath_hal_gettpc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC, 1, NULL) == HAL_OK)
#define	ath_hal_settpc(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC, 1, _v, NULL)
#define	ath_hal_hasbursting(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BURST, 0, NULL) == HAL_OK)
#define	ath_hal_setmcastkeysearch(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, _v, NULL)
#define	ath_hal_hasmcastkeysearch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 0, NULL) == HAL_OK)
#define	ath_hal_getmcastkeysearch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_MCAST_KEYSRCH, 1, NULL) == HAL_OK)
#define	ath_hal_hasfastframes(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_FASTFRAME, 0, NULL) == HAL_OK)
#define	ath_hal_hasbssidmask(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMASK, 0, NULL) == HAL_OK)
#define	ath_hal_hasbssidmatch(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BSSIDMATCH, 0, NULL) == HAL_OK)
#define	ath_hal_hastsfadjust(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 0, NULL) == HAL_OK)
#define	ath_hal_gettsfadjust(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_TSF_ADJUST, 1, NULL) == HAL_OK)
#define	ath_hal_settsfadjust(_ah, _onoff) \
	ath_hal_setcapability(_ah, HAL_CAP_TSF_ADJUST, 1, _onoff, NULL)
#define	ath_hal_hasrfsilent(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 0, NULL) == HAL_OK)
#define	ath_hal_getrfkill(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 1, NULL) == HAL_OK)
#define	ath_hal_setrfkill(_ah, _onoff) \
	ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 1, _onoff, NULL)
#define	ath_hal_getrfsilent(_ah, _prfsilent) \
	(ath_hal_getcapability(_ah, HAL_CAP_RFSILENT, 2, _prfsilent) == HAL_OK)
#define	ath_hal_setrfsilent(_ah, _rfsilent) \
	ath_hal_setcapability(_ah, HAL_CAP_RFSILENT, 2, _rfsilent, NULL)
#define	ath_hal_gettpack(_ah, _ptpack) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC_ACK, 0, _ptpack) == HAL_OK)
#define	ath_hal_settpack(_ah, _tpack) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC_ACK, 0, _tpack, NULL)
#define	ath_hal_gettpcts(_ah, _ptpcts) \
	(ath_hal_getcapability(_ah, HAL_CAP_TPC_CTS, 0, _ptpcts) == HAL_OK)
#define	ath_hal_settpcts(_ah, _tpcts) \
	ath_hal_setcapability(_ah, HAL_CAP_TPC_CTS, 0, _tpcts, NULL)
#define	ath_hal_hasintmit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_PRESENT, NULL) == HAL_OK)
#define	ath_hal_getintmit(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_ENABLE, NULL) == HAL_OK)
#define	ath_hal_setintmit(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_INTMIT, \
	HAL_CAP_INTMIT_ENABLE, _v, NULL)

#define	ath_hal_hasenforcetxop(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 0, NULL) == HAL_OK)
#define	ath_hal_getenforcetxop(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENFORCE_TXOP, 1, NULL) == HAL_OK)
#define	ath_hal_setenforcetxop(_ah, _v) \
	ath_hal_setcapability(_ah, HAL_CAP_ENFORCE_TXOP, 1, _v, NULL)

#define	ath_hal_hasrxlnamixer(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RX_LNA_MIXING, 0, NULL) == HAL_OK)

#define	ath_hal_hasdivantcomb(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ANT_DIV_COMB, 0, NULL) == HAL_OK)

/* EDMA definitions */
#define	ath_hal_hasedma(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_ENHANCED_DMA_SUPPORT, \
	0, NULL) == HAL_OK)
#define	ath_hal_getrxfifodepth(_ah, _qtype, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXFIFODEPTH, _qtype, _req) \
	== HAL_OK)
#define	ath_hal_getntxmaps(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_NUM_TXMAPS, 0, _req) \
	== HAL_OK)
#define	ath_hal_gettxdesclen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXDESCLEN, 0, _req) \
	== HAL_OK)
#define	ath_hal_gettxstatuslen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_TXSTATUSLEN, 0, _req) \
	== HAL_OK)
#define	ath_hal_getrxstatuslen(_ah, _req) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXSTATUSLEN, 0, _req) \
	== HAL_OK)
#define	ath_hal_setrxbufsize(_ah, _req) \
	(ath_hal_setcapability(_ah, HAL_CAP_RXBUFSIZE, 0, _req, NULL) \
	== HAL_OK)

#define	ath_hal_getchannoise(_ah, _c) \
	((*(_ah)->ah_getChanNoise)((_ah), (_c)))

/* 802.11n HAL methods */
#define	ath_hal_getrxchainmask(_ah, _prxchainmask) \
	(ath_hal_getcapability(_ah, HAL_CAP_RX_CHAINMASK, 0, _prxchainmask))
#define	ath_hal_gettxchainmask(_ah, _ptxchainmask) \
	(ath_hal_getcapability(_ah, HAL_CAP_TX_CHAINMASK, 0, _ptxchainmask))
#define	ath_hal_setrxchainmask(_ah, _rx) \
	(ath_hal_setcapability(_ah, HAL_CAP_RX_CHAINMASK, 1, _rx, NULL))
#define	ath_hal_settxchainmask(_ah, _tx) \
	(ath_hal_setcapability(_ah, HAL_CAP_TX_CHAINMASK, 1, _tx, NULL))
#define	ath_hal_split4ktrans(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_SPLIT_4KB_TRANS, \
	0, NULL) == HAL_OK)
#define	ath_hal_self_linked_final_rxdesc(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_RXDESC_SELFLINK, \
	0, NULL) == HAL_OK)
#define	ath_hal_gtxto_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_GTXTO, 0, NULL) == HAL_OK)
#define	ath_hal_has_long_rxdesc_tsf(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_LONG_RXDESC_TSF, \
	0, NULL) == HAL_OK)
#define	ath_hal_setuprxdesc(_ah, _ds, _size, _intreq) \
	((*(_ah)->ah_setupRxDesc)((_ah), (_ds), (_size), (_intreq)))
#define	ath_hal_rxprocdesc(_ah, _ds, _dspa, _dsnext, _rs) \
	((*(_ah)->ah_procRxDesc)((_ah), (_ds), (_dspa), (_dsnext), 0, (_rs)))
#define	ath_hal_setuptxdesc(_ah, _ds, _plen, _hlen, _atype, _txpow, \
		_txr0, _txtr0, _keyix, _ant, _flags, \
		_rtsrate, _rtsdura) \
	((*(_ah)->ah_setupTxDesc)((_ah), (_ds), (_plen), (_hlen), (_atype), \
		(_txpow), (_txr0), (_txtr0), (_keyix), (_ant), \
		(_flags), (_rtsrate), (_rtsdura), 0, 0, 0))
#define	ath_hal_setupxtxdesc(_ah, _ds, \
		_txr1, _txtr1, _txr2, _txtr2, _txr3, _txtr3) \
	((*(_ah)->ah_setupXTxDesc)((_ah), (_ds), \
		(_txr1), (_txtr1), (_txr2), (_txtr2), (_txr3), (_txtr3)))
#define	ath_hal_filltxdesc(_ah, _ds, _b, _l, _did, _qid, _first, _last, _ds0) \
	((*(_ah)->ah_fillTxDesc)((_ah), (_ds), (_b), (_l), (_did), (_qid), \
		(_first), (_last), (_ds0)))
#define	ath_hal_txprocdesc(_ah, _ds, _ts) \
	((*(_ah)->ah_procTxDesc)((_ah), (_ds), (_ts)))
#define	ath_hal_gettxintrtxqs(_ah, _txqs) \
	((*(_ah)->ah_getTxIntrQueue)((_ah), (_txqs)))
#define	ath_hal_gettxcompletionrates(_ah, _ds, _rates, _tries) \
	((*(_ah)->ah_getTxCompletionRates)((_ah), (_ds), (_rates), (_tries)))
#define	ath_hal_settxdesclink(_ah, _ds, _link) \
	((*(_ah)->ah_setTxDescLink)((_ah), (_ds), (_link)))
#define	ath_hal_gettxdesclink(_ah, _ds, _link) \
	((*(_ah)->ah_getTxDescLink)((_ah), (_ds), (_link)))
#define	ath_hal_gettxdesclinkptr(_ah, _ds, _linkptr) \
	((*(_ah)->ah_getTxDescLinkPtr)((_ah), (_ds), (_linkptr)))
#define	ath_hal_setuptxstatusring(_ah, _tsstart, _tspstart, _size) \
	((*(_ah)->ah_setupTxStatusRing)((_ah), (_tsstart), (_tspstart), \
		(_size)))
#define	ath_hal_gettxrawtxdesc(_ah, _txstatus) \
	((*(_ah)->ah_getTxRawTxDesc)((_ah), (_txstatus)))

#define	ath_hal_setupfirsttxdesc(_ah, _ds, _aggrlen, _flags, _txpower, \
		_txr0, _txtr0, _antm, _rcr, _rcd) \
	((*(_ah)->ah_setupFirstTxDesc)((_ah), (_ds), (_aggrlen), (_flags), \
		(_txpower), (_txr0), (_txtr0), (_antm), (_rcr), (_rcd)))
#define	ath_hal_chaintxdesc(_ah, _ds, _bl, _sl, _pktlen, _hdrlen, _type, \
		_keyix, _cipher, _delims, _first, _last, _lastaggr) \
	((*(_ah)->ah_chainTxDesc)((_ah), (_ds), (_bl), (_sl), \
		(_pktlen), (_hdrlen), (_type), (_keyix), (_cipher), (_delims), \
		(_first), (_last), (_lastaggr)))
#define	ath_hal_setuplasttxdesc(_ah, _ds, _ds0) \
	((*(_ah)->ah_setupLastTxDesc)((_ah), (_ds), (_ds0)))

#define	ath_hal_set11nratescenario(_ah, _ds, _dur, _rt, _series, _ns, _flags) \
	((*(_ah)->ah_set11nRateScenario)((_ah), (_ds), (_dur), (_rt), \
		(_series), (_ns), (_flags)))

#define	ath_hal_set11n_aggr_first(_ah, _ds, _len, _num) \
	((*(_ah)->ah_set11nAggrFirst)((_ah), (_ds), (_len), (_num)))
#define	ath_hal_set11n_aggr_middle(_ah, _ds, _num) \
	((*(_ah)->ah_set11nAggrMiddle)((_ah), (_ds), (_num)))
#define	ath_hal_set11n_aggr_last(_ah, _ds) \
	((*(_ah)->ah_set11nAggrLast)((_ah), (_ds)))
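
/*
 * Illustrative sketch (simplified): when an A-MPDU is formed, each
 * sub-frame's descriptor chain is marked so the hardware knows where
 * the aggregate begins and ends, roughly:
 *
 *	first sub-frame:   ath_hal_set11n_aggr_first(ah, ds, aggr_len, ndelim);
 *	middle sub-frames: ath_hal_set11n_aggr_middle(ah, ds, ndelim);
 *	last sub-frame:    ath_hal_set11n_aggr_last(ah, ds);
 */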

#define	ath_hal_set11nburstduration(_ah, _ds, _dur) \
	((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
#define	ath_hal_clr11n_aggr(_ah, _ds) \
	((*(_ah)->ah_clr11nAggr)((_ah), (_ds)))
#define	ath_hal_set11n_virtmorefrag(_ah, _ds, _v) \
	((*(_ah)->ah_set11nVirtMoreFrag)((_ah), (_ds), (_v)))

#define	ath_hal_gpioCfgOutput(_ah, _gpio, _type) \
	((*(_ah)->ah_gpioCfgOutput)((_ah), (_gpio), (_type)))
#define	ath_hal_gpioset(_ah, _gpio, _b) \
	((*(_ah)->ah_gpioSet)((_ah), (_gpio), (_b)))
#define	ath_hal_gpioget(_ah, _gpio) \
	((*(_ah)->ah_gpioGet)((_ah), (_gpio)))
#define	ath_hal_gpiosetintr(_ah, _gpio, _b) \
	((*(_ah)->ah_gpioSetIntr)((_ah), (_gpio), (_b)))

/*
 * PCIe suspend/resume/poweron/poweroff related macros
 */
#define	ath_hal_enablepcie(_ah, _restore, _poweroff) \
	((*(_ah)->ah_configPCIE)((_ah), (_restore), (_poweroff)))
#define	ath_hal_disablepcie(_ah) \
	((*(_ah)->ah_disablePCIE)((_ah)))

/*
 * This is badly-named; you need to set the correct parameters
 * to begin to receive useful radar events; and even then
 * it doesn't "enable" DFS.  See the ath_dfs/null/ module for
 * more information.
 */
#define	ath_hal_enabledfs(_ah, _param) \
	((*(_ah)->ah_enableDfs)((_ah), (_param)))
#define	ath_hal_getdfsthresh(_ah, _param) \
	((*(_ah)->ah_getDfsThresh)((_ah), (_param)))
#define	ath_hal_getdfsdefaultthresh(_ah, _param) \
	((*(_ah)->ah_getDfsDefaultThresh)((_ah), (_param)))
#define	ath_hal_procradarevent(_ah, _rxs, _fulltsf, _buf, _event) \
	((*(_ah)->ah_procRadarEvent)((_ah), (_rxs), (_fulltsf), \
	(_buf), (_event)))
#define	ath_hal_is_fast_clock_enabled(_ah) \
	((*(_ah)->ah_isFastClockEnabled)((_ah)))
#define	ath_hal_radar_wait(_ah, _chan) \
	((*(_ah)->ah_radarWait)((_ah), (_chan)))
#define	ath_hal_get_mib_cycle_counts(_ah, _sample) \
	((*(_ah)->ah_getMibCycleCounts)((_ah), (_sample)))
#define	ath_hal_get_chan_ext_busy(_ah) \
	((*(_ah)->ah_get11nExtBusy)((_ah)))
#define	ath_hal_setchainmasks(_ah, _txchainmask, _rxchainmask) \
	((*(_ah)->ah_setChainMasks)((_ah), (_txchainmask), (_rxchainmask)))

#define	ath_hal_spectral_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_SPECTRAL_SCAN, 0, NULL) == HAL_OK)
#define	ath_hal_spectral_get_config(_ah, _p) \
	((*(_ah)->ah_spectralGetConfig)((_ah), (_p)))
#define	ath_hal_spectral_configure(_ah, _p) \
	((*(_ah)->ah_spectralConfigure)((_ah), (_p)))
#define	ath_hal_spectral_start(_ah) \
	((*(_ah)->ah_spectralStart)((_ah)))
#define	ath_hal_spectral_stop(_ah) \
	((*(_ah)->ah_spectralStop)((_ah)))

#define	ath_hal_btcoex_supported(_ah) \
	(ath_hal_getcapability(_ah, HAL_CAP_BT_COEX, 0, NULL) == HAL_OK)
#define	ath_hal_btcoex_set_info(_ah, _info) \
	((*(_ah)->ah_btCoexSetInfo)((_ah), (_info)))
#define	ath_hal_btcoex_set_config(_ah, _cfg) \
	((*(_ah)->ah_btCoexSetConfig)((_ah), (_cfg)))
#define	ath_hal_btcoex_set_qcu_thresh(_ah, _qcuid) \
	((*(_ah)->ah_btCoexSetQcuThresh)((_ah), (_qcuid)))
#define	ath_hal_btcoex_set_weights(_ah, _weight) \
	((*(_ah)->ah_btCoexSetWeights)((_ah), (_weight)))
#define	ath_hal_btcoex_set_bmiss_thresh(_ah, _thr) \
	((*(_ah)->ah_btCoexSetBmissThresh)((_ah), (_thr)))
#define	ath_hal_btcoex_set_parameter(_ah, _attrib, _val) \
	((*(_ah)->ah_btCoexSetParameter)((_ah), (_attrib), (_val)))
#define	ath_hal_btcoex_enable(_ah) \
	((*(_ah)->ah_btCoexEnable)((_ah)))
#define	ath_hal_btcoex_disable(_ah) \
	((*(_ah)->ah_btCoexDisable)((_ah)))

#define	ath_hal_div_comb_conf_get(_ah, _conf) \
	((*(_ah)->ah_divLnaConfGet)((_ah), (_conf)))
#define	ath_hal_div_comb_conf_set(_ah, _conf) \
	((*(_ah)->ah_divLnaConfSet)((_ah), (_conf)))

#endif /* _DEV_ATH_ATHVAR_H */