/*
 * Copyright (c) 2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "opt_ah.h"

#include "ah.h"
#include "ah_internal.h"

#include "ar9300/ar9300.h"
#include "ar9300/ar9300reg.h"
#include "ar9300/ar9300phy.h"

/*
 * Checks to see if an interrupt is pending on our NIC
 *
 * Returns: TRUE    if an interrupt is pending
 *          FALSE   if not
 */
HAL_BOOL
ar9300_is_interrupt_pending(struct ath_hal *ah)
{
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    u_int32_t host_isr;

    /*
     * Some platforms trigger our ISR before applying power to
     * the card, so make sure an interrupt is really pending
     * before claiming one.
     */
    host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
    if ((host_isr & AR_INTR_ASYNC_USED) && (host_isr != AR_INTR_SPURIOUS)) {
        return AH_TRUE;
    }

    host_isr = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
    if (AR_SREV_POSEIDON(ah)) {
        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
    }
    else if (AR_SREV_WASP(ah)) {
        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
    }

    if ((host_isr & (sync_en_def | AR_INTR_SYNC_MASK_GPIO)) &&
        (host_isr != AR_INTR_SPURIOUS)) {
        return AH_TRUE;
    }

    return AH_FALSE;
}
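
/*
 * A minimal usage sketch (an assumption about the typical driver interrupt
 * path, not code taken from this HAL): a bus ISR would normally call
 * ar9300_is_interrupt_pending() first to cheaply dismiss shared or spurious
 * interrupts, and only then fetch and decode the causes with
 * ar9300_get_pending_interrupts():
 *
 *     HAL_INT status;
 *
 *     if (!ar9300_is_interrupt_pending(ah))
 *         return;                 // not ours (shared or spurious IRQ)
 *     ar9300_get_pending_interrupts(ah, &status, type, msi, AH_FALSE);
 *     if (status & HAL_INT_RXLP)
 *         ;                       // schedule low-priority RX processing
 *
 * Here 'ah', 'type' and 'msi' are whatever the caller already tracks; the
 * AH_FALSE 'nortc' argument is illustrative only.
 */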

/*
 * Reads the Interrupt Status Register value from the NIC, thus deasserting
 * the interrupt line, and returns the mapped ISR value.  The value returned
 * is mapped to abstract the hw-specific bit locations in the Interrupt
 * Status Register.
 *
 * Returns: A hardware-abstracted bitmap of all non-masked-out
 *          interrupts pending
 */
#define MAP_ISR_S2_HAL_CST          6 /* Carrier sense timeout */
#define MAP_ISR_S2_HAL_GTT          6 /* Global transmit timeout */
#define MAP_ISR_S2_HAL_TIM          3 /* TIM */
#define MAP_ISR_S2_HAL_CABEND       0 /* CABEND */
#define MAP_ISR_S2_HAL_DTIMSYNC     7 /* DTIMSYNC */
#define MAP_ISR_S2_HAL_DTIM         7 /* DTIM */
#define MAP_ISR_S2_HAL_TSFOOR       4 /* Rx TSF out of range */
#define MAP_ISR_S2_HAL_BBPANIC      6 /* Panic watchdog IRQ from BB */
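/*
 * Clarifying note (added here, not from the original sources): the
 * MAP_ISR_S2_* values above are shift counts, not masks.  Each one moves an
 * AR_ISR_S2_* status bit into the bit position of its HAL_INT_* counterpart,
 * so the translation below boils down to expressions of the form
 *
 *     mask2 |= (isr2 & AR_ISR_S2_GTT) << MAP_ISR_S2_HAL_GTT;
 *
 * which lands the hardware GTT status bit on HAL_INT_GTT.  Whether a bit is
 * shifted left or right simply depends on where it sits in ISR_S2 relative
 * to the abstract HAL_INT layout.
 */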
HAL_BOOL
ar9300_get_pending_interrupts(
    struct ath_hal *ah,
    HAL_INT *masked,
    HAL_INT_TYPE type,
    u_int8_t msi,
    HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    HAL_BOOL  ret_val = AH_TRUE;
    u_int32_t isr = 0;
    u_int32_t mask2 = 0;
    u_int32_t sync_cause = 0;
    u_int32_t async_cause;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    *masked = 0;

    if (!nortc) {
        /*
         * With per-vector MSI, the vector itself identifies the cause for
         * the RX/TX vectors: ack the corresponding ISR bits and return the
         * mapped interrupt directly.  Only the misc vector falls through
         * to the full cause decoding below.
         */
        if (HAL_INT_MSI == type) {
            if (msi == HAL_MSIVEC_RXHP) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_HP_RXOK);
                *masked = HAL_INT_RXHP;
                goto end;
            } else if (msi == HAL_MSIVEC_RXLP) {
                OS_REG_WRITE(ah, AR_ISR,
                    (AR_ISR_LP_RXOK | AR_ISR_RXMINTR | AR_ISR_RXINTM));
                *masked = HAL_INT_RXLP;
                goto end;
            } else if (msi == HAL_MSIVEC_TX) {
                OS_REG_WRITE(ah, AR_ISR, AR_ISR_TXOK);
                *masked = HAL_INT_TX;
                goto end;
            } else if (msi == HAL_MSIVEC_MISC) {
                /*
                 * For the misc MSI event fall through and determine
                 * the cause.
                 */
            }
        }
    }

    /* Make sure a MAC interrupt is pending in the async interrupt cause register */
    async_cause = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE));
    if (async_cause & AR_INTR_ASYNC_USED) {
        /*
         * The RTC may not be on since it runs on a slow 32 kHz clock,
         * so check its status to be sure.
         */
        if (!nortc &&
            (OS_REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) ==
             AR_RTC_STATUS_ON)
        {
            isr = OS_REG_READ(ah, AR_ISR);
        }
    }

    if (AR_SREV_POSEIDON(ah)) {
        sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
    }
    else if (AR_SREV_WASP(ah)) {
        sync_en_def = AR9340_INTR_SYNC_DEFAULT;
    }

    /* Store away the async and sync cause registers */
    /* XXX Do this before the filtering done below */
#ifdef AH_INTERRUPT_DEBUGGING
    ah->ah_intrstate[0] = OS_REG_READ(ah, AR_ISR);
    ah->ah_intrstate[1] = OS_REG_READ(ah, AR_ISR_S0);
    ah->ah_intrstate[2] = OS_REG_READ(ah, AR_ISR_S1);
    ah->ah_intrstate[3] = OS_REG_READ(ah, AR_ISR_S2);
    ah->ah_intrstate[4] = OS_REG_READ(ah, AR_ISR_S3);
    ah->ah_intrstate[5] = OS_REG_READ(ah, AR_ISR_S4);
    ah->ah_intrstate[6] = OS_REG_READ(ah, AR_ISR_S5);

    /* XXX double reading? */
    ah->ah_syncstate = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE));
#endif

    sync_cause =
        OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE)) &
        (sync_en_def | AR_INTR_SYNC_MASK_GPIO);

    if (!isr && !sync_cause && !async_cause) {
        ret_val = AH_FALSE;
        goto end;
    }

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: isr=0x%x, sync_cause=0x%x, async_cause=0x%x\n",
        __func__, isr, sync_cause, async_cause);

    if (isr) {
        if (isr & AR_ISR_BCNMISC) {
            u_int32_t isr2;
            isr2 = OS_REG_READ(ah, AR_ISR_S2);

            /* Translate ISR bits to HAL values */
            mask2 |= ((isr2 & AR_ISR_S2_TIM) >> MAP_ISR_S2_HAL_TIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIM) >> MAP_ISR_S2_HAL_DTIM);
            mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >> MAP_ISR_S2_HAL_DTIMSYNC);
            mask2 |= ((isr2 & AR_ISR_S2_CABEND) >> MAP_ISR_S2_HAL_CABEND);
            mask2 |= ((isr2 & AR_ISR_S2_GTT) << MAP_ISR_S2_HAL_GTT);
            mask2 |= ((isr2 & AR_ISR_S2_CST) << MAP_ISR_S2_HAL_CST);
            mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >> MAP_ISR_S2_HAL_TSFOOR);
            mask2 |= ((isr2 & AR_ISR_S2_BBPANIC) >> MAP_ISR_S2_HAL_BBPANIC);

            if (!p_cap->halIsrRacSupport) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S2.
                 * This avoids a race condition where a new BCNMISC interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S2, isr2);
                isr &= ~AR_ISR_BCNMISC;
            }
        }

        /* Use AR_ISR_RAC only if chip supports it.
         * See EV61133 (missing interrupts due to ISR_RAC)
         */
        if (p_cap->halIsrRacSupport) {
            isr = OS_REG_READ(ah, AR_ISR_RAC);
        }
        if (isr == 0xffffffff) {
            *masked = 0;
            ret_val = AH_FALSE;
            goto end;
        }

        *masked = isr & HAL_INT_COMMON;

        /*
         * When interrupt mitigation is switched on, we fake a normal RX or TX
         * interrupt when we receive a mitigated interrupt.  This way, the
         * upper layers do not need to know about the feature.
         */
        if (ahp->ah_intr_mitigation_rx) {
            /* Only Rx interrupt mitigation. No Tx intr. mitigation. */
            if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) {
                *masked |= HAL_INT_RXLP;
            }
        }
        if (ahp->ah_intr_mitigation_tx) {
            if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM)) {
                *masked |= HAL_INT_TX;
            }
        }

        if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR)) {
            *masked |= HAL_INT_RXLP;
        }
        if (isr & AR_ISR_HP_RXOK) {
            *masked |= HAL_INT_RXHP;
        }
        if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
            *masked |= HAL_INT_TX;

            if (!p_cap->halIsrRacSupport) {
                u_int32_t s0, s1;
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to
                 * ISR_S0/S1.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                s0 = OS_REG_READ(ah, AR_ISR_S0);
                OS_REG_WRITE(ah, AR_ISR_S0, s0);
                s1 = OS_REG_READ(ah, AR_ISR_S1);
                OS_REG_WRITE(ah, AR_ISR_S1, s1);

                isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL);
            }
        }

        /*
         * Do not treat receive overflows as fatal for owl.
         */
        if (isr & AR_ISR_RXORN) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: receive FIFO overrun interrupt\n", __func__);
#endif
        }

#if 0
        /* XXX Verify if this is fixed for Osprey */
        if (!p_cap->halAutoSleepSupport) {
            u_int32_t isr5 = OS_REG_READ(ah, AR_ISR_S5_S);
            if (isr5 & AR_ISR_S5_TIM_TIMER) {
                *masked |= HAL_INT_TIM_TIMER;
            }
        }
#endif
        if (isr & AR_ISR_GENTMR) {
            u_int32_t s5;

            if (p_cap->halIsrRacSupport) {
                /* Use secondary shadow registers if using ISR_RAC */
                s5 = OS_REG_READ(ah, AR_ISR_S5_S);
            } else {
                s5 = OS_REG_READ(ah, AR_ISR_S5);
            }

            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: GENTIMER, ISR_RAC=0x%x ISR_S5=0x%x\n", __func__,
                isr, s5);
            ahp->ah_intr_gen_timer_trigger =
                MS(s5, AR_ISR_S5_GENTIMER_TRIG);
            ahp->ah_intr_gen_timer_thresh =
                MS(s5, AR_ISR_S5_GENTIMER_THRESH);
            if (ahp->ah_intr_gen_timer_trigger) {
                *masked |= HAL_INT_GENTIMER;
            }
            if (!p_cap->halIsrRacSupport) {
                /*
                 * EV61133 (missing interrupts due to ISR_RAC):
                 * If not using ISR_RAC, clear interrupts by writing to ISR_S5.
                 * This avoids a race condition where a new interrupt
                 * could come in between reading the ISR and clearing the
                 * interrupt via the primary ISR.  We therefore clear the
                 * interrupt via the secondary, which avoids this race.
                 */
                OS_REG_WRITE(ah, AR_ISR_S5, s5);
                isr &= ~AR_ISR_GENTMR;
            }
        }

        *masked |= mask2;

        if (!p_cap->halIsrRacSupport) {
            /*
             * EV61133 (missing interrupts due to ISR_RAC):
             * If not using ISR_RAC, clear the interrupts we've read by
             * writing back ones in these locations to the primary ISR
             * (except for interrupts that have a secondary isr register -
             * see above).
             */
            OS_REG_WRITE(ah, AR_ISR, isr);

            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_ISR);
        }

#ifdef AH_SUPPORT_AR9300
        if (*masked & HAL_INT_BBPANIC) {
            ar9300_handle_bb_panic(ah);
        }
#endif
    }

    if (async_cause) {
        if (nortc) {
            OS_REG_WRITE(ah,
                AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR), async_cause);
            /* Flush prior write */
            (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_CAUSE_CLR));
        } else {
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
            if (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) {
                ahp->ah_gpio_cause = (async_cause & AR_INTR_ASYNC_CAUSE_GPIO) >>
                                     AR_INTR_ASYNC_ENABLE_GPIO_S;
                *masked |= HAL_INT_GPIO;
            }
#endif
        }

#if ATH_SUPPORT_MCI
        if ((async_cause & AR_INTR_ASYNC_CAUSE_MCI) &&
            p_cap->halMciSupport)
        {
            u_int32_t int_raw, int_rx_msg;

            int_rx_msg = OS_REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
            int_raw = OS_REG_READ(ah, AR_MCI_INTERRUPT_RAW);

            if ((int_raw == 0xdeadbeef) || (int_rx_msg == 0xdeadbeef))
            {
                HALDEBUG(ah, HAL_DEBUG_BT_COEX,
                    "(MCI) Got 0xdeadbeef during MCI int processing; "
                    "new int_raw=0x%08x, new rx_msg_raw=0x%08x, "
                    "int_raw=0x%08x, rx_msg_raw=0x%08x\n",
                    int_raw, int_rx_msg, ahp->ah_mci_int_raw,
                    ahp->ah_mci_int_rx_msg);
            }
            else {
                if (ahp->ah_mci_int_raw || ahp->ah_mci_int_rx_msg) {
                    ahp->ah_mci_int_rx_msg |= int_rx_msg;
                    ahp->ah_mci_int_raw |= int_raw;
                }
                else {
                    ahp->ah_mci_int_rx_msg = int_rx_msg;
                    ahp->ah_mci_int_raw = int_raw;
                }

                *masked |= HAL_INT_MCI;
                ahp->ah_mci_rx_status = OS_REG_READ(ah, AR_MCI_RX_STATUS);
                if (int_rx_msg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
                    ahp->ah_mci_cont_status =
                                    OS_REG_READ(ah, AR_MCI_CONT_STATUS);
                    HALDEBUG(ah, HAL_DEBUG_BT_COEX,
                        "(MCI) cont_status=0x%08x\n", ahp->ah_mci_cont_status);
                }
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
                    int_rx_msg);
                OS_REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, int_raw);

                HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: AR_INTR_SYNC_MCI\n", __func__);
            }
        }
#endif
    }

    if (sync_cause) {
        int host1_fatal, host1_perr, radm_cpl_timeout, local_timeout;

        host1_fatal = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_FATAL : AR9300_INTR_SYNC_HOST1_FATAL;
        host1_perr = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_HOST1_PERR : AR9300_INTR_SYNC_HOST1_PERR;
        radm_cpl_timeout = AR_SREV_WASP(ah) ?
            0x0 : AR9300_INTR_SYNC_RADM_CPL_TIMEOUT;
        local_timeout = AR_SREV_WASP(ah) ?
            AR9340_INTR_SYNC_LOCAL_TIMEOUT : AR9300_INTR_SYNC_LOCAL_TIMEOUT;

        if (sync_cause & host1_fatal) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI FATAL interrupt\n", __func__);
#endif
            *masked |= HAL_INT_FATAL; /* Set FATAL INT flag here */
        }
        if (sync_cause & host1_perr) {
#if __PKT_SERIOUS_ERRORS__
            HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                "%s: received PCI PERR interrupt\n", __func__);
#endif
        }

        if (sync_cause & radm_cpl_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n",
                __func__);

            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), AR_RC_HOSTIF);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_RC), 0);
            *masked |= HAL_INT_FATAL;
        }
        if (sync_cause & local_timeout) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n",
                __func__);
        }

#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (sync_cause & AR_INTR_SYNC_MASK_GPIO) {
            ahp->ah_gpio_cause = (sync_cause & AR_INTR_SYNC_MASK_GPIO) >>
                                 AR_INTR_SYNC_ENABLE_GPIO_S;
            *masked |= HAL_INT_GPIO;
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: AR_INTR_SYNC_GPIO\n", __func__);
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR), sync_cause);
        /* Flush prior write */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_CAUSE_CLR));
    }

end:
    if (HAL_INT_MSI == type) {
        /*
         * WAR for Bug EV#75887
         * Normally, SW reads HOST_INTF_PCIE_MSI (0x40A4) and stores the
         * value in ah_msi_reg, then uses ah_msi_reg to set bit #25 when
         * it wants HW to write cfg_msi_pending.
         * Sometimes the driver takes an MSI interrupt before it has read
         * 0x40a4, and ah_msi_reg still holds its initialization value
         * (0x0).  We don't yet know why the MSI interrupt can arrive
         * before the driver has done that read.
         */
        if (!ahp->ah_msi_reg) {
            ahp->ah_msi_reg = OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI));
        }
        if (AR_SREV_POSEIDON(ah)) {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
        } else {
            msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
        }
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
            ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
    }

    return ret_val;
}

HAL_INT
ar9300_get_interrupts(struct ath_hal *ah)
{
    return AH9300(ah)->ah_mask_reg;
}

/*
 * Atomically enables NIC interrupts.  Interrupts are passed in
 * via the enumerated bitmask in ints.
 */
HAL_INT
ar9300_set_interrupts(struct ath_hal *ah, HAL_INT ints, HAL_BOOL nortc)
{
    struct ath_hal_9300 *ahp = AH9300(ah);
    u_int32_t omask = ahp->ah_mask_reg;
    u_int32_t mask, mask2, msi_mask = 0;
    u_int32_t msi_pend_addr_mask = 0;
    u_int32_t sync_en_def = AR9300_INTR_SYNC_DEFAULT;
    HAL_CAPABILITIES *p_cap = &AH_PRIVATE(ah)->ah_caps;

    HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
        "%s: 0x%x => 0x%x\n", __func__, omask, ints);

    if (omask & HAL_INT_GLOBAL) {
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: disable IER\n", __func__);

        if (ah->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE), 0);
            /* flush write to HW */
            (void)OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE));
        }

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
            (void) OS_REG_READ(ah, AR_IER);   /* flush write to HW */
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), 0);
        /* flush write to HW */
        (void) OS_REG_READ(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE));
    }

    if (!nortc) {
        /* reference count for global IER */
        if (ints & HAL_INT_GLOBAL) {
#ifdef AH_DEBUG
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL ENABLED\n", __func__);
#if 0
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0) {
                HALDEBUG(ah, HAL_DEBUG_UNMASKABLE,
                    "%s: WARNING: ah_ier_ref_count is 0 "
                    "and attempting to enable IER\n",
                    __func__);
            }
#endif
#endif
#if 0
            if (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) > 0) {
                OS_ATOMIC_DEC(&ahp->ah_ier_ref_count);
            }
#endif
        } else {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: Request HAL_INT_GLOBAL DISABLED\n", __func__);
            OS_ATOMIC_INC(&ahp->ah_ier_ref_count);
        }
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
            "%s: ah_ier_ref_count = %d\n", __func__, ahp->ah_ier_ref_count);

        mask = ints & HAL_INT_COMMON;
        mask2 = 0;
        msi_mask = 0;

        if (ints & HAL_INT_TX) {
            if (ahp->ah_intr_mitigation_tx) {
                mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
            } else if (ahp->ah_tx_ok_interrupt_mask) {
                mask |= AR_IMR_TXOK;
            }
            msi_mask |= AR_INTR_PRIO_TX;
            if (ahp->ah_tx_err_interrupt_mask) {
                mask |= AR_IMR_TXERR;
            }
            if (ahp->ah_tx_eol_interrupt_mask) {
                mask |= AR_IMR_TXEOL;
            }
        }
        if (ints & HAL_INT_RX) {
            mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
            if (ahp->ah_intr_mitigation_rx) {
                mask &= ~(AR_IMR_RXOK_LP);
                mask |=  AR_IMR_RXMINTR | AR_IMR_RXINTM;
            } else {
                mask |= AR_IMR_RXOK_LP;
            }
            msi_mask |= AR_INTR_PRIO_RXLP | AR_INTR_PRIO_RXHP;
            if (! p_cap->halAutoSleepSupport) {
                mask |= AR_IMR_GENTMR;
            }
        }

        if (ints & (HAL_INT_BMISC)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_TIM) {
                mask2 |= AR_IMR_S2_TIM;
            }
            if (ints & HAL_INT_DTIM) {
                mask2 |= AR_IMR_S2_DTIM;
            }
            if (ints & HAL_INT_DTIMSYNC) {
                mask2 |= AR_IMR_S2_DTIMSYNC;
            }
            if (ints & HAL_INT_CABEND) {
                mask2 |= (AR_IMR_S2_CABEND);
            }
            if (ints & HAL_INT_TSFOOR) {
                mask2 |= AR_IMR_S2_TSFOOR;
            }
        }

        if (ints & (HAL_INT_GTT | HAL_INT_CST)) {
            mask |= AR_IMR_BCNMISC;
            if (ints & HAL_INT_GTT) {
                mask2 |= AR_IMR_S2_GTT;
            }
            if (ints & HAL_INT_CST) {
                mask2 |= AR_IMR_S2_CST;
            }
        }

        if (ints & HAL_INT_BBPANIC) {
            /* EV92527 - MAC secondary interrupt must enable AR_IMR_BCNMISC */
            mask |= AR_IMR_BCNMISC;
            mask2 |= AR_IMR_S2_BBPANIC;
        }

        if (ints & HAL_INT_GENTIMER) {
            HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
                "%s: enabling gen timer\n", __func__);
            mask |= AR_IMR_GENTMR;
        }

        /* Write the new IMR and store off our SW copy. */
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, mask);
        OS_REG_WRITE(ah, AR_IMR, mask);
        ahp->ah_mask2Reg &= ~(AR_IMR_S2_TIM |
                        AR_IMR_S2_DTIM |
                        AR_IMR_S2_DTIMSYNC |
                        AR_IMR_S2_CABEND |
                        AR_IMR_S2_CABTO |
                        AR_IMR_S2_TSFOOR |
                        AR_IMR_S2_GTT |
                        AR_IMR_S2_CST |
                        AR_IMR_S2_BBPANIC);
        ahp->ah_mask2Reg |= mask2;
        OS_REG_WRITE(ah, AR_IMR_S2, ahp->ah_mask2Reg);
        ahp->ah_mask_reg = ints;

        if (! p_cap->halAutoSleepSupport) {
            if (ints & HAL_INT_TIM_TIMER) {
                OS_REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
            else {
                OS_REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
            }
        }
    }

    /* Re-enable interrupts if they were enabled before. */
#if HAL_INTR_REFCOUNT_DISABLE
    if ((ints & HAL_INT_GLOBAL)) {
#else
    if ((ints & HAL_INT_GLOBAL) && (OS_ATOMIC_READ(&ahp->ah_ier_ref_count) == 0)) {
#endif
        HALDEBUG(ah, HAL_DEBUG_INTERRUPT, "%s: enable IER\n", __func__);

        if (!nortc) {
            OS_REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
        }

        mask = AR_INTR_MAC_IRQ;
#ifdef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            if (ahp->ah_gpio_mask) {
                mask |= SM(ahp->ah_gpio_mask, AR_INTR_ASYNC_MASK_GPIO);
            }
        }
#endif

#if ATH_SUPPORT_MCI
        if (ints & HAL_INT_MCI) {
            mask |= AR_INTR_ASYNC_MASK_MCI;
        }
#endif

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_ENABLE), mask);
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_ASYNC_MASK), mask);

        if (ah->ah_config.ath_hal_enable_msi) {
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_ENABLE),
                msi_mask);
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_PRIO_ASYNC_MASK),
                msi_mask);
            if (AR_SREV_POSEIDON(ah)) {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
            } else {
                msi_pend_addr_mask = AR_PCIE_MSI_HW_INT_PENDING_ADDR;
            }
            OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_PCIE_MSI),
                ((ahp->ah_msi_reg | AR_PCIE_MSI_ENABLE) & msi_pend_addr_mask));
        }

        /*
         * Debug: enable to see the status of all synchronous interrupts.
         * Enable synchronous GPIO interrupts as well, since some async
         * GPIO interrupts don't wake the chip up.
         */
        mask = 0;
#ifndef ATH_GPIO_USE_ASYNC_CAUSE
        if (ints & HAL_INT_GPIO) {
            mask |= SM(ahp->ah_gpio_mask, AR_INTR_SYNC_MASK_GPIO);
        }
#endif
        if (AR_SREV_POSEIDON(ah)) {
            sync_en_def = AR9300_INTR_SYNC_DEF_NO_HOST1_PERR;
        }
        else if (AR_SREV_WASP(ah)) {
            sync_en_def = AR9340_INTR_SYNC_DEFAULT;
        }

        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_ENABLE),
            (sync_en_def | mask));
        OS_REG_WRITE(ah, AR_HOSTIF_REG(ah, AR_INTR_SYNC_MASK),
            (sync_en_def | mask));

        HALDEBUG(ah, HAL_DEBUG_INTERRUPT,
            "AR_IMR 0x%x IER 0x%x\n",
            OS_REG_READ(ah, AR_IMR), OS_REG_READ(ah, AR_IER));
    }

    return omask;
}
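
/*
 * A minimal usage sketch (an assumption about how callers typically drive
 * this routine, not code taken from this HAL): since the previous mask is
 * returned, interrupts can be masked around a critical section and then
 * restored:
 *
 *     HAL_INT omask;
 *
 *     omask = ar9300_set_interrupts(ah, 0, AH_FALSE);    // mask everything
 *     ...                                                // touch shared state
 *     (void) ar9300_set_interrupts(ah, omask | HAL_INT_GLOBAL, AH_FALSE);
 *
 * HAL_INT_GLOBAL gates the IER/host interrupt enables; the individual
 * HAL_INT_* bits select which sources are unmasked in AR_IMR/AR_IMR_S2.
 * The AH_FALSE 'nortc' argument is illustrative only.
 */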

void
ar9300_set_intr_mitigation_timer(
    struct ath_hal* ah,
    HAL_INT_MITIGATION reg,
    u_int32_t value)
{
#ifdef AR5416_INT_MITIGATION
    switch (reg) {
    case HAL_INT_THRESHOLD:
        /* Note: 'value' is unused here; the threshold register is cleared. */
        OS_REG_WRITE(ah, AR_MIRT, 0);
        break;
    case HAL_INT_RX_LASTPKT:
        OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, value);
        break;
    case HAL_INT_RX_FIRSTPKT:
        OS_REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, value);
        break;
    case HAL_INT_TX_LASTPKT:
        OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, value);
        break;
    case HAL_INT_TX_FIRSTPKT:
        OS_REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, value);
        break;
    default:
        break;
    }
#endif
}
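
/*
 * Illustrative only (the values below are made up, not taken from this
 * driver): with AR5416_INT_MITIGATION compiled in, an RX mitigation window
 * could be programmed as
 *
 *     ar9300_set_intr_mitigation_timer(ah, HAL_INT_RX_FIRSTPKT, 500);
 *     ar9300_set_intr_mitigation_timer(ah, HAL_INT_RX_LASTPKT, 250);
 *
 * As the getter below shows, the "first packet" count occupies the upper
 * 16 bits of AR_RIMT/AR_TIMT and the "last packet" count the lower 16
 * bits, while HAL_INT_THRESHOLD simply clears AR_MIRT.
 */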

u_int32_t
ar9300_get_intr_mitigation_timer(struct ath_hal* ah, HAL_INT_MITIGATION reg)
{
    u_int32_t val = 0;
#ifdef AR5416_INT_MITIGATION
    switch (reg) {
    case HAL_INT_THRESHOLD:
        val = OS_REG_READ(ah, AR_MIRT);
        break;
    case HAL_INT_RX_LASTPKT:
        val = OS_REG_READ(ah, AR_RIMT) & 0xFFFF;
        break;
    case HAL_INT_RX_FIRSTPKT:
        val = OS_REG_READ(ah, AR_RIMT) >> 16;
        break;
    case HAL_INT_TX_LASTPKT:
        val = OS_REG_READ(ah, AR_TIMT) & 0xFFFF;
        break;
    case HAL_INT_TX_FIRSTPKT:
        val = OS_REG_READ(ah, AR_TIMT) >> 16;
        break;
    default:
        break;
    }
#endif
    return val;
}
791