/*
 * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/interrupt.h>

#include "wil6210.h"
#include "trace.h"

/**
 * Theory of operation:
 *
 * There is an ISR pseudo-cause register,
 * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE
 * Its bits represent OR'ed bits from 3 real ISR registers:
 * TX, RX, and MISC.
 *
 * Registers may be configured to either "write 1 to clear" or
 * "clear on read" mode.
 *
 * When handling an interrupt, one has to mask/unmask interrupts for the
 * real ISR registers, or hardware may malfunction.
35 * 36 */ 37 38 #define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL) 39 #define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \ 40 BIT_DMA_EP_RX_ICR_RX_HTRSH) 41 #define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \ 42 BIT_DMA_EP_TX_ICR_TX_DONE_N(0)) 43 #define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \ 44 ISR_MISC_MBOX_EVT | \ 45 ISR_MISC_FW_ERROR) 46 47 #define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \ 48 BIT_DMA_PSEUDO_CAUSE_TX | \ 49 BIT_DMA_PSEUDO_CAUSE_MISC)) 50 51 #if defined(CONFIG_WIL6210_ISR_COR) 52 /* configure to Clear-On-Read mode */ 53 #define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL) 54 55 static inline void wil_icr_clear(u32 x, void __iomem *addr) 56 { 57 } 58 #else /* defined(CONFIG_WIL6210_ISR_COR) */ 59 /* configure to Write-1-to-Clear mode */ 60 #define WIL_ICR_ICC_VALUE (0UL) 61 62 static inline void wil_icr_clear(u32 x, void __iomem *addr) 63 { 64 iowrite32(x, addr); 65 } 66 #endif /* defined(CONFIG_WIL6210_ISR_COR) */ 67 68 static inline u32 wil_ioread32_and_clear(void __iomem *addr) 69 { 70 u32 x = ioread32(addr); 71 72 wil_icr_clear(x, addr); 73 74 return x; 75 } 76 77 static void wil6210_mask_irq_tx(struct wil6210_priv *wil) 78 { 79 iowrite32(WIL6210_IRQ_DISABLE, wil->csr + 80 HOSTADDR(RGF_DMA_EP_TX_ICR) + 81 offsetof(struct RGF_ICR, IMS)); 82 } 83 84 static void wil6210_mask_irq_rx(struct wil6210_priv *wil) 85 { 86 iowrite32(WIL6210_IRQ_DISABLE, wil->csr + 87 HOSTADDR(RGF_DMA_EP_RX_ICR) + 88 offsetof(struct RGF_ICR, IMS)); 89 } 90 91 static void wil6210_mask_irq_misc(struct wil6210_priv *wil) 92 { 93 iowrite32(WIL6210_IRQ_DISABLE, wil->csr + 94 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 95 offsetof(struct RGF_ICR, IMS)); 96 } 97 98 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil) 99 { 100 wil_dbg_irq(wil, "%s()\n", __func__); 101 102 iowrite32(WIL6210_IRQ_DISABLE, wil->csr + 103 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); 104 105 clear_bit(wil_status_irqen, wil->status); 106 } 107 108 void wil6210_unmask_irq_tx(struct wil6210_priv *wil) 109 { 
110 iowrite32(WIL6210_IMC_TX, wil->csr + 111 HOSTADDR(RGF_DMA_EP_TX_ICR) + 112 offsetof(struct RGF_ICR, IMC)); 113 } 114 115 void wil6210_unmask_irq_rx(struct wil6210_priv *wil) 116 { 117 iowrite32(WIL6210_IMC_RX, wil->csr + 118 HOSTADDR(RGF_DMA_EP_RX_ICR) + 119 offsetof(struct RGF_ICR, IMC)); 120 } 121 122 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil) 123 { 124 iowrite32(WIL6210_IMC_MISC, wil->csr + 125 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 126 offsetof(struct RGF_ICR, IMC)); 127 } 128 129 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil) 130 { 131 wil_dbg_irq(wil, "%s()\n", __func__); 132 133 set_bit(wil_status_irqen, wil->status); 134 135 iowrite32(WIL6210_IRQ_PSEUDO_MASK, wil->csr + 136 HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW)); 137 } 138 139 void wil_mask_irq(struct wil6210_priv *wil) 140 { 141 wil_dbg_irq(wil, "%s()\n", __func__); 142 143 wil6210_mask_irq_tx(wil); 144 wil6210_mask_irq_rx(wil); 145 wil6210_mask_irq_misc(wil); 146 wil6210_mask_irq_pseudo(wil); 147 } 148 149 void wil_unmask_irq(struct wil6210_priv *wil) 150 { 151 wil_dbg_irq(wil, "%s()\n", __func__); 152 153 iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + 154 offsetof(struct RGF_ICR, ICC)); 155 iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + 156 offsetof(struct RGF_ICR, ICC)); 157 iowrite32(WIL_ICR_ICC_VALUE, wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + 158 offsetof(struct RGF_ICR, ICC)); 159 160 wil6210_unmask_irq_pseudo(wil); 161 wil6210_unmask_irq_tx(wil); 162 wil6210_unmask_irq_rx(wil); 163 wil6210_unmask_irq_misc(wil); 164 } 165 166 /* target write operation */ 167 #define W(a, v) do { iowrite32(v, wil->csr + HOSTADDR(a)); wmb(); } while (0) 168 169 static 170 void wil_configure_interrupt_moderation_new(struct wil6210_priv *wil) 171 { 172 /* Disable and clear tx counter before (re)configuration */ 173 W(RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR); 174 W(RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration); 175 
wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n", 176 wil->tx_max_burst_duration); 177 /* Configure TX max burst duration timer to use usec units */ 178 W(RGF_DMA_ITR_TX_CNT_CTL, 179 BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL); 180 181 /* Disable and clear tx idle counter before (re)configuration */ 182 W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR); 183 W(RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout); 184 wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n", 185 wil->tx_interframe_timeout); 186 /* Configure TX max burst duration timer to use usec units */ 187 W(RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN | 188 BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL); 189 190 /* Disable and clear rx counter before (re)configuration */ 191 W(RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR); 192 W(RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration); 193 wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n", 194 wil->rx_max_burst_duration); 195 /* Configure TX max burst duration timer to use usec units */ 196 W(RGF_DMA_ITR_RX_CNT_CTL, 197 BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL); 198 199 /* Disable and clear rx idle counter before (re)configuration */ 200 W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR); 201 W(RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout); 202 wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n", 203 wil->rx_interframe_timeout); 204 /* Configure TX max burst duration timer to use usec units */ 205 W(RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN | 206 BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL); 207 } 208 209 static 210 void wil_configure_interrupt_moderation_lgc(struct wil6210_priv *wil) 211 { 212 /* disable, use usec resolution */ 213 W(RGF_DMA_ITR_CNT_CRL, BIT_DMA_ITR_CNT_CRL_CLR); 214 215 wil_info(wil, "set ITR_TRSH = %d usec\n", wil->rx_max_burst_duration); 216 W(RGF_DMA_ITR_CNT_TRSH, wil->rx_max_burst_duration); 217 /* start it */ 218 
W(RGF_DMA_ITR_CNT_CRL, 219 BIT_DMA_ITR_CNT_CRL_EN | BIT_DMA_ITR_CNT_CRL_EXT_TICK); 220 } 221 222 #undef W 223 224 void wil_configure_interrupt_moderation(struct wil6210_priv *wil) 225 { 226 wil_dbg_irq(wil, "%s()\n", __func__); 227 228 /* disable interrupt moderation for monitor 229 * to get better timestamp precision 230 */ 231 if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) 232 return; 233 234 if (test_bit(hw_capability_advanced_itr_moderation, 235 wil->hw_capabilities)) 236 wil_configure_interrupt_moderation_new(wil); 237 else { 238 /* Advanced interrupt moderation is not available before 239 * Sparrow v2. Will use legacy interrupt moderation 240 */ 241 wil_configure_interrupt_moderation_lgc(wil); 242 } 243 } 244 245 static irqreturn_t wil6210_irq_rx(int irq, void *cookie) 246 { 247 struct wil6210_priv *wil = cookie; 248 u32 isr = wil_ioread32_and_clear(wil->csr + 249 HOSTADDR(RGF_DMA_EP_RX_ICR) + 250 offsetof(struct RGF_ICR, ICR)); 251 bool need_unmask = true; 252 253 trace_wil6210_irq_rx(isr); 254 wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr); 255 256 if (!isr) { 257 wil_err(wil, "spurious IRQ: RX\n"); 258 return IRQ_NONE; 259 } 260 261 wil6210_mask_irq_rx(wil); 262 263 /* RX_DONE and RX_HTRSH interrupts are the same if interrupt 264 * moderation is not used. Interrupt moderation may cause RX 265 * buffer overflow while RX_DONE is delayed. The required 266 * action is always the same - should empty the accumulated 267 * packets from the RX ring. 
268 */ 269 if (isr & (BIT_DMA_EP_RX_ICR_RX_DONE | BIT_DMA_EP_RX_ICR_RX_HTRSH)) { 270 wil_dbg_irq(wil, "RX done\n"); 271 272 if (isr & BIT_DMA_EP_RX_ICR_RX_HTRSH) 273 wil_err_ratelimited(wil, 274 "Received \"Rx buffer is in risk of overflow\" interrupt\n"); 275 276 isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE | 277 BIT_DMA_EP_RX_ICR_RX_HTRSH); 278 if (test_bit(wil_status_reset_done, wil->status)) { 279 if (test_bit(wil_status_napi_en, wil->status)) { 280 wil_dbg_txrx(wil, "NAPI(Rx) schedule\n"); 281 need_unmask = false; 282 napi_schedule(&wil->napi_rx); 283 } else { 284 wil_err(wil, 285 "Got Rx interrupt while stopping interface\n"); 286 } 287 } else { 288 wil_err(wil, "Got Rx interrupt while in reset\n"); 289 } 290 } 291 292 if (isr) 293 wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr); 294 295 /* Rx IRQ will be enabled when NAPI processing finished */ 296 297 atomic_inc(&wil->isr_count_rx); 298 299 if (unlikely(need_unmask)) 300 wil6210_unmask_irq_rx(wil); 301 302 return IRQ_HANDLED; 303 } 304 305 static irqreturn_t wil6210_irq_tx(int irq, void *cookie) 306 { 307 struct wil6210_priv *wil = cookie; 308 u32 isr = wil_ioread32_and_clear(wil->csr + 309 HOSTADDR(RGF_DMA_EP_TX_ICR) + 310 offsetof(struct RGF_ICR, ICR)); 311 bool need_unmask = true; 312 313 trace_wil6210_irq_tx(isr); 314 wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr); 315 316 if (!isr) { 317 wil_err(wil, "spurious IRQ: TX\n"); 318 return IRQ_NONE; 319 } 320 321 wil6210_mask_irq_tx(wil); 322 323 if (isr & BIT_DMA_EP_TX_ICR_TX_DONE) { 324 wil_dbg_irq(wil, "TX done\n"); 325 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE; 326 /* clear also all VRING interrupts */ 327 isr &= ~(BIT(25) - 1UL); 328 if (test_bit(wil_status_reset_done, wil->status)) { 329 wil_dbg_txrx(wil, "NAPI(Tx) schedule\n"); 330 need_unmask = false; 331 napi_schedule(&wil->napi_tx); 332 } else { 333 wil_err(wil, "Got Tx interrupt while in reset\n"); 334 } 335 } 336 337 if (isr) 338 wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr); 339 340 /* Tx IRQ will be 
enabled when NAPI processing finished */ 341 342 atomic_inc(&wil->isr_count_tx); 343 344 if (unlikely(need_unmask)) 345 wil6210_unmask_irq_tx(wil); 346 347 return IRQ_HANDLED; 348 } 349 350 static void wil_notify_fw_error(struct wil6210_priv *wil) 351 { 352 struct device *dev = &wil_to_ndev(wil)->dev; 353 char *envp[3] = { 354 [0] = "SOURCE=wil6210", 355 [1] = "EVENT=FW_ERROR", 356 [2] = NULL, 357 }; 358 wil_err(wil, "Notify about firmware error\n"); 359 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 360 } 361 362 static void wil_cache_mbox_regs(struct wil6210_priv *wil) 363 { 364 /* make shadow copy of registers that should not change on run time */ 365 wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX, 366 sizeof(struct wil6210_mbox_ctl)); 367 wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx); 368 wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); 369 } 370 371 static irqreturn_t wil6210_irq_misc(int irq, void *cookie) 372 { 373 struct wil6210_priv *wil = cookie; 374 u32 isr = wil_ioread32_and_clear(wil->csr + 375 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 376 offsetof(struct RGF_ICR, ICR)); 377 378 trace_wil6210_irq_misc(isr); 379 wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr); 380 381 if (!isr) { 382 wil_err(wil, "spurious IRQ: MISC\n"); 383 return IRQ_NONE; 384 } 385 386 wil6210_mask_irq_misc(wil); 387 388 if (isr & ISR_MISC_FW_ERROR) { 389 wil_err(wil, "Firmware error detected\n"); 390 clear_bit(wil_status_fwready, wil->status); 391 /* 392 * do not clear @isr here - we do 2-nd part in thread 393 * there, user space get notified, and it should be done 394 * in non-atomic context 395 */ 396 } 397 398 if (isr & ISR_MISC_FW_READY) { 399 wil_dbg_irq(wil, "IRQ: FW ready\n"); 400 wil_cache_mbox_regs(wil); 401 set_bit(wil_status_reset_done, wil->status); 402 /** 403 * Actual FW ready indicated by the 404 * WMI_FW_READY_EVENTID 405 */ 406 isr &= ~ISR_MISC_FW_READY; 407 } 408 409 wil->isr_misc = isr; 410 411 if (isr) { 412 return IRQ_WAKE_THREAD; 413 } else { 414 
wil6210_unmask_irq_misc(wil); 415 return IRQ_HANDLED; 416 } 417 } 418 419 static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie) 420 { 421 struct wil6210_priv *wil = cookie; 422 u32 isr = wil->isr_misc; 423 424 trace_wil6210_irq_misc_thread(isr); 425 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); 426 427 if (isr & ISR_MISC_FW_ERROR) { 428 wil_notify_fw_error(wil); 429 isr &= ~ISR_MISC_FW_ERROR; 430 wil_fw_error_recovery(wil); 431 } 432 433 if (isr & ISR_MISC_MBOX_EVT) { 434 wil_dbg_irq(wil, "MBOX event\n"); 435 wmi_recv_cmd(wil); 436 isr &= ~ISR_MISC_MBOX_EVT; 437 } 438 439 if (isr) 440 wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr); 441 442 wil->isr_misc = 0; 443 444 wil6210_unmask_irq_misc(wil); 445 446 return IRQ_HANDLED; 447 } 448 449 /** 450 * thread IRQ handler 451 */ 452 static irqreturn_t wil6210_thread_irq(int irq, void *cookie) 453 { 454 struct wil6210_priv *wil = cookie; 455 456 wil_dbg_irq(wil, "Thread IRQ\n"); 457 /* Discover real IRQ cause */ 458 if (wil->isr_misc) 459 wil6210_irq_misc_thread(irq, cookie); 460 461 wil6210_unmask_irq_pseudo(wil); 462 463 return IRQ_HANDLED; 464 } 465 466 /* DEBUG 467 * There is subtle bug in hardware that causes IRQ to raise when it should be 468 * masked. It is quite rare and hard to debug. 469 * 470 * Catch irq issue if it happens and print all I can. 
471 */ 472 static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause) 473 { 474 if (!test_bit(wil_status_irqen, wil->status)) { 475 u32 icm_rx = wil_ioread32_and_clear(wil->csr + 476 HOSTADDR(RGF_DMA_EP_RX_ICR) + 477 offsetof(struct RGF_ICR, ICM)); 478 u32 icr_rx = wil_ioread32_and_clear(wil->csr + 479 HOSTADDR(RGF_DMA_EP_RX_ICR) + 480 offsetof(struct RGF_ICR, ICR)); 481 u32 imv_rx = ioread32(wil->csr + 482 HOSTADDR(RGF_DMA_EP_RX_ICR) + 483 offsetof(struct RGF_ICR, IMV)); 484 u32 icm_tx = wil_ioread32_and_clear(wil->csr + 485 HOSTADDR(RGF_DMA_EP_TX_ICR) + 486 offsetof(struct RGF_ICR, ICM)); 487 u32 icr_tx = wil_ioread32_and_clear(wil->csr + 488 HOSTADDR(RGF_DMA_EP_TX_ICR) + 489 offsetof(struct RGF_ICR, ICR)); 490 u32 imv_tx = ioread32(wil->csr + 491 HOSTADDR(RGF_DMA_EP_TX_ICR) + 492 offsetof(struct RGF_ICR, IMV)); 493 u32 icm_misc = wil_ioread32_and_clear(wil->csr + 494 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 495 offsetof(struct RGF_ICR, ICM)); 496 u32 icr_misc = wil_ioread32_and_clear(wil->csr + 497 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 498 offsetof(struct RGF_ICR, ICR)); 499 u32 imv_misc = ioread32(wil->csr + 500 HOSTADDR(RGF_DMA_EP_MISC_ICR) + 501 offsetof(struct RGF_ICR, IMV)); 502 wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n" 503 "Rx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" 504 "Tx icm:icr:imv 0x%08x 0x%08x 0x%08x\n" 505 "Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n", 506 pseudo_cause, 507 icm_rx, icr_rx, imv_rx, 508 icm_tx, icr_tx, imv_tx, 509 icm_misc, icr_misc, imv_misc); 510 511 return -EINVAL; 512 } 513 514 return 0; 515 } 516 517 static irqreturn_t wil6210_hardirq(int irq, void *cookie) 518 { 519 irqreturn_t rc = IRQ_HANDLED; 520 struct wil6210_priv *wil = cookie; 521 u32 pseudo_cause = ioread32(wil->csr + HOSTADDR(RGF_DMA_PSEUDO_CAUSE)); 522 523 /** 524 * pseudo_cause is Clear-On-Read, no need to ACK 525 */ 526 if ((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)) 527 return IRQ_NONE; 528 529 /* FIXME: IRQ mask debug */ 530 if 
(wil6210_debug_irq_mask(wil, pseudo_cause)) 531 return IRQ_NONE; 532 533 trace_wil6210_irq_pseudo(pseudo_cause); 534 wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause); 535 536 wil6210_mask_irq_pseudo(wil); 537 538 /* Discover real IRQ cause 539 * There are 2 possible phases for every IRQ: 540 * - hard IRQ handler called right here 541 * - threaded handler called later 542 * 543 * Hard IRQ handler reads and clears ISR. 544 * 545 * If threaded handler requested, hard IRQ handler 546 * returns IRQ_WAKE_THREAD and saves ISR register value 547 * for the threaded handler use. 548 * 549 * voting for wake thread - need at least 1 vote 550 */ 551 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) && 552 (wil6210_irq_rx(irq, cookie) == IRQ_WAKE_THREAD)) 553 rc = IRQ_WAKE_THREAD; 554 555 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) && 556 (wil6210_irq_tx(irq, cookie) == IRQ_WAKE_THREAD)) 557 rc = IRQ_WAKE_THREAD; 558 559 if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) && 560 (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD)) 561 rc = IRQ_WAKE_THREAD; 562 563 /* if thread is requested, it will unmask IRQ */ 564 if (rc != IRQ_WAKE_THREAD) 565 wil6210_unmask_irq_pseudo(wil); 566 567 return rc; 568 } 569 570 static int wil6210_request_3msi(struct wil6210_priv *wil, int irq) 571 { 572 int rc; 573 /* 574 * IRQ's are in the following order: 575 * - Tx 576 * - Rx 577 * - Misc 578 */ 579 580 rc = request_irq(irq, wil6210_irq_tx, IRQF_SHARED, 581 WIL_NAME"_tx", wil); 582 if (rc) 583 return rc; 584 585 rc = request_irq(irq + 1, wil6210_irq_rx, IRQF_SHARED, 586 WIL_NAME"_rx", wil); 587 if (rc) 588 goto free0; 589 590 rc = request_threaded_irq(irq + 2, wil6210_irq_misc, 591 wil6210_irq_misc_thread, 592 IRQF_SHARED, WIL_NAME"_misc", wil); 593 if (rc) 594 goto free1; 595 596 return 0; 597 /* error branch */ 598 free1: 599 free_irq(irq + 1, wil); 600 free0: 601 free_irq(irq, wil); 602 603 return rc; 604 } 605 606 /* can't use wil_ioread32_and_clear because ICC value is not set yet */ 607 
static inline void wil_clear32(void __iomem *addr) 608 { 609 u32 x = ioread32(addr); 610 611 iowrite32(x, addr); 612 } 613 614 void wil6210_clear_irq(struct wil6210_priv *wil) 615 { 616 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) + 617 offsetof(struct RGF_ICR, ICR)); 618 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) + 619 offsetof(struct RGF_ICR, ICR)); 620 wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) + 621 offsetof(struct RGF_ICR, ICR)); 622 wmb(); /* make sure write completed */ 623 } 624 625 int wil6210_init_irq(struct wil6210_priv *wil, int irq) 626 { 627 int rc; 628 629 wil_dbg_misc(wil, "%s() n_msi=%d\n", __func__, wil->n_msi); 630 631 if (wil->n_msi == 3) 632 rc = wil6210_request_3msi(wil, irq); 633 else 634 rc = request_threaded_irq(irq, wil6210_hardirq, 635 wil6210_thread_irq, 636 wil->n_msi ? 0 : IRQF_SHARED, 637 WIL_NAME, wil); 638 return rc; 639 } 640 641 void wil6210_fini_irq(struct wil6210_priv *wil, int irq) 642 { 643 wil_dbg_misc(wil, "%s()\n", __func__); 644 645 wil_mask_irq(wil); 646 free_irq(irq, wil); 647 if (wil->n_msi == 3) { 648 free_irq(irq + 1, wil); 649 free_irq(irq + 2, wil); 650 } 651 } 652