1 /****************************************************************************** 2 * 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 * Copyright(c) 2016 Intel Deutschland GmbH 6 * 7 * Portions of this file are derived from the ipw3945 project, as well 8 * as portions of the ieee80211 subsystem header files. 9 * 10 * This program is free software; you can redistribute it and/or modify it 11 * under the terms of version 2 of the GNU General Public License as 12 * published by the Free Software Foundation. 13 * 14 * This program is distributed in the hope that it will be useful, but WITHOUT 15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 17 * more details. 18 * 19 * You should have received a copy of the GNU General Public License along with 20 * this program; if not, write to the Free Software Foundation, Inc., 21 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 22 * 23 * The full GNU General Public License is included in this distribution in the 24 * file called LICENSE. 25 * 26 * Contact Information: 27 * Intel Linux Wireless <linuxwifi@intel.com> 28 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * 30 *****************************************************************************/ 31 #include <linux/sched.h> 32 #include <linux/wait.h> 33 #include <linux/gfp.h> 34 35 #include "iwl-prph.h" 36 #include "iwl-io.h" 37 #include "internal.h" 38 #include "iwl-op-mode.h" 39 40 /****************************************************************************** 41 * 42 * RX path functions 43 * 44 ******************************************************************************/ 45 46 /* 47 * Rx theory of operation 48 * 49 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs), 50 * each of which point to Receive Buffers to be filled by the NIC. These get 51 * used not only for Rx frames, but for any command response or notification 52 * from the NIC. The driver and NIC manage the Rx buffers by means 53 * of indexes into the circular buffer. 54 * 55 * Rx Queue Indexes 56 * The host/firmware share two index registers for managing the Rx buffers. 57 * 58 * The READ index maps to the first position that the firmware may be writing 59 * to -- the driver can read up to (but not including) this position and get 60 * good data. 61 * The READ index is managed by the firmware once the card is enabled. 62 * 63 * The WRITE index maps to the last position the driver has read from -- the 64 * position preceding WRITE is the last slot the firmware can place a packet. 65 * 66 * The queue is empty (no good data) if WRITE = READ - 1, and is full if 67 * WRITE = READ. 68 * 69 * During initialization, the host sets up the READ queue position to the first 70 * INDEX position, and WRITE to the last (READ - 1 wrapped) 71 * 72 * When the firmware places a packet in a buffer, it will advance the READ index 73 * and fire the RX interrupt. The driver can then query the READ index and 74 * process as many packets as possible, moving the WRITE index forward as it 75 * resets the Rx queue buffers with new memory. 76 * 77 * The management in the driver is as follows: 78 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 79 * When the interrupt handler is called, the request is processed. 
 * The page is either stolen - transferred to the upper layer -
 * or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When two RBDs are used - they are transferred to the allocator's empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When another six RBDs are used - they are transferred to the allocator's
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it keeps trying to claim
 *   them until they are ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of missing RBDs per allocation request (a request is
 *   posted with 2 empty RBDs, and there is no guarantee when the other 6
 *   RBDs are supplied). The queues themselves recycle the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq. The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detaches iwl_rx_mem_buffers from the pool up to
 *                            the READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
        /* Make sure rx queue size is a power of 2 */
        WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
         * between empty and completely full queues.
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
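         *
         * For example (illustrative numbers): with queue_size = 256, read = 5
         * and write = 3, this yields (5 - 3 - 1) & 255 = 1 free slot; with
         * read = 0 and write = 255 it yields (0 - 255 - 1) & 255 = 0, i.e.
         * no slots left to restock.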
152 */ 153 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); 154 } 155 156 /* 157 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 158 */ 159 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) 160 { 161 return cpu_to_le32((u32)(dma_addr >> 8)); 162 } 163 164 static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs, 165 u64 val) 166 { 167 iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff); 168 iwl_write_prph_no_grab(trans, ofs + 4, val >> 32); 169 } 170 171 /* 172 * iwl_pcie_rx_stop - stops the Rx DMA 173 */ 174 int iwl_pcie_rx_stop(struct iwl_trans *trans) 175 { 176 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 177 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG, 178 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000); 179 } 180 181 /* 182 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 183 */ 184 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 185 struct iwl_rxq *rxq) 186 { 187 u32 reg; 188 189 lockdep_assert_held(&rxq->lock); 190 191 /* 192 * explicitly wake up the NIC if: 193 * 1. shadow registers aren't enabled 194 * 2. there is a chance that the NIC is asleep 195 */ 196 if (!trans->cfg->base_params->shadow_reg_enable && 197 test_bit(STATUS_TPOWER_PMI, &trans->status)) { 198 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 199 200 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 201 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n", 202 reg); 203 iwl_set_bit(trans, CSR_GP_CNTRL, 204 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 205 rxq->need_update = true; 206 return; 207 } 208 } 209 210 rxq->write_actual = round_down(rxq->write, 8); 211 if (trans->cfg->mq_rx_supported) 212 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), 213 rxq->write_actual); 214 /* 215 * write to FH_RSCSR_CHNL0_WPTR register even in MQ as a W/A to 216 * hardware shadow registers bug - writing to RFH_Q_FRBDCB_WIDX will 217 * not wake the NIC. 218 */ 219 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 220 } 221 222 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 223 { 224 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 225 int i; 226 227 for (i = 0; i < trans->num_rx_queues; i++) { 228 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 229 230 if (!rxq->need_update) 231 continue; 232 spin_lock(&rxq->lock); 233 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 234 rxq->need_update = false; 235 spin_unlock(&rxq->lock); 236 } 237 } 238 239 /* 240 * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx 241 */ 242 static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, 243 struct iwl_rxq *rxq) 244 { 245 struct iwl_rx_mem_buffer *rxb; 246 247 /* 248 * If the device isn't enabled - no need to try to add buffers... 249 * This can happen when we stop the device and still have an interrupt 250 * pending. We stop the APM before we sync the interrupts because we 251 * have to (see comment there). On the other hand, since the APM is 252 * stopped, we cannot access the HW (in particular not prph). 253 * So don't try to restock if the APM has been already stopped. 
254 */ 255 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 256 return; 257 258 spin_lock(&rxq->lock); 259 while (rxq->free_count) { 260 __le64 *bd = (__le64 *)rxq->bd; 261 262 /* Get next free Rx buffer, remove from free list */ 263 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 264 list); 265 list_del(&rxb->list); 266 267 /* 12 first bits are expected to be empty */ 268 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); 269 /* Point to Rx buffer via next RBD in circular buffer */ 270 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); 271 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; 272 rxq->free_count--; 273 } 274 spin_unlock(&rxq->lock); 275 276 /* 277 * If we've added more space for the firmware to place data, tell it. 278 * Increment device's write pointer in multiples of 8. 279 */ 280 if (rxq->write_actual != (rxq->write & ~0x7)) { 281 spin_lock(&rxq->lock); 282 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 283 spin_unlock(&rxq->lock); 284 } 285 } 286 287 /* 288 * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx 289 */ 290 static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans, 291 struct iwl_rxq *rxq) 292 { 293 struct iwl_rx_mem_buffer *rxb; 294 295 /* 296 * If the device isn't enabled - not need to try to add buffers... 297 * This can happen when we stop the device and still have an interrupt 298 * pending. We stop the APM before we sync the interrupts because we 299 * have to (see comment there). On the other hand, since the APM is 300 * stopped, we cannot access the HW (in particular not prph). 301 * So don't try to restock if the APM has been already stopped. 302 */ 303 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 304 return; 305 306 spin_lock(&rxq->lock); 307 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { 308 __le32 *bd = (__le32 *)rxq->bd; 309 /* The overwritten rxb must be a used one */ 310 rxb = rxq->queue[rxq->write]; 311 BUG_ON(rxb && rxb->page); 312 313 /* Get next free Rx buffer, remove from free list */ 314 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 315 list); 316 list_del(&rxb->list); 317 318 /* Point to Rx buffer via next RBD in circular buffer */ 319 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 320 rxq->queue[rxq->write] = rxb; 321 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 322 rxq->free_count--; 323 } 324 spin_unlock(&rxq->lock); 325 326 /* If we've added more space for the firmware to place data, tell it. 327 * Increment device's write pointer in multiples of 8. */ 328 if (rxq->write_actual != (rxq->write & ~0x7)) { 329 spin_lock(&rxq->lock); 330 iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 331 spin_unlock(&rxq->lock); 332 } 333 } 334 335 /* 336 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool 337 * 338 * If there are slots in the RX queue that need to be restocked, 339 * and we have free pre-allocated buffers, fill the ranks as much 340 * as we can, pulling from rx_free. 341 * 342 * This moves the 'write' index forward to catch up with 'processed', and 343 * also updates the memory address in the firmware to reference the new 344 * target buffer. 345 */ 346 static 347 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) 348 { 349 if (trans->cfg->mq_rx_supported) 350 iwl_pcie_rxq_mq_restock(trans, rxq); 351 else 352 iwl_pcie_rxq_sq_restock(trans, rxq); 353 } 354 355 /* 356 * iwl_pcie_rx_alloc_page - allocates and returns a page. 
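 *
 * Note (a summary of the code in this file, not a new contract): callers pass
 * the gfp priority that fits their context - for example GFP_KERNEL from
 * iwl_pcie_rx_init() and the allocator work, or GFP_ATOMIC from the emergency
 * path in iwl_pcie_rx_handle() - and __GFP_COMP is added here when
 * rx_page_order > 0, e.g.:
 *
 *	page = iwl_pcie_rx_alloc_page(trans, GFP_ATOMIC);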
357 * 358 */ 359 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, 360 gfp_t priority) 361 { 362 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 363 struct page *page; 364 gfp_t gfp_mask = priority; 365 366 if (trans_pcie->rx_page_order > 0) 367 gfp_mask |= __GFP_COMP; 368 369 /* Alloc a new receive buffer */ 370 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); 371 if (!page) { 372 if (net_ratelimit()) 373 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 374 trans_pcie->rx_page_order); 375 /* 376 * Issue an error if we don't have enough pre-allocated 377 * buffers. 378 ` */ 379 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) 380 IWL_CRIT(trans, 381 "Failed to alloc_pages\n"); 382 return NULL; 383 } 384 return page; 385 } 386 387 /* 388 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 389 * 390 * A used RBD is an Rx buffer that has been given to the stack. To use it again 391 * a page must be allocated and the RBD must point to the page. This function 392 * doesn't change the HW pointer but handles the list of pages that is used by 393 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 394 * allocated buffers. 395 */ 396 static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 397 struct iwl_rxq *rxq) 398 { 399 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 400 struct iwl_rx_mem_buffer *rxb; 401 struct page *page; 402 403 while (1) { 404 spin_lock(&rxq->lock); 405 if (list_empty(&rxq->rx_used)) { 406 spin_unlock(&rxq->lock); 407 return; 408 } 409 spin_unlock(&rxq->lock); 410 411 /* Alloc a new receive buffer */ 412 page = iwl_pcie_rx_alloc_page(trans, priority); 413 if (!page) 414 return; 415 416 spin_lock(&rxq->lock); 417 418 if (list_empty(&rxq->rx_used)) { 419 spin_unlock(&rxq->lock); 420 __free_pages(page, trans_pcie->rx_page_order); 421 return; 422 } 423 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer, 424 list); 425 list_del(&rxb->list); 426 spin_unlock(&rxq->lock); 427 428 BUG_ON(rxb->page); 429 rxb->page = page; 430 /* Get physical address of the RB */ 431 rxb->page_dma = 432 dma_map_page(trans->dev, page, 0, 433 PAGE_SIZE << trans_pcie->rx_page_order, 434 DMA_FROM_DEVICE); 435 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 436 rxb->page = NULL; 437 spin_lock(&rxq->lock); 438 list_add(&rxb->list, &rxq->rx_used); 439 spin_unlock(&rxq->lock); 440 __free_pages(page, trans_pcie->rx_page_order); 441 return; 442 } 443 444 spin_lock(&rxq->lock); 445 446 list_add_tail(&rxb->list, &rxq->rx_free); 447 rxq->free_count++; 448 449 spin_unlock(&rxq->lock); 450 } 451 } 452 453 static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 454 { 455 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 456 int i; 457 458 for (i = 0; i < RX_POOL_SIZE; i++) { 459 if (!trans_pcie->rx_pool[i].page) 460 continue; 461 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, 462 PAGE_SIZE << trans_pcie->rx_page_order, 463 DMA_FROM_DEVICE); 464 __free_pages(trans_pcie->rx_pool[i].page, 465 trans_pcie->rx_page_order); 466 trans_pcie->rx_pool[i].page = NULL; 467 } 468 } 469 470 /* 471 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues 472 * 473 * Allocates for each received request 8 pages 474 * Called as a scheduled work item. 
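 *
 * Sketch of the producer side (as implemented in iwl_pcie_rx_reuse_rbd()
 * below): once a queue has moved its used RBDs to rba->rbd_empty, it posts a
 * request and schedules this work item:
 *
 *	atomic_inc(&rba->req_pending);
 *	queue_work(rba->alloc_wq, &rba->rx_alloc);
 *
 * Each pending request is then converted here into RX_CLAIM_REQ_ALLOC mapped
 * pages on rba->rbd_allocated, and rba->req_ready is incremented for
 * iwl_pcie_rx_allocator_get() to claim.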
475 */ 476 static void iwl_pcie_rx_allocator(struct iwl_trans *trans) 477 { 478 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 479 struct iwl_rb_allocator *rba = &trans_pcie->rba; 480 struct list_head local_empty; 481 int pending = atomic_xchg(&rba->req_pending, 0); 482 483 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending); 484 485 /* If we were scheduled - there is at least one request */ 486 spin_lock(&rba->lock); 487 /* swap out the rba->rbd_empty to a local list */ 488 list_replace_init(&rba->rbd_empty, &local_empty); 489 spin_unlock(&rba->lock); 490 491 while (pending) { 492 int i; 493 struct list_head local_allocated; 494 gfp_t gfp_mask = GFP_KERNEL; 495 496 /* Do not post a warning if there are only a few requests */ 497 if (pending < RX_PENDING_WATERMARK) 498 gfp_mask |= __GFP_NOWARN; 499 500 INIT_LIST_HEAD(&local_allocated); 501 502 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { 503 struct iwl_rx_mem_buffer *rxb; 504 struct page *page; 505 506 /* List should never be empty - each reused RBD is 507 * returned to the list, and initial pool covers any 508 * possible gap between the time the page is allocated 509 * to the time the RBD is added. 510 */ 511 BUG_ON(list_empty(&local_empty)); 512 /* Get the first rxb from the rbd list */ 513 rxb = list_first_entry(&local_empty, 514 struct iwl_rx_mem_buffer, list); 515 BUG_ON(rxb->page); 516 517 /* Alloc a new receive buffer */ 518 page = iwl_pcie_rx_alloc_page(trans, gfp_mask); 519 if (!page) 520 continue; 521 rxb->page = page; 522 523 /* Get physical address of the RB */ 524 rxb->page_dma = dma_map_page(trans->dev, page, 0, 525 PAGE_SIZE << trans_pcie->rx_page_order, 526 DMA_FROM_DEVICE); 527 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 528 rxb->page = NULL; 529 __free_pages(page, trans_pcie->rx_page_order); 530 continue; 531 } 532 533 /* move the allocated entry to the out list */ 534 list_move(&rxb->list, &local_allocated); 535 i++; 536 } 537 538 pending--; 539 if (!pending) { 540 pending = atomic_xchg(&rba->req_pending, 0); 541 IWL_DEBUG_RX(trans, 542 "Pending allocation requests = %d\n", 543 pending); 544 } 545 546 spin_lock(&rba->lock); 547 /* add the allocated rbds to the allocator allocated list */ 548 list_splice_tail(&local_allocated, &rba->rbd_allocated); 549 /* get more empty RBDs for current pending requests */ 550 list_splice_tail_init(&rba->rbd_empty, &local_empty); 551 spin_unlock(&rba->lock); 552 553 atomic_inc(&rba->req_ready); 554 } 555 556 spin_lock(&rba->lock); 557 /* return unused rbds to the allocator empty list */ 558 list_splice_tail(&local_empty, &rba->rbd_empty); 559 spin_unlock(&rba->lock); 560 } 561 562 /* 563 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages 564 .* 565 .* Called by queue when the queue posted allocation request and 566 * has freed 8 RBDs in order to restock itself. 567 * This function directly moves the allocated RBs to the queue's ownership 568 * and updates the relevant counters. 569 */ 570 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, 571 struct iwl_rxq *rxq) 572 { 573 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 574 struct iwl_rb_allocator *rba = &trans_pcie->rba; 575 int i; 576 577 lockdep_assert_held(&rxq->lock); 578 579 /* 580 * atomic_dec_if_positive returns req_ready - 1 for any scenario. 581 * If req_ready is 0 atomic_dec_if_positive will return -1 and this 582 * function will return early, as there are no ready requests. 
583 * atomic_dec_if_positive will perofrm the *actual* decrement only if 584 * req_ready > 0, i.e. - there are ready requests and the function 585 * hands one request to the caller. 586 */ 587 if (atomic_dec_if_positive(&rba->req_ready) < 0) 588 return; 589 590 spin_lock(&rba->lock); 591 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { 592 /* Get next free Rx buffer, remove it from free list */ 593 struct iwl_rx_mem_buffer *rxb = 594 list_first_entry(&rba->rbd_allocated, 595 struct iwl_rx_mem_buffer, list); 596 597 list_move(&rxb->list, &rxq->rx_free); 598 } 599 spin_unlock(&rba->lock); 600 601 rxq->used_count -= RX_CLAIM_REQ_ALLOC; 602 rxq->free_count += RX_CLAIM_REQ_ALLOC; 603 } 604 605 static void iwl_pcie_rx_allocator_work(struct work_struct *data) 606 { 607 struct iwl_rb_allocator *rba_p = 608 container_of(data, struct iwl_rb_allocator, rx_alloc); 609 struct iwl_trans_pcie *trans_pcie = 610 container_of(rba_p, struct iwl_trans_pcie, rba); 611 612 iwl_pcie_rx_allocator(trans_pcie->trans); 613 } 614 615 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 616 { 617 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 618 struct iwl_rb_allocator *rba = &trans_pcie->rba; 619 struct device *dev = trans->dev; 620 int i; 621 int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : 622 sizeof(__le32); 623 624 if (WARN_ON(trans_pcie->rxq)) 625 return -EINVAL; 626 627 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 628 GFP_KERNEL); 629 if (!trans_pcie->rxq) 630 return -EINVAL; 631 632 spin_lock_init(&rba->lock); 633 634 for (i = 0; i < trans->num_rx_queues; i++) { 635 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 636 637 spin_lock_init(&rxq->lock); 638 if (trans->cfg->mq_rx_supported) 639 rxq->queue_size = MQ_RX_TABLE_SIZE; 640 else 641 rxq->queue_size = RX_QUEUE_SIZE; 642 643 /* 644 * Allocate the circular buffer of Read Buffer Descriptors 645 * (RBDs) 646 */ 647 rxq->bd = dma_zalloc_coherent(dev, 648 free_size * rxq->queue_size, 649 &rxq->bd_dma, GFP_KERNEL); 650 if (!rxq->bd) 651 goto err; 652 653 if (trans->cfg->mq_rx_supported) { 654 rxq->used_bd = dma_zalloc_coherent(dev, 655 sizeof(__le32) * 656 rxq->queue_size, 657 &rxq->used_bd_dma, 658 GFP_KERNEL); 659 if (!rxq->used_bd) 660 goto err; 661 } 662 663 /*Allocate the driver's pointer to receive buffer status */ 664 rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), 665 &rxq->rb_stts_dma, 666 GFP_KERNEL); 667 if (!rxq->rb_stts) 668 goto err; 669 } 670 return 0; 671 672 err: 673 for (i = 0; i < trans->num_rx_queues; i++) { 674 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 675 676 if (rxq->bd) 677 dma_free_coherent(dev, free_size * rxq->queue_size, 678 rxq->bd, rxq->bd_dma); 679 rxq->bd_dma = 0; 680 rxq->bd = NULL; 681 682 if (rxq->rb_stts) 683 dma_free_coherent(trans->dev, 684 sizeof(struct iwl_rb_status), 685 rxq->rb_stts, rxq->rb_stts_dma); 686 687 if (rxq->used_bd) 688 dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, 689 rxq->used_bd, rxq->used_bd_dma); 690 rxq->used_bd_dma = 0; 691 rxq->used_bd = NULL; 692 } 693 kfree(trans_pcie->rxq); 694 695 return -ENOMEM; 696 } 697 698 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) 699 { 700 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 701 u32 rb_size; 702 unsigned long flags; 703 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 704 705 switch (trans_pcie->rx_buf_size) { 706 case IWL_AMSDU_4K: 707 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 708 break; 709 case IWL_AMSDU_8K: 710 
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 711 break; 712 case IWL_AMSDU_12K: 713 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K; 714 break; 715 default: 716 WARN_ON(1); 717 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 718 } 719 720 if (!iwl_trans_grab_nic_access(trans, &flags)) 721 return; 722 723 /* Stop Rx DMA */ 724 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0); 725 /* reset and flush pointers */ 726 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0); 727 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0); 728 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0); 729 730 /* Reset driver's Rx queue write index */ 731 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0); 732 733 /* Tell device where to find RBD circular buffer in DRAM */ 734 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG, 735 (u32)(rxq->bd_dma >> 8)); 736 737 /* Tell device where in DRAM to update its Rx status */ 738 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG, 739 rxq->rb_stts_dma >> 4); 740 741 /* Enable Rx DMA 742 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in 743 * the credit mechanism in 5000 HW RX FIFO 744 * Direct rx interrupts to hosts 745 * Rx buffer size 4 or 8k or 12k 746 * RB timeout 0x10 747 * 256 RBDs 748 */ 749 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 750 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 751 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 752 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 753 rb_size | 754 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) | 755 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 756 757 iwl_trans_release_nic_access(trans, &flags); 758 759 /* Set interrupt coalescing timer to default (2048 usecs) */ 760 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 761 762 /* W/A for interrupt coalescing bug in 7260 and 3160 */ 763 if (trans->cfg->host_interrupt_operation_mode) 764 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); 765 } 766 767 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) 768 { 769 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 770 u32 rb_size, enabled = 0; 771 unsigned long flags; 772 int i; 773 774 switch (trans_pcie->rx_buf_size) { 775 case IWL_AMSDU_4K: 776 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 777 break; 778 case IWL_AMSDU_8K: 779 rb_size = RFH_RXF_DMA_RB_SIZE_8K; 780 break; 781 case IWL_AMSDU_12K: 782 rb_size = RFH_RXF_DMA_RB_SIZE_12K; 783 break; 784 default: 785 WARN_ON(1); 786 rb_size = RFH_RXF_DMA_RB_SIZE_4K; 787 } 788 789 if (!iwl_trans_grab_nic_access(trans, &flags)) 790 return; 791 792 /* Stop Rx DMA */ 793 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0); 794 /* disable free amd used rx queue operation */ 795 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0); 796 797 for (i = 0; i < trans->num_rx_queues; i++) { 798 /* Tell device where to find RBD free table in DRAM */ 799 iwl_pcie_write_prph_64_no_grab(trans, 800 RFH_Q_FRBDCB_BA_LSB(i), 801 trans_pcie->rxq[i].bd_dma); 802 /* Tell device where to find RBD used table in DRAM */ 803 iwl_pcie_write_prph_64_no_grab(trans, 804 RFH_Q_URBDCB_BA_LSB(i), 805 trans_pcie->rxq[i].used_bd_dma); 806 /* Tell device where in DRAM to update its Rx status */ 807 iwl_pcie_write_prph_64_no_grab(trans, 808 RFH_Q_URBD_STTS_WPTR_LSB(i), 809 trans_pcie->rxq[i].rb_stts_dma); 810 /* Reset device indice tables */ 811 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0); 812 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0); 813 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0); 814 815 enabled |= BIT(i) | BIT(i + 16); 
816 } 817 818 /* restock default queue */ 819 iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); 820 821 /* 822 * Enable Rx DMA 823 * Single frame mode 824 * Rx buffer size 4 or 8k or 12k 825 * Min RB size 4 or 8 826 * Drop frames that exceed RB size 827 * 512 RBDs 828 */ 829 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 830 RFH_DMA_EN_ENABLE_VAL | 831 rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | 832 RFH_RXF_DMA_MIN_RB_4_8 | 833 RFH_RXF_DMA_DROP_TOO_LARGE_MASK | 834 RFH_RXF_DMA_RBDCB_SIZE_512); 835 836 /* 837 * Activate DMA snooping. 838 * Set RX DMA chunk size to 64B 839 * Default queue is 0 840 */ 841 iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | 842 (DEFAULT_RXQ_NUM << 843 RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | 844 RFH_GEN_CFG_SERVICE_DMA_SNOOP); 845 /* Enable the relevant rx queues */ 846 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled); 847 848 iwl_trans_release_nic_access(trans, &flags); 849 850 /* Set interrupt coalescing timer to default (2048 usecs) */ 851 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 852 } 853 854 static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 855 { 856 lockdep_assert_held(&rxq->lock); 857 858 INIT_LIST_HEAD(&rxq->rx_free); 859 INIT_LIST_HEAD(&rxq->rx_used); 860 rxq->free_count = 0; 861 rxq->used_count = 0; 862 } 863 864 static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) 865 { 866 WARN_ON(1); 867 return 0; 868 } 869 870 int iwl_pcie_rx_init(struct iwl_trans *trans) 871 { 872 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 873 struct iwl_rxq *def_rxq; 874 struct iwl_rb_allocator *rba = &trans_pcie->rba; 875 int i, err, queue_size, allocator_pool_size, num_alloc; 876 877 if (!trans_pcie->rxq) { 878 err = iwl_pcie_rx_alloc(trans); 879 if (err) 880 return err; 881 } 882 def_rxq = trans_pcie->rxq; 883 if (!rba->alloc_wq) 884 rba->alloc_wq = alloc_workqueue("rb_allocator", 885 WQ_HIGHPRI | WQ_UNBOUND, 1); 886 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); 887 888 spin_lock(&rba->lock); 889 atomic_set(&rba->req_pending, 0); 890 atomic_set(&rba->req_ready, 0); 891 INIT_LIST_HEAD(&rba->rbd_allocated); 892 INIT_LIST_HEAD(&rba->rbd_empty); 893 spin_unlock(&rba->lock); 894 895 /* free all first - we might be reconfigured for a different size */ 896 iwl_pcie_free_rbs_pool(trans); 897 898 for (i = 0; i < RX_QUEUE_SIZE; i++) 899 def_rxq->queue[i] = NULL; 900 901 for (i = 0; i < trans->num_rx_queues; i++) { 902 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 903 904 rxq->id = i; 905 906 spin_lock(&rxq->lock); 907 /* 908 * Set read write pointer to reflect that we have processed 909 * and used all buffers, but have not restocked the Rx queue 910 * with fresh buffers 911 */ 912 rxq->read = 0; 913 rxq->write = 0; 914 rxq->write_actual = 0; 915 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 916 917 iwl_pcie_rx_init_rxb_lists(rxq); 918 919 if (!rxq->napi.poll) 920 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, 921 iwl_pcie_dummy_napi_poll, 64); 922 923 spin_unlock(&rxq->lock); 924 } 925 926 /* move the pool to the default queue and allocator ownerships */ 927 queue_size = trans->cfg->mq_rx_supported ? 
                                                  MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
        allocator_pool_size = trans->num_rx_queues *
                (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
        num_alloc = queue_size + allocator_pool_size;
        BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
                     ARRAY_SIZE(trans_pcie->rx_pool));
        for (i = 0; i < num_alloc; i++) {
                struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];

                if (i < allocator_pool_size)
                        list_add(&rxb->list, &rba->rbd_empty);
                else
                        list_add(&rxb->list, &def_rxq->rx_used);
                trans_pcie->global_table[i] = rxb;
                rxb->vid = (u16)i;
        }

        iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
        if (trans->cfg->mq_rx_supported) {
                iwl_pcie_rx_mq_hw_init(trans);
        } else {
                iwl_pcie_rxq_sq_restock(trans, def_rxq);
                iwl_pcie_rx_hw_init(trans, def_rxq);
        }

        spin_lock(&def_rxq->lock);
        iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
        spin_unlock(&def_rxq->lock);

        return 0;
}

void iwl_pcie_rx_free(struct iwl_trans *trans)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
                                                      sizeof(__le32);
        int i;

        /*
         * if rxq is NULL, it means that nothing has been allocated,
         * exit now
         */
        if (!trans_pcie->rxq) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }

        cancel_work_sync(&rba->rx_alloc);
        if (rba->alloc_wq) {
                destroy_workqueue(rba->alloc_wq);
                rba->alloc_wq = NULL;
        }

        iwl_pcie_free_rbs_pool(trans);

        for (i = 0; i < trans->num_rx_queues; i++) {
                struct iwl_rxq *rxq = &trans_pcie->rxq[i];

                if (rxq->bd)
                        dma_free_coherent(trans->dev,
                                          free_size * rxq->queue_size,
                                          rxq->bd, rxq->bd_dma);
                rxq->bd_dma = 0;
                rxq->bd = NULL;

                if (rxq->rb_stts)
                        dma_free_coherent(trans->dev,
                                          sizeof(struct iwl_rb_status),
                                          rxq->rb_stts, rxq->rb_stts_dma);
                else
                        IWL_DEBUG_INFO(trans,
                                       "Free rxq->rb_stts which is NULL\n");

                if (rxq->used_bd)
                        dma_free_coherent(trans->dev,
                                          sizeof(__le32) * rxq->queue_size,
                                          rxq->used_bd, rxq->used_bd_dma);
                rxq->used_bd_dma = 0;
                rxq->used_bd = NULL;

                if (rxq->napi.poll)
                        netif_napi_del(&rxq->napi);
        }
        kfree(trans_pcie->rxq);
}

/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when an RBD can be reused. The RBD is transferred to the allocator.
 * When there are 2 empty RBDs - a request for allocation is posted.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
                                  struct iwl_rx_mem_buffer *rxb,
                                  struct iwl_rxq *rxq, bool emergency)
{
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_rb_allocator *rba = &trans_pcie->rba;

        /* Move the RBD to the used list; it will be moved to the allocator
         * in batches before claiming or posting a request */
        list_add_tail(&rxb->list, &rxq->rx_used);

        if (unlikely(emergency))
                return;

        /* Count the allocator owned RBDs */
        rxq->used_count++;

        /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
         * issue a request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC
         * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
         * buffers but still need to post another request.
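         *
         * Worked example (with RX_POST_REQ_ALLOC = 2 and RX_CLAIM_REQ_ALLOC = 8,
         * as described in the "Rx theory of operation" comment above):
         * used_count reaching 2 posts the first request; if the claim in
         * iwl_pcie_rx_handle() is late, used_count reaching 10 (10 % 8 == 2)
         * posts the next request anyway.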
1043 */ 1044 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { 1045 /* Move the 2 RBDs to the allocator ownership. 1046 Allocator has another 6 from pool for the request completion*/ 1047 spin_lock(&rba->lock); 1048 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1049 spin_unlock(&rba->lock); 1050 1051 atomic_inc(&rba->req_pending); 1052 queue_work(rba->alloc_wq, &rba->rx_alloc); 1053 } 1054 } 1055 1056 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 1057 struct iwl_rxq *rxq, 1058 struct iwl_rx_mem_buffer *rxb, 1059 bool emergency) 1060 { 1061 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1062 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1063 bool page_stolen = false; 1064 int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 1065 u32 offset = 0; 1066 1067 if (WARN_ON(!rxb)) 1068 return; 1069 1070 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE); 1071 1072 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) { 1073 struct iwl_rx_packet *pkt; 1074 u16 sequence; 1075 bool reclaim; 1076 int index, cmd_index, len; 1077 struct iwl_rx_cmd_buffer rxcb = { 1078 ._offset = offset, 1079 ._rx_page_order = trans_pcie->rx_page_order, 1080 ._page = rxb->page, 1081 ._page_stolen = false, 1082 .truesize = max_len, 1083 }; 1084 1085 pkt = rxb_addr(&rxcb); 1086 1087 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) 1088 break; 1089 1090 IWL_DEBUG_RX(trans, 1091 "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n", 1092 rxcb._offset, 1093 iwl_get_cmd_string(trans, 1094 iwl_cmd_id(pkt->hdr.cmd, 1095 pkt->hdr.group_id, 1096 0)), 1097 pkt->hdr.cmd, le16_to_cpu(pkt->hdr.sequence)); 1098 1099 len = iwl_rx_packet_len(pkt); 1100 len += sizeof(u32); /* account for status word */ 1101 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len); 1102 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len); 1103 1104 /* Reclaim a command buffer only if this packet is a response 1105 * to a (driver-originated) command. 1106 * If the packet (e.g. Rx frame) originated from uCode, 1107 * there is no command buffer to reclaim. 1108 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, 1109 * but apparently a few don't get set; catch them here. */ 1110 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME); 1111 if (reclaim) { 1112 int i; 1113 1114 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 1115 if (trans_pcie->no_reclaim_cmds[i] == 1116 pkt->hdr.cmd) { 1117 reclaim = false; 1118 break; 1119 } 1120 } 1121 } 1122 1123 sequence = le16_to_cpu(pkt->hdr.sequence); 1124 index = SEQ_TO_INDEX(sequence); 1125 cmd_index = get_cmd_index(&txq->q, index); 1126 1127 if (rxq->id == 0) 1128 iwl_op_mode_rx(trans->op_mode, &rxq->napi, 1129 &rxcb); 1130 else 1131 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 1132 &rxcb, rxq->id); 1133 1134 if (reclaim) { 1135 kzfree(txq->entries[cmd_index].free_buf); 1136 txq->entries[cmd_index].free_buf = NULL; 1137 } 1138 1139 /* 1140 * After here, we should always check rxcb._page_stolen, 1141 * if it is true then one of the handlers took the page. 
1142 */ 1143 1144 if (reclaim) { 1145 /* Invoke any callbacks, transfer the buffer to caller, 1146 * and fire off the (possibly) blocking 1147 * iwl_trans_send_cmd() 1148 * as we reclaim the driver command queue */ 1149 if (!rxcb._page_stolen) 1150 iwl_pcie_hcmd_complete(trans, &rxcb); 1151 else 1152 IWL_WARN(trans, "Claim null rxb?\n"); 1153 } 1154 1155 page_stolen |= rxcb._page_stolen; 1156 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); 1157 } 1158 1159 /* page was stolen from us -- free our reference */ 1160 if (page_stolen) { 1161 __free_pages(rxb->page, trans_pcie->rx_page_order); 1162 rxb->page = NULL; 1163 } 1164 1165 /* Reuse the page if possible. For notification packets and 1166 * SKBs that fail to Rx correctly, add them back into the 1167 * rx_free list for reuse later. */ 1168 if (rxb->page != NULL) { 1169 rxb->page_dma = 1170 dma_map_page(trans->dev, rxb->page, 0, 1171 PAGE_SIZE << trans_pcie->rx_page_order, 1172 DMA_FROM_DEVICE); 1173 if (dma_mapping_error(trans->dev, rxb->page_dma)) { 1174 /* 1175 * free the page(s) as well to not break 1176 * the invariant that the items on the used 1177 * list have no page(s) 1178 */ 1179 __free_pages(rxb->page, trans_pcie->rx_page_order); 1180 rxb->page = NULL; 1181 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1182 } else { 1183 list_add_tail(&rxb->list, &rxq->rx_free); 1184 rxq->free_count++; 1185 } 1186 } else 1187 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); 1188 } 1189 1190 /* 1191 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw 1192 */ 1193 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) 1194 { 1195 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1196 struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; 1197 u32 r, i, count = 0; 1198 bool emergency = false; 1199 1200 restart: 1201 spin_lock(&rxq->lock); 1202 /* uCode's read index (stored in shared DRAM) indicates the last Rx 1203 * buffer that the driver may process (last buffer filled by ucode). */ 1204 r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; 1205 i = rxq->read; 1206 1207 /* W/A 9000 device step A0 wrap-around bug */ 1208 r &= (rxq->queue_size - 1); 1209 1210 /* Rx interrupt, but nothing sent from uCode */ 1211 if (i == r) 1212 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); 1213 1214 while (i != r) { 1215 struct iwl_rx_mem_buffer *rxb; 1216 1217 if (unlikely(rxq->used_count == rxq->queue_size / 2)) 1218 emergency = true; 1219 1220 if (trans->cfg->mq_rx_supported) { 1221 /* 1222 * used_bd is a 32 bit but only 12 are used to retrieve 1223 * the vid 1224 */ 1225 u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; 1226 1227 if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table), 1228 "Invalid rxb index from HW %u\n", (u32)vid)) 1229 goto out; 1230 rxb = trans_pcie->global_table[vid]; 1231 } else { 1232 rxb = rxq->queue[i]; 1233 rxq->queue[i] = NULL; 1234 } 1235 1236 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); 1237 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); 1238 1239 i = (i + 1) & (rxq->queue_size - 1); 1240 1241 /* 1242 * If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1243 * try to claim the pre-allocated buffers from the allocator. 1244 * If not ready - will try to reclaim next time. 
1245 * There is no need to reschedule work - allocator exits only 1246 * on success 1247 */ 1248 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) 1249 iwl_pcie_rx_allocator_get(trans, rxq); 1250 1251 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { 1252 struct iwl_rb_allocator *rba = &trans_pcie->rba; 1253 1254 /* Add the remaining empty RBDs for allocator use */ 1255 spin_lock(&rba->lock); 1256 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 1257 spin_unlock(&rba->lock); 1258 } else if (emergency) { 1259 count++; 1260 if (count == 8) { 1261 count = 0; 1262 if (rxq->used_count < rxq->queue_size / 3) 1263 emergency = false; 1264 1265 rxq->read = i; 1266 spin_unlock(&rxq->lock); 1267 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1268 iwl_pcie_rxq_restock(trans, rxq); 1269 goto restart; 1270 } 1271 } 1272 } 1273 out: 1274 /* Backtrack one entry */ 1275 rxq->read = i; 1276 spin_unlock(&rxq->lock); 1277 1278 /* 1279 * handle a case where in emergency there are some unallocated RBDs. 1280 * those RBDs are in the used list, but are not tracked by the queue's 1281 * used_count which counts allocator owned RBDs. 1282 * unallocated emergency RBDs must be allocated on exit, otherwise 1283 * when called again the function may not be in emergency mode and 1284 * they will be handed to the allocator with no tracking in the RBD 1285 * allocator counters, which will lead to them never being claimed back 1286 * by the queue. 1287 * by allocating them here, they are now in the queue free list, and 1288 * will be restocked by the next call of iwl_pcie_rxq_restock. 1289 */ 1290 if (unlikely(emergency && count)) 1291 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1292 1293 if (rxq->napi.poll) 1294 napi_gro_flush(&rxq->napi, false); 1295 1296 iwl_pcie_rxq_restock(trans, rxq); 1297 } 1298 1299 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) 1300 { 1301 u8 queue = entry->entry; 1302 struct msix_entry *entries = entry - queue; 1303 1304 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); 1305 } 1306 1307 static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, 1308 struct msix_entry *entry) 1309 { 1310 /* 1311 * Before sending the interrupt the HW disables it to prevent 1312 * a nested interrupt. This is done by writing 1 to the corresponding 1313 * bit in the mask register. After handling the interrupt, it should be 1314 * re-enabled by clearing this bit. This register is defined as 1315 * write 1 clear (W1C) register, meaning that it's being clear 1316 * by writing 1 to the bit. 1317 */ 1318 iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); 1319 } 1320 1321 /* 1322 * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw 1323 * This interrupt handler should be used with RSS queue only. 
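 *
 * The MSI-X vector number selects the RX queue directly: dev_id is expected
 * to point at trans_pcie->msix_entries[i], so entry->entry == i is used as
 * the rxq index (range-checked against trans->num_rx_queues), and
 * iwl_pcie_get_trans_pcie() recovers trans_pcie via container_of() on the
 * msix_entries[] array.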
1324 */ 1325 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) 1326 { 1327 struct msix_entry *entry = dev_id; 1328 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 1329 struct iwl_trans *trans = trans_pcie->trans; 1330 1331 if (WARN_ON(entry->entry >= trans->num_rx_queues)) 1332 return IRQ_NONE; 1333 1334 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1335 1336 local_bh_disable(); 1337 iwl_pcie_rx_handle(trans, entry->entry); 1338 local_bh_enable(); 1339 1340 iwl_pcie_clear_irq(trans, entry); 1341 1342 lock_map_release(&trans->sync_cmd_lockdep_map); 1343 1344 return IRQ_HANDLED; 1345 } 1346 1347 /* 1348 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card 1349 */ 1350 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) 1351 { 1352 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1353 int i; 1354 1355 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 1356 if (trans->cfg->internal_wimax_coex && 1357 !trans->cfg->apmg_not_supported && 1358 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 1359 APMS_CLK_VAL_MRB_FUNC_MODE) || 1360 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 1361 APMG_PS_CTRL_VAL_RESET_REQ))) { 1362 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1363 iwl_op_mode_wimax_active(trans->op_mode); 1364 wake_up(&trans_pcie->wait_command_queue); 1365 return; 1366 } 1367 1368 iwl_pcie_dump_csr(trans); 1369 iwl_dump_fh(trans, NULL); 1370 1371 local_bh_disable(); 1372 /* The STATUS_FW_ERROR bit is set in this function. This must happen 1373 * before we wake up the command caller, to ensure a proper cleanup. */ 1374 iwl_trans_fw_error(trans); 1375 local_bh_enable(); 1376 1377 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) 1378 del_timer(&trans_pcie->txq[i].stuck_timer); 1379 1380 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1381 wake_up(&trans_pcie->wait_command_queue); 1382 } 1383 1384 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans) 1385 { 1386 u32 inta; 1387 1388 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock); 1389 1390 trace_iwlwifi_dev_irq(trans->dev); 1391 1392 /* Discover which interrupts are active/pending */ 1393 inta = iwl_read32(trans, CSR_INT); 1394 1395 /* the thread will service interrupts and re-enable them */ 1396 return inta; 1397 } 1398 1399 /* a device (PCI-E) page is 4096 bytes long */ 1400 #define ICT_SHIFT 12 1401 #define ICT_SIZE (1 << ICT_SHIFT) 1402 #define ICT_COUNT (ICT_SIZE / sizeof(u32)) 1403 1404 /* interrupt handler using ict table, with this interrupt driver will 1405 * stop using INTA register to get device's interrupt, reading this register 1406 * is expensive, device will write interrupts in ICT dram table, increment 1407 * index then will fire interrupt to driver, driver will OR all ICT table 1408 * entries from current index up to table entry with 0 value. the result is 1409 * the interrupt we need to service, driver will set the entries back to 0 and 1410 * set index. 1411 */ 1412 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans) 1413 { 1414 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1415 u32 inta; 1416 u32 val = 0; 1417 u32 read; 1418 1419 trace_iwlwifi_dev_irq(trans->dev); 1420 1421 /* Ignore interrupt if there's nothing in NIC to service. 1422 * This may be due to IRQ shared with another device, 1423 * or due to sporadic interrupts thrown from our NIC. 
 */
        read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
        trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
        if (!read)
                return 0;

        /*
         * Collect all entries up to the first 0, starting from ict_index;
         * note we already read at ict_index.
         */
        do {
                val |= read;
                IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
                              trans_pcie->ict_index, read);
                trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
                trans_pcie->ict_index =
                        ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));

                read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
                trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
                                           read);
        } while (read);

        /* We should not get this value, just ignore it. */
        if (val == 0xffffffff)
                val = 0;

        /*
         * This is a W/A for a HW bug. The HW bug may cause the Rx bit
         * (bit 15 before shifting it to 31) to clear when using interrupt
         * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
         * so we use them to decide on the real state of the Rx bit.
         * In other words, bit 15 is set if bit 18 or bit 19 is set.
         */
        if (val & 0xC0000)
                val |= 0x8000;

        inta = (0xff & val) | ((0xff00 & val) << 16);
        return inta;
}

irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
        struct iwl_trans *trans = dev_id;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
        u32 inta = 0;
        u32 handled = 0;

        lock_map_acquire(&trans->sync_cmd_lockdep_map);

        spin_lock(&trans_pcie->irq_lock);

        /* dram interrupt table not set yet,
         * use legacy interrupt.
         */
        if (likely(trans_pcie->use_ict))
                inta = iwl_pcie_int_cause_ict(trans);
        else
                inta = iwl_pcie_int_cause_non_ict(trans);

        if (iwl_have_debug_level(IWL_DL_ISR)) {
                IWL_DEBUG_ISR(trans,
                              "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
                              inta, trans_pcie->inta_mask,
                              iwl_read32(trans, CSR_INT_MASK),
                              iwl_read32(trans, CSR_FH_INT_STATUS));
                if (inta & (~trans_pcie->inta_mask))
                        IWL_DEBUG_ISR(trans,
                                      "We got a masked interrupt (0x%08x)\n",
                                      inta & (~trans_pcie->inta_mask));
        }

        inta &= trans_pcie->inta_mask;

        /*
         * Ignore interrupt if there's nothing in NIC to service.
         * This may be due to IRQ shared with another device,
         * or due to sporadic interrupts thrown from our NIC.
         */
        if (unlikely(!inta)) {
                IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
                /*
                 * Re-enable interrupts here since we don't
                 * have anything to service
                 */
                if (test_bit(STATUS_INT_ENABLED, &trans->status))
                        iwl_enable_interrupts(trans);
                spin_unlock(&trans_pcie->irq_lock);
                lock_map_release(&trans->sync_cmd_lockdep_map);
                return IRQ_NONE;
        }

        if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
                /*
                 * Hardware disappeared. It might have
                 * already raised an interrupt.
                 */
                IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
                spin_unlock(&trans_pcie->irq_lock);
                goto out;
        }

        /* Ack/clear/reset pending uCode interrupts.
         * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
         */
        /* There is a hardware bug in the interrupt mask function that some
         * interrupts (i.e.
CSR_INT_BIT_SCD) can still be generated even if 1531 * they are disabled in the CSR_INT_MASK register. Furthermore the 1532 * ICT interrupt handling mechanism has another bug that might cause 1533 * these unmasked interrupts fail to be detected. We workaround the 1534 * hardware bugs here by ACKing all the possible interrupts so that 1535 * interrupt coalescing can still be achieved. 1536 */ 1537 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask); 1538 1539 if (iwl_have_debug_level(IWL_DL_ISR)) 1540 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n", 1541 inta, iwl_read32(trans, CSR_INT_MASK)); 1542 1543 spin_unlock(&trans_pcie->irq_lock); 1544 1545 /* Now service all interrupt bits discovered above. */ 1546 if (inta & CSR_INT_BIT_HW_ERR) { 1547 IWL_ERR(trans, "Hardware error detected. Restarting.\n"); 1548 1549 /* Tell the device to stop sending interrupts */ 1550 iwl_disable_interrupts(trans); 1551 1552 isr_stats->hw++; 1553 iwl_pcie_irq_handle_error(trans); 1554 1555 handled |= CSR_INT_BIT_HW_ERR; 1556 1557 goto out; 1558 } 1559 1560 if (iwl_have_debug_level(IWL_DL_ISR)) { 1561 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 1562 if (inta & CSR_INT_BIT_SCD) { 1563 IWL_DEBUG_ISR(trans, 1564 "Scheduler finished to transmit the frame/frames.\n"); 1565 isr_stats->sch++; 1566 } 1567 1568 /* Alive notification via Rx interrupt will do the real work */ 1569 if (inta & CSR_INT_BIT_ALIVE) { 1570 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1571 isr_stats->alive++; 1572 } 1573 } 1574 1575 /* Safely ignore these bits for debug checks below */ 1576 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); 1577 1578 /* HW RF KILL switch toggled */ 1579 if (inta & CSR_INT_BIT_RF_KILL) { 1580 bool hw_rfkill; 1581 1582 hw_rfkill = iwl_is_rfkill_set(trans); 1583 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 1584 hw_rfkill ? "disable radio" : "enable radio"); 1585 1586 isr_stats->rfkill++; 1587 1588 mutex_lock(&trans_pcie->mutex); 1589 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1590 mutex_unlock(&trans_pcie->mutex); 1591 if (hw_rfkill) { 1592 set_bit(STATUS_RFKILL, &trans->status); 1593 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1594 &trans->status)) 1595 IWL_DEBUG_RF_KILL(trans, 1596 "Rfkill while SYNC HCMD in flight\n"); 1597 wake_up(&trans_pcie->wait_command_queue); 1598 } else { 1599 clear_bit(STATUS_RFKILL, &trans->status); 1600 } 1601 1602 handled |= CSR_INT_BIT_RF_KILL; 1603 } 1604 1605 /* Chip got too hot and stopped itself */ 1606 if (inta & CSR_INT_BIT_CT_KILL) { 1607 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1608 isr_stats->ctkill++; 1609 handled |= CSR_INT_BIT_CT_KILL; 1610 } 1611 1612 /* Error detected by uCode */ 1613 if (inta & CSR_INT_BIT_SW_ERR) { 1614 IWL_ERR(trans, "Microcode SW error detected. 
" 1615 " Restarting 0x%X.\n", inta); 1616 isr_stats->sw++; 1617 iwl_pcie_irq_handle_error(trans); 1618 handled |= CSR_INT_BIT_SW_ERR; 1619 } 1620 1621 /* uCode wakes up after power-down sleep */ 1622 if (inta & CSR_INT_BIT_WAKEUP) { 1623 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1624 iwl_pcie_rxq_check_wrptr(trans); 1625 iwl_pcie_txq_check_wrptrs(trans); 1626 1627 isr_stats->wakeup++; 1628 1629 handled |= CSR_INT_BIT_WAKEUP; 1630 } 1631 1632 /* All uCode command responses, including Tx command responses, 1633 * Rx "responses" (frame-received notification), and other 1634 * notifications from uCode come through here*/ 1635 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX | 1636 CSR_INT_BIT_RX_PERIODIC)) { 1637 IWL_DEBUG_ISR(trans, "Rx interrupt\n"); 1638 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1639 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1640 iwl_write32(trans, CSR_FH_INT_STATUS, 1641 CSR_FH_INT_RX_MASK); 1642 } 1643 if (inta & CSR_INT_BIT_RX_PERIODIC) { 1644 handled |= CSR_INT_BIT_RX_PERIODIC; 1645 iwl_write32(trans, 1646 CSR_INT, CSR_INT_BIT_RX_PERIODIC); 1647 } 1648 /* Sending RX interrupt require many steps to be done in the 1649 * the device: 1650 * 1- write interrupt to current index in ICT table. 1651 * 2- dma RX frame. 1652 * 3- update RX shared data to indicate last write index. 1653 * 4- send interrupt. 1654 * This could lead to RX race, driver could receive RX interrupt 1655 * but the shared data changes does not reflect this; 1656 * periodic interrupt will detect any dangling Rx activity. 1657 */ 1658 1659 /* Disable periodic interrupt; we use it as just a one-shot. */ 1660 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1661 CSR_INT_PERIODIC_DIS); 1662 1663 /* 1664 * Enable periodic interrupt in 8 msec only if we received 1665 * real RX interrupt (instead of just periodic int), to catch 1666 * any dangling Rx interrupt. If it was just the periodic 1667 * interrupt, there was no dangling Rx activity, and no need 1668 * to extend the periodic interrupt; one-shot is enough. 
1669 */ 1670 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1671 iwl_write8(trans, CSR_INT_PERIODIC_REG, 1672 CSR_INT_PERIODIC_ENA); 1673 1674 isr_stats->rx++; 1675 1676 local_bh_disable(); 1677 iwl_pcie_rx_handle(trans, 0); 1678 local_bh_enable(); 1679 } 1680 1681 /* This "Tx" DMA channel is used only for loading uCode */ 1682 if (inta & CSR_INT_BIT_FH_TX) { 1683 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK); 1684 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 1685 isr_stats->tx++; 1686 handled |= CSR_INT_BIT_FH_TX; 1687 /* Wake up uCode load routine, now that load is complete */ 1688 trans_pcie->ucode_write_complete = true; 1689 wake_up(&trans_pcie->ucode_write_waitq); 1690 } 1691 1692 if (inta & ~handled) { 1693 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled); 1694 isr_stats->unhandled++; 1695 } 1696 1697 if (inta & ~(trans_pcie->inta_mask)) { 1698 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n", 1699 inta & ~trans_pcie->inta_mask); 1700 } 1701 1702 /* we are loading the firmware, enable FH_TX interrupt only */ 1703 if (handled & CSR_INT_BIT_FH_TX) 1704 iwl_enable_fw_load_int(trans); 1705 /* only Re-enable all interrupt if disabled by irq */ 1706 else if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1707 iwl_enable_interrupts(trans); 1708 /* Re-enable RF_KILL if it occurred */ 1709 else if (handled & CSR_INT_BIT_RF_KILL) 1710 iwl_enable_rfkill_int(trans); 1711 1712 out: 1713 lock_map_release(&trans->sync_cmd_lockdep_map); 1714 return IRQ_HANDLED; 1715 } 1716 1717 /****************************************************************************** 1718 * 1719 * ICT functions 1720 * 1721 ******************************************************************************/ 1722 1723 /* Free dram table */ 1724 void iwl_pcie_free_ict(struct iwl_trans *trans) 1725 { 1726 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1727 1728 if (trans_pcie->ict_tbl) { 1729 dma_free_coherent(trans->dev, ICT_SIZE, 1730 trans_pcie->ict_tbl, 1731 trans_pcie->ict_tbl_dma); 1732 trans_pcie->ict_tbl = NULL; 1733 trans_pcie->ict_tbl_dma = 0; 1734 } 1735 } 1736 1737 /* 1738 * allocate dram shared table, it is an aligned memory 1739 * block of ICT_SIZE. 1740 * also reset all data related to ICT table interrupt. 1741 */ 1742 int iwl_pcie_alloc_ict(struct iwl_trans *trans) 1743 { 1744 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1745 1746 trans_pcie->ict_tbl = 1747 dma_zalloc_coherent(trans->dev, ICT_SIZE, 1748 &trans_pcie->ict_tbl_dma, 1749 GFP_KERNEL); 1750 if (!trans_pcie->ict_tbl) 1751 return -ENOMEM; 1752 1753 /* just an API sanity check ... it is guaranteed to be aligned */ 1754 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) { 1755 iwl_pcie_free_ict(trans); 1756 return -EINVAL; 1757 } 1758 1759 return 0; 1760 } 1761 1762 /* Device is going up inform it about using ICT interrupt table, 1763 * also we need to tell the driver to start using ICT interrupt. 
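 *
 * Illustrative value written below: the 4 KB-aligned table base is shifted
 * right by ICT_SHIFT and OR'd with the enable/wrap-check/write-pointer flags:
 *
 *	val = (trans_pcie->ict_tbl_dma >> ICT_SHIFT) |
 *	      CSR_DRAM_INT_TBL_ENABLE |
 *	      CSR_DRAM_INIT_TBL_WRAP_CHECK |
 *	      CSR_DRAM_INIT_TBL_WRITE_POINTER;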
1764 */ 1765 void iwl_pcie_reset_ict(struct iwl_trans *trans) 1766 { 1767 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1768 u32 val; 1769 1770 if (!trans_pcie->ict_tbl) 1771 return; 1772 1773 spin_lock(&trans_pcie->irq_lock); 1774 iwl_disable_interrupts(trans); 1775 1776 memset(trans_pcie->ict_tbl, 0, ICT_SIZE); 1777 1778 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT; 1779 1780 val |= CSR_DRAM_INT_TBL_ENABLE | 1781 CSR_DRAM_INIT_TBL_WRAP_CHECK | 1782 CSR_DRAM_INIT_TBL_WRITE_POINTER; 1783 1784 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val); 1785 1786 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val); 1787 trans_pcie->use_ict = true; 1788 trans_pcie->ict_index = 0; 1789 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask); 1790 iwl_enable_interrupts(trans); 1791 spin_unlock(&trans_pcie->irq_lock); 1792 } 1793 1794 /* Device is going down disable ict interrupt usage */ 1795 void iwl_pcie_disable_ict(struct iwl_trans *trans) 1796 { 1797 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1798 1799 spin_lock(&trans_pcie->irq_lock); 1800 trans_pcie->use_ict = false; 1801 spin_unlock(&trans_pcie->irq_lock); 1802 } 1803 1804 irqreturn_t iwl_pcie_isr(int irq, void *data) 1805 { 1806 struct iwl_trans *trans = data; 1807 1808 if (!trans) 1809 return IRQ_NONE; 1810 1811 /* Disable (but don't clear!) interrupts here to avoid 1812 * back-to-back ISRs and sporadic interrupts from our NIC. 1813 * If we have something to service, the tasklet will re-enable ints. 1814 * If we *don't* have something, we'll re-enable before leaving here. 1815 */ 1816 iwl_write32(trans, CSR_INT_MASK, 0x00000000); 1817 1818 return IRQ_WAKE_THREAD; 1819 } 1820 1821 irqreturn_t iwl_pcie_msix_isr(int irq, void *data) 1822 { 1823 return IRQ_WAKE_THREAD; 1824 } 1825 1826 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) 1827 { 1828 struct msix_entry *entry = dev_id; 1829 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); 1830 struct iwl_trans *trans = trans_pcie->trans; 1831 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 1832 u32 inta_fh, inta_hw; 1833 1834 lock_map_acquire(&trans->sync_cmd_lockdep_map); 1835 1836 spin_lock(&trans_pcie->irq_lock); 1837 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD); 1838 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD); 1839 /* 1840 * Clear causes registers to avoid being handling the same cause. 1841 */ 1842 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); 1843 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); 1844 spin_unlock(&trans_pcie->irq_lock); 1845 1846 if (unlikely(!(inta_fh | inta_hw))) { 1847 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); 1848 lock_map_release(&trans->sync_cmd_lockdep_map); 1849 return IRQ_NONE; 1850 } 1851 1852 if (iwl_have_debug_level(IWL_DL_ISR)) 1853 IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", 1854 inta_fh, 1855 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); 1856 1857 /* This "Tx" DMA channel is used only for loading uCode */ 1858 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { 1859 IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); 1860 isr_stats->tx++; 1861 /* 1862 * Wake up uCode load routine, 1863 * now that load is complete 1864 */ 1865 trans_pcie->ucode_write_complete = true; 1866 wake_up(&trans_pcie->ucode_write_waitq); 1867 } 1868 1869 /* Error detected by uCode */ 1870 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || 1871 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { 1872 IWL_ERR(trans, 1873 "Microcode SW error detected. 
Restarting 0x%X.\n", 1874 inta_fh); 1875 isr_stats->sw++; 1876 iwl_pcie_irq_handle_error(trans); 1877 } 1878 1879 /* After checking FH register check HW register */ 1880 if (iwl_have_debug_level(IWL_DL_ISR)) 1881 IWL_DEBUG_ISR(trans, 1882 "ISR inta_hw 0x%08x, enabled 0x%08x\n", 1883 inta_hw, 1884 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); 1885 1886 /* Alive notification via Rx interrupt will do the real work */ 1887 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { 1888 IWL_DEBUG_ISR(trans, "Alive interrupt\n"); 1889 isr_stats->alive++; 1890 } 1891 1892 /* uCode wakes up after power-down sleep */ 1893 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { 1894 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 1895 iwl_pcie_rxq_check_wrptr(trans); 1896 iwl_pcie_txq_check_wrptrs(trans); 1897 1898 isr_stats->wakeup++; 1899 } 1900 1901 /* Chip got too hot and stopped itself */ 1902 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { 1903 IWL_ERR(trans, "Microcode CT kill error detected.\n"); 1904 isr_stats->ctkill++; 1905 } 1906 1907 /* HW RF KILL switch toggled */ 1908 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) { 1909 bool hw_rfkill; 1910 1911 hw_rfkill = iwl_is_rfkill_set(trans); 1912 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 1913 hw_rfkill ? "disable radio" : "enable radio"); 1914 1915 isr_stats->rfkill++; 1916 1917 mutex_lock(&trans_pcie->mutex); 1918 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1919 mutex_unlock(&trans_pcie->mutex); 1920 if (hw_rfkill) { 1921 set_bit(STATUS_RFKILL, &trans->status); 1922 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, 1923 &trans->status)) 1924 IWL_DEBUG_RF_KILL(trans, 1925 "Rfkill while SYNC HCMD in flight\n"); 1926 wake_up(&trans_pcie->wait_command_queue); 1927 } else { 1928 clear_bit(STATUS_RFKILL, &trans->status); 1929 } 1930 } 1931 1932 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { 1933 IWL_ERR(trans, 1934 "Hardware error detected. Restarting.\n"); 1935 1936 isr_stats->hw++; 1937 iwl_pcie_irq_handle_error(trans); 1938 } 1939 1940 iwl_pcie_clear_irq(trans, entry); 1941 1942 lock_map_release(&trans->sync_cmd_lockdep_map); 1943 1944 return IRQ_HANDLED; 1945 } 1946
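
/*
 * Usage sketch (illustrative ordering only - the actual call sites live in
 * the transport layer, outside this file):
 *
 *	iwl_pcie_alloc_ict(trans);	allocate the DRAM ICT table
 *	iwl_pcie_reset_ict(trans);	point the HW at it and enable ICT mode
 *	iwl_pcie_rx_init(trans);	allocate, fill and restock the RX queues
 *	...				iwl_pcie_irq_handler() /
 *					iwl_pcie_irq_msix_handler() service
 *					interrupts and call iwl_pcie_rx_handle()
 *	iwl_pcie_rx_stop(trans);	stop the Rx DMA channel
 *	iwl_pcie_rx_free(trans);	free RX queues, pool and allocator state
 *	iwl_pcie_free_ict(trans);	free the ICT table
 */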