Lines Matching defs:trans_pcie

205 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
209 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
251 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
273 WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
368 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
369 unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
370 unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
374 if (trans_pcie->rx_page_order > 0)
377 if (trans_pcie->alloc_page) {
378 spin_lock_bh(&trans_pcie->alloc_page_lock);
380 if (trans_pcie->alloc_page) {
381 *offset = trans_pcie->alloc_page_used;
382 page = trans_pcie->alloc_page;
383 trans_pcie->alloc_page_used += rbsize;
384 if (trans_pcie->alloc_page_used >= allocsize)
385 trans_pcie->alloc_page = NULL;
388 spin_unlock_bh(&trans_pcie->alloc_page_lock);
391 spin_unlock_bh(&trans_pcie->alloc_page_lock);
395 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
399 trans_pcie->rx_page_order);
411 spin_lock_bh(&trans_pcie->alloc_page_lock);
412 if (!trans_pcie->alloc_page) {
414 trans_pcie->alloc_page = page;
415 trans_pcie->alloc_page_used = rbsize;
417 spin_unlock_bh(&trans_pcie->alloc_page_lock);
436 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
458 __free_pages(page, trans_pcie->rx_page_order);
472 trans_pcie->rx_buf_bytes,
479 __free_pages(page, trans_pcie->rx_page_order);
494 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
497 if (!trans_pcie->rx_pool)
500 for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
501 if (!trans_pcie->rx_pool[i].page)
503 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
504 trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
505 __free_pages(trans_pcie->rx_pool[i].page,
506 trans_pcie->rx_page_order);
507 trans_pcie->rx_pool[i].page = NULL;
519 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
520 struct iwl_rb_allocator *rba = &trans_pcie->rba;
566 trans_pcie->rx_buf_bytes,
570 __free_pages(page, trans_pcie->rx_page_order);
620 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
621 struct iwl_rb_allocator *rba = &trans_pcie->rba;
656 struct iwl_trans_pcie *trans_pcie =
659 iwl_pcie_rx_allocator(trans_pcie->trans);
720 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
753 rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
755 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
761 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
771 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
773 struct iwl_rb_allocator *rba = &trans_pcie->rba;
776 if (WARN_ON(trans_pcie->rxq))
779 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
781 trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
782 sizeof(trans_pcie->rx_pool[0]),
784 trans_pcie->global_table =
785 kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
786 sizeof(trans_pcie->global_table[0]),
788 if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
789 !trans_pcie->global_table) {
800 trans_pcie->base_rb_stts =
803 &trans_pcie->base_rb_stts_dma,
805 if (!trans_pcie->base_rb_stts) {
811 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
821 if (trans_pcie->base_rb_stts) {
824 trans_pcie->base_rb_stts,
825 trans_pcie->base_rb_stts_dma);
826 trans_pcie->base_rb_stts = NULL;
827 trans_pcie->base_rb_stts_dma = 0;
829 kfree(trans_pcie->rx_pool);
830 trans_pcie->rx_pool = NULL;
831 kfree(trans_pcie->global_table);
832 trans_pcie->global_table = NULL;
833 kfree(trans_pcie->rxq);
834 trans_pcie->rxq = NULL;
841 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
845 switch (trans_pcie->rx_buf_size) {
909 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
913 switch (trans_pcie->rx_buf_size) {
943 trans_pcie->rxq[i].bd_dma);
947 trans_pcie->rxq[i].used_bd_dma);
951 trans_pcie->rxq[i].rb_stts_dma);
1015 struct iwl_trans_pcie *trans_pcie;
1019 trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
1020 trans = trans_pcie->trans;
1028 spin_lock(&trans_pcie->irq_lock);
1031 spin_unlock(&trans_pcie->irq_lock);
1042 struct iwl_trans_pcie *trans_pcie;
1046 trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
1047 trans = trans_pcie->trans;
1057 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
1061 spin_lock(&trans_pcie->irq_lock);
1063 spin_unlock(&trans_pcie->irq_lock);
1073 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1076 if (unlikely(!trans_pcie->rxq))
1080 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1089 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1091 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1094 if (!trans_pcie->rxq) {
1099 def_rxq = trans_pcie->rxq;
1117 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1140 if (trans_pcie->msix_enabled)
1143 netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
1152 trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1158 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1164 trans_pcie->global_table[i] = rxb;
1176 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1185 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1187 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1189 spin_lock_bh(&trans_pcie->rxq->lock);
1190 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1191 spin_unlock_bh(&trans_pcie->rxq->lock);
1210 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1212 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1219 if (!trans_pcie->rxq) {
1228 if (trans_pcie->base_rb_stts) {
1231 trans_pcie->base_rb_stts,
1232 trans_pcie->base_rb_stts_dma);
1233 trans_pcie->base_rb_stts = NULL;
1234 trans_pcie->base_rb_stts_dma = 0;
1238 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1247 kfree(trans_pcie->rx_pool);
1248 kfree(trans_pcie->global_table);
1249 kfree(trans_pcie->rxq);
1251 if (trans_pcie->alloc_page)
1252 __free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1273 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1274 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1307 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1308 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1310 int max_len = trans_pcie->rx_buf_bytes;
1324 ._rx_page_order = trans_pcie->rx_page_order,
1375 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1376 if (trans_pcie->no_reclaim_cmds[i] ==
1421 __free_pages(rxb->page, trans_pcie->rx_page_order);
1431 trans_pcie->rx_buf_bytes,
1439 __free_pages(rxb->page, trans_pcie->rx_page_order);
1454 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1483 if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1486 rxb = trans_pcie->global_table[vid - 1];
1507 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1512 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1515 rxq = &trans_pcie->rxq[queue];
1532 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1536 atomic_read(&trans_pcie->rba.req_pending) *
1649 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1650 struct iwl_trans *trans = trans_pcie->trans;
1658 if (!trans_pcie->rxq) {
1666 rxq = &trans_pcie->rxq[entry->entry];
1685 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1702 if (!trans_pcie->txqs.txq[i])
1704 del_timer(&trans_pcie->txqs.txq[i]->stuck_timer);
1745 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1755 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1756 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1767 trans_pcie->ict_index, read);
1768 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1769 trans_pcie->ict_index =
1770 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1772 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1773 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1797 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1798 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1801 mutex_lock(&trans_pcie->mutex);
1808 if (trans_pcie->opmode_down)
1820 mutex_unlock(&trans_pcie->mutex);
1830 if (trans_pcie->opmode_down)
1838 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1839 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1846 spin_lock_bh(&trans_pcie->irq_lock);
1851 if (likely(trans_pcie->use_ict))
1860 inta, trans_pcie->inta_mask,
1863 if (inta & (~trans_pcie->inta_mask))
1866 inta & (~trans_pcie->inta_mask));
1870 inta &= trans_pcie->inta_mask;
1885 spin_unlock_bh(&trans_pcie->irq_lock);
1896 spin_unlock_bh(&trans_pcie->irq_lock);
1911 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1919 spin_unlock_bh(&trans_pcie->irq_lock);
1952 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2039 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2041 __napi_schedule(&trans_pcie->rxq[0].napi);
2053 trans_pcie->ucode_write_complete = true;
2054 wake_up(&trans_pcie->ucode_write_waitq);
2056 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2057 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2058 wake_up(&trans_pcie->ucode_write_waitq);
2067 if (inta & ~(trans_pcie->inta_mask)) {
2069 inta & ~trans_pcie->inta_mask);
2073 spin_lock_bh(&trans_pcie->irq_lock);
2086 spin_unlock_bh(&trans_pcie->irq_lock);
2103 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2105 if (trans_pcie->ict_tbl) {
2107 trans_pcie->ict_tbl,
2108 trans_pcie->ict_tbl_dma);
2109 trans_pcie->ict_tbl = NULL;
2110 trans_pcie->ict_tbl_dma = 0;
2121 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2123 trans_pcie->ict_tbl =
2125 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2126 if (!trans_pcie->ict_tbl)
2130 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2143 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2146 if (!trans_pcie->ict_tbl)
2149 spin_lock_bh(&trans_pcie->irq_lock);
2152 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2154 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2163 trans_pcie->use_ict = true;
2164 trans_pcie->ict_index = 0;
2165 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2167 spin_unlock_bh(&trans_pcie->irq_lock);
2173 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2175 spin_lock_bh(&trans_pcie->irq_lock);
2176 trans_pcie->use_ict = false;
2177 spin_unlock_bh(&trans_pcie->irq_lock);
2205 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2206 struct iwl_trans *trans = trans_pcie->trans;
2207 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2213 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2216 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2221 spin_lock_bh(&trans_pcie->irq_lock);
2229 spin_unlock_bh(&trans_pcie->irq_lock);
2243 entry->entry, inta_fh, trans_pcie->fh_mask,
2245 if (inta_fh & ~trans_pcie->fh_mask)
2248 inta_fh & ~trans_pcie->fh_mask);
2252 inta_fh &= trans_pcie->fh_mask;
2254 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2257 if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2259 __napi_schedule(&trans_pcie->rxq[0].napi);
2264 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2267 if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2269 __napi_schedule(&trans_pcie->rxq[1].napi);
2276 trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2281 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2282 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2283 wake_up(&trans_pcie->ucode_write_waitq);
2292 trans_pcie->ucode_write_complete = true;
2293 wake_up(&trans_pcie->ucode_write_waitq);
2296 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2297 trans_pcie->imr_status = IMR_D2S_COMPLETED;
2298 wake_up(&trans_pcie->ucode_write_waitq);
2320 if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2321 trans_pcie->imr_status = IMR_D2S_ERROR;
2322 wake_up(&trans_pcie->imr_waitq);
2323 } else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2324 trans_pcie->fw_reset_state = FW_RESET_ERROR;
2325 wake_up(&trans_pcie->fw_reset_waitq);
2336 entry->entry, inta_hw, trans_pcie->hw_mask,
2338 if (inta_hw & ~trans_pcie->hw_mask)
2341 inta_hw & ~trans_pcie->hw_mask);
2345 inta_hw &= trans_pcie->hw_mask;
2353 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2362 if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2364 le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2370 trans_pcie->sx_complete = true;
2371 wake_up(&trans_pcie->sx_waitq);
2403 trans_pcie->fw_reset_state = FW_RESET_OK;
2404 wake_up(&trans_pcie->fw_reset_waitq);
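
Nearly every match above follows the same access pattern: the PCIe-private state is recovered from the generic iwl_trans handle via IWL_TRANS_GET_PCIE_TRANS(), its lazily allocated members (rxq, rx_pool, ict_tbl, ...) are checked for NULL, and the per-queue or interrupt state is then touched under the matching lock. A minimal sketch of that pattern, assembled only from the lines listed above; the function itself is illustrative and not part of the driver:

```c
/* Illustrative sketch only (kernel context; assumes the iwlwifi pcie
 * internal headers are in scope). It mirrors the recurring pattern in
 * the matches above, it is not an actual driver function. */
static void example_touch_default_rxq(struct iwl_trans *trans)
{
	/* Recover the PCIe-private transport state behind the generic handle
	 * (same macro as in lines 205, 251, 368, ... above). */
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq;

	/* rxq is allocated lazily (see lines 776-788); bail out if absent,
	 * as the driver does at lines 1076 and 1219. */
	if (!trans_pcie->rxq)
		return;

	rxq = &trans_pcie->rxq[0];	/* default RX queue */

	/* Per-queue state is modified under rxq->lock (cf. line 1189). */
	spin_lock_bh(&rxq->lock);
	/* ... update the write pointer, restock buffers, etc. ... */
	spin_unlock_bh(&rxq->lock);
}
```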