Lines Matching full:trans

52 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,  in iwl_pcie_alloc_dma_ptr()  argument
58 ptr->addr = dma_alloc_coherent(trans->dev, size, in iwl_pcie_alloc_dma_ptr()
66 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) in iwl_pcie_free_dma_ptr() argument
71 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma); in iwl_pcie_free_dma_ptr()
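
These two helpers pair a coherent DMA allocation with its release: the CPU address, bus address and size are tracked together so the free path hands back exactly what was allocated. A minimal sketch of that pattern, with demo_* names standing in for the driver's iwl_dma_ptr plumbing:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    /* Stand-in for struct iwl_dma_ptr: one coherent buffer tracked by
     * CPU address, bus (DMA) address and size. */
    struct demo_dma_ptr {
        void *addr;
        dma_addr_t dma;
        size_t size;
    };

    static int demo_alloc_dma_ptr(struct device *dev,
                                  struct demo_dma_ptr *ptr, size_t size)
    {
        if (WARN_ON(ptr->addr))    /* refuse to leak a live allocation */
            return -EINVAL;

        ptr->addr = dma_alloc_coherent(dev, size, &ptr->dma, GFP_KERNEL);
        if (!ptr->addr)
            return -ENOMEM;
        ptr->size = size;
        return 0;
    }

    static void demo_free_dma_ptr(struct device *dev, struct demo_dma_ptr *ptr)
    {
        if (!ptr->addr)
            return;

        dma_free_coherent(dev, ptr->size, ptr->addr, ptr->dma);
        memset(ptr, 0, sizeof(*ptr));    /* mark the slot reusable */
    }
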
78 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, in iwl_pcie_txq_inc_wr_ptr() argument
81 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_inc_wr_ptr()
93 if (!trans->trans_cfg->base_params->shadow_reg_enable && in iwl_pcie_txq_inc_wr_ptr()
95 test_bit(STATUS_TPOWER_PMI, &trans->status)) { in iwl_pcie_txq_inc_wr_ptr()
101 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); in iwl_pcie_txq_inc_wr_ptr()
104 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", in iwl_pcie_txq_inc_wr_ptr()
106 iwl_set_bit(trans, CSR_GP_CNTRL, in iwl_pcie_txq_inc_wr_ptr()
117 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); in iwl_pcie_txq_inc_wr_ptr()
119 iwl_write32(trans, HBUS_TARG_WRPTR, in iwl_pcie_txq_inc_wr_ptr()
123 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) in iwl_pcie_txq_check_wrptrs() argument
125 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_check_wrptrs()
128 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { in iwl_pcie_txq_check_wrptrs()
136 iwl_pcie_txq_inc_wr_ptr(trans, txq); in iwl_pcie_txq_check_wrptrs()
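
iwl_pcie_txq_inc_wr_ptr() shows the write-side handshake: without shadow registers, and with power management active (STATUS_TPOWER_PMI), the driver first reads CSR_UCODE_DRV_GP1; if the MAC is asleep it only requests a wakeup through CSR_GP_CNTRL and defers the update (iwl_pcie_txq_check_wrptrs() retries it later), otherwise it writes HBUS_TARG_WRPTR directly. A condensed, hedged sketch of that flow; the demo_* types and accessors are placeholders, and the sleep/wakeup bit names are the iwl-csr.h constants this logic uses:

    static void demo_txq_inc_wr_ptr(struct demo_trans *t, struct demo_txq *q)
    {
        if (!t->shadow_reg_enable && t->power_save_possible) {
            u32 reg = demo_read32(t, CSR_UCODE_DRV_GP1);

            if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
                /* asleep: request a wakeup, defer the pointer update */
                demo_set_bit(t, CSR_GP_CNTRL,
                             CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                q->need_update = true;
                return;
            }
        }

        /* awake: publish the write pointer, queue id in the upper byte */
        demo_write32(t, HBUS_TARG_WRPTR, q->write_ptr | (q->id << 8));
    }
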
162 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_pcie_txq_build_tfd() argument
165 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_build_tfd()
178 IWL_ERR(trans, "Error can not send more than %d chunks\n", in iwl_pcie_txq_build_tfd()
192 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) in iwl_pcie_clear_cmd_in_flight() argument
194 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_clear_cmd_in_flight()
196 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) in iwl_pcie_clear_cmd_in_flight()
207 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, in iwl_pcie_clear_cmd_in_flight()
212 static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans, in iwl_pcie_free_and_unmap_tso_page() argument
219 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE, in iwl_pcie_free_and_unmap_tso_page()
226 void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, in iwl_pcie_free_tso_pages() argument
229 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_free_tso_pages()
251 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0); in iwl_pcie_free_tso_pages()
254 iwl_pcie_free_and_unmap_tso_page(trans, tmp); in iwl_pcie_free_tso_pages()
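
iwl_pcie_free_tso_pages() tears down per-skb TSO state in two steps: the scatter table attached to the skb is released with dma_unmap_sgtable(), then the chain of header pages is walked, each page's streaming mapping undone and the page freed. A minimal sketch of the per-page loop, assuming (purely for illustration) that each page keeps its DMA address and next pointer in a small trailer reachable via page_address(); the driver's real iwl_tso_page_info layout differs:

    #include <linux/dma-mapping.h>
    #include <linux/mm.h>

    struct demo_tso_page_info {
        dma_addr_t dma_addr;
        struct page *next;
    };

    static void demo_free_tso_pages(struct device *dev, struct page *head)
    {
        while (head) {
            struct demo_tso_page_info *info = page_address(head);
            struct page *next = info->next;    /* read before freeing */

            dma_unmap_page(dev, info->dma_addr, PAGE_SIZE,
                           DMA_TO_DEVICE);
            __free_page(head);
            head = next;
        }
    }
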
280 static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, in iwl_txq_set_tfd_invalid_gen1() argument
285 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma, in iwl_txq_set_tfd_invalid_gen1()
286 trans->invalid_tx_cmd.size); in iwl_txq_set_tfd_invalid_gen1()
289 static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, in iwl_txq_gen1_tfd_unmap() argument
293 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_txq_gen1_tfd_unmap()
295 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); in iwl_txq_gen1_tfd_unmap()
301 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); in iwl_txq_gen1_tfd_unmap()
314 dma_unmap_page(trans->dev, in iwl_txq_gen1_tfd_unmap()
316 iwl_txq_gen1_tfd_tb_get_len(trans, in iwl_txq_gen1_tfd_unmap()
320 dma_unmap_single(trans->dev, in iwl_txq_gen1_tfd_unmap()
322 iwl_txq_gen1_tfd_tb_get_len(trans, in iwl_txq_gen1_tfd_unmap()
329 iwl_txq_set_tfd_invalid_gen1(trans, tfd); in iwl_txq_gen1_tfd_unmap()
334 * @trans: transport private data
341 static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_free_tfd() argument
358 if (trans->trans_cfg->gen2) in iwl_txq_free_tfd()
359 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
360 iwl_txq_get_tfd(trans, txq, read_ptr)); in iwl_txq_free_tfd()
362 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, in iwl_txq_free_tfd()
373 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_txq_free_tfd()
381 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) in iwl_pcie_txq_unmap() argument
383 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_unmap()
387 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); in iwl_pcie_txq_unmap()
394 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", in iwl_pcie_txq_unmap()
405 iwl_pcie_free_tso_pages(trans, skb, cmd_meta); in iwl_pcie_txq_unmap()
407 iwl_txq_free_tfd(trans, txq, txq->read_ptr); in iwl_pcie_txq_unmap()
408 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_pcie_txq_unmap()
412 iwl_pcie_clear_cmd_in_flight(trans); in iwl_pcie_txq_unmap()
418 iwl_op_mode_free_skb(trans->op_mode, skb); in iwl_pcie_txq_unmap()
425 iwl_trans_pcie_wake_queue(trans, txq); in iwl_pcie_txq_unmap()
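
iwl_pcie_txq_unmap() drains the ring under the queue lock: while read_ptr has not caught up with write_ptr it frees the TFD at the tail (releasing TSO pages first) and advances read_ptr via iwl_txq_inc_wrap(). With a power-of-two ring size the wrap is just a mask; a compact sketch:

    /* Drain sketch: n_entries is assumed to be a power of two, so the
     * increment-with-wrap collapses to a mask. demo_free_tfd() stands
     * in for iwl_txq_free_tfd() plus the TSO-page cleanup. */
    static void demo_txq_drain(struct demo_txq *q)
    {
        while (q->read_ptr != q->write_ptr) {
            demo_free_tfd(q, q->read_ptr);
            q->read_ptr = (q->read_ptr + 1) & (q->n_entries - 1);
        }
    }
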
436 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) in iwl_pcie_txq_free() argument
438 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_free()
440 struct device *dev = trans->dev; in iwl_pcie_txq_free()
446 iwl_pcie_txq_unmap(trans, txq_id); in iwl_pcie_txq_free()
459 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_pcie_txq_free()
478 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) in iwl_pcie_tx_start() argument
480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_tx_start()
481 int nq = trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_start()
494 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); in iwl_pcie_tx_start()
500 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + in iwl_pcie_tx_start()
504 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, in iwl_pcie_tx_start()
510 if (trans->trans_cfg->base_params->scd_chain_ext_wa) in iwl_pcie_tx_start()
511 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); in iwl_pcie_tx_start()
513 iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id, in iwl_pcie_tx_start()
518 iwl_scd_activate_fifos(trans); in iwl_pcie_tx_start()
522 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), in iwl_pcie_tx_start()
527 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG); in iwl_pcie_tx_start()
528 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG, in iwl_pcie_tx_start()
532 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) in iwl_pcie_tx_start()
533 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, in iwl_pcie_tx_start()
537 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) in iwl_trans_pcie_tx_reset() argument
539 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_tx_reset()
543 * we should never get here in gen2 trans mode; return early to avoid in iwl_trans_pcie_tx_reset()
546 if (WARN_ON_ONCE(trans->trans_cfg->gen2)) in iwl_trans_pcie_tx_reset()
549 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_trans_pcie_tx_reset()
552 if (trans->trans_cfg->gen2) in iwl_trans_pcie_tx_reset()
553 iwl_write_direct64(trans, in iwl_trans_pcie_tx_reset()
554 FH_MEM_CBBC_QUEUE(trans, txq_id), in iwl_trans_pcie_tx_reset()
557 iwl_write_direct32(trans, in iwl_trans_pcie_tx_reset()
558 FH_MEM_CBBC_QUEUE(trans, txq_id), in iwl_trans_pcie_tx_reset()
560 iwl_pcie_txq_unmap(trans, txq_id); in iwl_trans_pcie_tx_reset()
566 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, in iwl_trans_pcie_tx_reset()
574 iwl_pcie_tx_start(trans, 0); in iwl_trans_pcie_tx_reset()
577 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) in iwl_pcie_tx_stop_fh() argument
579 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_tx_stop_fh()
585 if (!iwl_trans_grab_nic_access(trans)) in iwl_pcie_tx_stop_fh()
590 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); in iwl_pcie_tx_stop_fh()
595 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); in iwl_pcie_tx_stop_fh()
597 IWL_ERR(trans, in iwl_pcie_tx_stop_fh()
599 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); in iwl_pcie_tx_stop_fh()
601 iwl_trans_release_nic_access(trans); in iwl_pcie_tx_stop_fh()
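
iwl_pcie_tx_stop_fh() grabs NIC access, zeroes every channel's FH_TCSR_CHNL_TX_CONFIG_REG, then waits with iwl_poll_bit() for the idle bits in FH_TSSR_TX_STATUS_REG. A generic poll-for-bits helper in that spirit (timeout in microseconds; not the driver's exact implementation, which also reports elapsed time):

    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Spin until (readl(reg) & mask) == (bits & mask) or the timeout
     * expires; returns 0 on success, -ETIMEDOUT otherwise. */
    static int demo_poll_bit(void __iomem *reg, u32 bits, u32 mask,
                             int timeout_us)
    {
        do {
            if ((readl(reg) & mask) == (bits & mask))
                return 0;
            udelay(10);
            timeout_us -= 10;
        } while (timeout_us > 0);

        return -ETIMEDOUT;
    }
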
610 int iwl_pcie_tx_stop(struct iwl_trans *trans) in iwl_pcie_tx_stop() argument
612 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_tx_stop()
616 iwl_scd_deactivate_fifos(trans); in iwl_pcie_tx_stop()
619 iwl_pcie_tx_stop_fh(trans); in iwl_pcie_tx_stop()
636 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_stop()
638 iwl_pcie_txq_unmap(trans, txq_id); in iwl_pcie_tx_stop()
648 void iwl_pcie_tx_free(struct iwl_trans *trans) in iwl_pcie_tx_free() argument
651 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_tx_free()
659 txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_free()
661 iwl_pcie_txq_free(trans, txq_id); in iwl_pcie_tx_free()
669 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); in iwl_pcie_tx_free()
671 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls); in iwl_pcie_tx_free()
674 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) in iwl_txq_log_scd_error() argument
681 if (trans->trans_cfg->gen2) { in iwl_txq_log_scd_error()
682 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, in iwl_txq_log_scd_error()
688 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); in iwl_txq_log_scd_error()
692 IWL_ERR(trans, in iwl_txq_log_scd_error()
697 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & in iwl_txq_log_scd_error()
698 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
699 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & in iwl_txq_log_scd_error()
700 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), in iwl_txq_log_scd_error()
701 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); in iwl_txq_log_scd_error()
707 struct iwl_trans *trans = txq->trans; in iwl_txq_stuck_timer() local
717 iwl_txq_log_scd_error(trans, txq); in iwl_txq_stuck_timer()
719 iwl_force_nmi(trans); in iwl_txq_stuck_timer()
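
iwl_txq_stuck_timer() is the queue watchdog: if it fires while frames are still pending, the driver logs scheduler state via iwl_txq_log_scd_error() and escalates with iwl_force_nmi() so the firmware produces an error dump. The timer_list plumbing behind such a watchdog, sketched with placeholder names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct demo_txq {
        struct timer_list stuck_timer;
        /* ... queue state ... */
    };

    static void demo_stuck_timer(struct timer_list *t)
    {
        struct demo_txq *q = from_timer(q, t, stuck_timer);

        /* dump queue/scheduler state, then escalate; the real
         * driver calls iwl_force_nmi() at this point */
        pr_warn("demo txq %p appears stuck\n", q);
    }

    static void demo_txq_arm_watchdog(struct demo_txq *q, unsigned int wd_ms)
    {
        timer_setup(&q->stuck_timer, demo_stuck_timer, 0);
        mod_timer(&q->stuck_timer, jiffies + msecs_to_jiffies(wd_ms));
    }
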
722 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_pcie_txq_alloc() argument
725 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_alloc()
726 size_t num_entries = trans->trans_cfg->gen2 ? in iwl_pcie_txq_alloc()
727 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_pcie_txq_alloc()
741 txq->trans = trans; in iwl_pcie_txq_alloc()
764 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, in iwl_pcie_txq_alloc()
773 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, in iwl_pcie_txq_alloc()
780 void *tfd = iwl_txq_get_tfd(trans, txq, i); in iwl_pcie_txq_alloc()
782 if (trans->trans_cfg->gen2) in iwl_pcie_txq_alloc()
783 iwl_txq_set_tfd_invalid_gen2(trans, tfd); in iwl_pcie_txq_alloc()
785 iwl_txq_set_tfd_invalid_gen1(trans, tfd); in iwl_pcie_txq_alloc()
790 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); in iwl_pcie_txq_alloc()
806 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) in iwl_pcie_tx_alloc() argument
810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_tx_alloc()
811 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_alloc()
813 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) in iwl_pcie_tx_alloc()
825 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls, in iwl_pcie_tx_alloc()
828 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); in iwl_pcie_tx_alloc()
833 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); in iwl_pcie_tx_alloc()
835 IWL_ERR(trans, "Keep Warm allocation failed\n"); in iwl_pcie_tx_alloc()
840 kcalloc(trans->trans_cfg->base_params->num_of_queues, in iwl_pcie_tx_alloc()
843 IWL_ERR(trans, "Not enough memory for txq\n"); in iwl_pcie_tx_alloc()
849 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_alloc()
855 trans->cfg->min_txq_size); in iwl_pcie_tx_alloc()
858 trans->cfg->min_ba_txq_size); in iwl_pcie_tx_alloc()
860 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id], in iwl_pcie_tx_alloc()
863 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); in iwl_pcie_tx_alloc()
872 iwl_pcie_tx_free(trans); in iwl_pcie_tx_alloc()
904 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, in iwl_txq_init() argument
908 trans->trans_cfg->base_params->max_tfd_queue_size; in iwl_txq_init()
940 int iwl_pcie_tx_init(struct iwl_trans *trans) in iwl_pcie_tx_init() argument
942 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_tx_init()
948 ret = iwl_pcie_tx_alloc(trans); in iwl_pcie_tx_init()
957 iwl_scd_deactivate_fifos(trans); in iwl_pcie_tx_init()
960 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, in iwl_pcie_tx_init()
966 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; in iwl_pcie_tx_init()
972 trans->cfg->min_txq_size); in iwl_pcie_tx_init()
975 trans->cfg->min_ba_txq_size); in iwl_pcie_tx_init()
976 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num, in iwl_pcie_tx_init()
979 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); in iwl_pcie_tx_init()
989 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), in iwl_pcie_tx_init()
993 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); in iwl_pcie_tx_init()
994 if (trans->trans_cfg->base_params->num_of_queues > 20) in iwl_pcie_tx_init()
995 iwl_set_bits_prph(trans, SCD_GP_CTRL, in iwl_pcie_tx_init()
1002 iwl_pcie_tx_free(trans); in iwl_pcie_tx_init()
1006 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, in iwl_pcie_set_cmd_in_flight() argument
1009 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_set_cmd_in_flight()
1012 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_pcie_set_cmd_in_flight()
1015 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) in iwl_pcie_set_cmd_in_flight()
1024 if (!_iwl_trans_pcie_grab_nic_access(trans)) in iwl_pcie_set_cmd_in_flight()
1081 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) in iwl_pcie_cmdq_reclaim() argument
1083 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_cmdq_reclaim()
1093 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || in iwl_pcie_cmdq_reclaim()
1098 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_pcie_cmdq_reclaim()
1103 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; in iwl_pcie_cmdq_reclaim()
1104 r = iwl_txq_inc_wrap(trans, r)) { in iwl_pcie_cmdq_reclaim()
1105 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); in iwl_pcie_cmdq_reclaim()
1108 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", in iwl_pcie_cmdq_reclaim()
1110 iwl_force_nmi(trans); in iwl_pcie_cmdq_reclaim()
1115 iwl_pcie_clear_cmd_in_flight(trans); in iwl_pcie_cmdq_reclaim()
1120 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, in iwl_pcie_txq_set_ratid_map() argument
1123 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_txq_set_ratid_map()
1133 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); in iwl_pcie_txq_set_ratid_map()
1140 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); in iwl_pcie_txq_set_ratid_map()
1149 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, in iwl_trans_pcie_txq_enable() argument
1153 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_txq_enable()
1169 iwl_scd_enable_set_active(trans, 0); in iwl_trans_pcie_txq_enable()
1172 iwl_scd_txq_set_inactive(trans, txq_id); in iwl_trans_pcie_txq_enable()
1176 iwl_scd_txq_set_chain(trans, txq_id); in iwl_trans_pcie_txq_enable()
1182 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id); in iwl_trans_pcie_txq_enable()
1185 iwl_scd_txq_enable_agg(trans, txq_id); in iwl_trans_pcie_txq_enable()
1193 iwl_scd_txq_disable_agg(trans, txq_id); in iwl_trans_pcie_txq_enable()
1209 scd_bug = !trans->trans_cfg->mq_rx_supported && in iwl_trans_pcie_txq_enable()
1220 iwl_write_direct32(trans, HBUS_TARG_WRPTR, in iwl_trans_pcie_txq_enable()
1226 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); in iwl_trans_pcie_txq_enable()
1229 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + in iwl_trans_pcie_txq_enable()
1231 iwl_trans_write_mem32(trans, in iwl_trans_pcie_txq_enable()
1238 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), in iwl_trans_pcie_txq_enable()
1247 iwl_scd_enable_set_active(trans, BIT(txq_id)); in iwl_trans_pcie_txq_enable()
1249 IWL_DEBUG_TX_QUEUES(trans, in iwl_trans_pcie_txq_enable()
1253 IWL_DEBUG_TX_QUEUES(trans, in iwl_trans_pcie_txq_enable()
1261 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, in iwl_trans_pcie_txq_set_shared_mode() argument
1264 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_txq_set_shared_mode()
1270 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, in iwl_trans_pcie_txq_disable() argument
1273 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_txq_disable()
1288 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), in iwl_trans_pcie_txq_disable()
1294 iwl_scd_txq_set_inactive(trans, txq_id); in iwl_trans_pcie_txq_disable()
1296 iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val, in iwl_trans_pcie_txq_disable()
1300 iwl_pcie_txq_unmap(trans, txq_id); in iwl_trans_pcie_txq_disable()
1303 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); in iwl_trans_pcie_txq_disable()
1308 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) in iwl_trans_pcie_block_txq_ptrs() argument
1310 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_block_txq_ptrs()
1313 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { in iwl_trans_pcie_block_txq_ptrs()
1325 iwl_write32(trans, HBUS_TARG_WRPTR, in iwl_trans_pcie_block_txq_ptrs()
1345 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, in iwl_pcie_enqueue_hcmd() argument
1348 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_enqueue_hcmd()
1364 if (WARN(!trans->wide_cmd_header && in iwl_pcie_enqueue_hcmd()
1440 iwl_get_cmd_string(trans, cmd->id), in iwl_pcie_enqueue_hcmd()
1448 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { in iwl_pcie_enqueue_hcmd()
1451 IWL_ERR(trans, "No space in command queue\n"); in iwl_pcie_enqueue_hcmd()
1452 iwl_op_mode_nic_error(trans->op_mode, in iwl_pcie_enqueue_hcmd()
1454 iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL); in iwl_pcie_enqueue_hcmd()
1532 IWL_DEBUG_HC(trans, in iwl_pcie_enqueue_hcmd()
1534 iwl_get_cmd_string(trans, cmd->id), in iwl_pcie_enqueue_hcmd()
1542 iwl_pcie_txq_build_tfd(trans, txq, in iwl_pcie_enqueue_hcmd()
1548 phys_addr = dma_map_single(trans->dev, in iwl_pcie_enqueue_hcmd()
1552 if (dma_mapping_error(trans->dev, phys_addr)) { in iwl_pcie_enqueue_hcmd()
1553 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, in iwl_pcie_enqueue_hcmd()
1559 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, in iwl_pcie_enqueue_hcmd()
1574 phys_addr = dma_map_single(trans->dev, data, in iwl_pcie_enqueue_hcmd()
1576 if (dma_mapping_error(trans->dev, phys_addr)) { in iwl_pcie_enqueue_hcmd()
1577 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, in iwl_pcie_enqueue_hcmd()
1583 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); in iwl_pcie_enqueue_hcmd()
1592 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); in iwl_pcie_enqueue_hcmd()
1598 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); in iwl_pcie_enqueue_hcmd()
1605 iwl_trans_pcie_block_txq_ptrs(trans, true); in iwl_pcie_enqueue_hcmd()
1608 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_pcie_enqueue_hcmd()
1609 iwl_pcie_txq_inc_wr_ptr(trans, txq); in iwl_pcie_enqueue_hcmd()
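
Inside iwl_pcie_enqueue_hcmd(), each command fragment past the first TB is streaming-mapped with dma_map_single(); if the mapping fails, the TBs already attached to the TFD are unwound (iwl_txq_gen1_tfd_unmap()) before the error is returned. The map-then-check idiom, sketched with placeholder helpers:

    /* Map one command chunk and attach it as a TB, unwinding the
     * partially built TFD on mapping failure. demo_* helpers are
     * placeholders for the driver's txq routines. */
    static int demo_map_hcmd_chunk(struct device *dev, struct demo_txq *q,
                                   void *data, size_t len, int idx)
    {
        dma_addr_t phys = dma_map_single(dev, data, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, phys)) {
            demo_tfd_unmap(q, idx);    /* release TBs mapped so far */
            return -ENOMEM;
        }

        demo_build_tfd(q, phys, len);
        return 0;
    }
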
1623 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, in iwl_pcie_hcmd_complete() argument
1635 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_hcmd_complete()
1645 iwl_print_hex_error(trans, pkt, 32); in iwl_pcie_hcmd_complete()
1657 if (trans->trans_cfg->gen2) in iwl_pcie_hcmd_complete()
1658 iwl_txq_gen2_tfd_unmap(trans, meta, in iwl_pcie_hcmd_complete()
1659 iwl_txq_get_tfd(trans, txq, index)); in iwl_pcie_hcmd_complete()
1661 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); in iwl_pcie_hcmd_complete()
1673 iwl_trans_pcie_block_txq_ptrs(trans, false); in iwl_pcie_hcmd_complete()
1675 iwl_pcie_cmdq_reclaim(trans, txq_id, index); in iwl_pcie_hcmd_complete()
1678 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) { in iwl_pcie_hcmd_complete()
1679 IWL_WARN(trans, in iwl_pcie_hcmd_complete()
1681 iwl_get_cmd_string(trans, cmd_id)); in iwl_pcie_hcmd_complete()
1683 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_pcie_hcmd_complete()
1684 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", in iwl_pcie_hcmd_complete()
1685 iwl_get_cmd_string(trans, cmd_id)); in iwl_pcie_hcmd_complete()
1686 wake_up(&trans->wait_command_queue); in iwl_pcie_hcmd_complete()
1694 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, in iwl_fill_data_tbs() argument
1708 dma_addr_t tb_phys = dma_map_single(trans->dev, in iwl_fill_data_tbs()
1711 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_fill_data_tbs()
1713 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, in iwl_fill_data_tbs()
1715 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); in iwl_fill_data_tbs()
1727 tb_phys = skb_frag_dma_map(trans->dev, frag, 0, in iwl_fill_data_tbs()
1730 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) in iwl_fill_data_tbs()
1732 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), in iwl_fill_data_tbs()
1734 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, in iwl_fill_data_tbs()
1746 static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans, in iwl_pcie_get_page_hdr() argument
1749 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_get_page_hdr()
1780 iwl_pcie_free_and_unmap_tso_page(trans, p->page); in iwl_pcie_get_page_hdr()
1794 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE, in iwl_pcie_get_page_hdr()
1796 if (unlikely(dma_mapping_error(trans->dev, phys))) { in iwl_pcie_get_page_hdr()
1853 * @trans: transport private data
1866 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, in iwl_pcie_prep_tso() argument
1878 *hdr = iwl_pcie_get_page_hdr(trans, in iwl_pcie_prep_tso()
1899 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) in iwl_pcie_prep_tso()
1908 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, in iwl_fill_data_tbs_amsdu() argument
1914 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_fill_data_tbs_amsdu()
1931 trace_iwlwifi_dev_tx(trans->dev, skb, in iwl_fill_data_tbs_amsdu()
1932 iwl_txq_get_tfd(trans, txq, txq->write_ptr), in iwl_fill_data_tbs_amsdu()
1946 sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room, in iwl_fill_data_tbs_amsdu()
2005 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, in iwl_fill_data_tbs_amsdu()
2007 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, in iwl_fill_data_tbs_amsdu()
2026 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, in iwl_fill_data_tbs_amsdu()
2028 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, in iwl_fill_data_tbs_amsdu()
2037 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, in iwl_fill_data_tbs_amsdu()
2046 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, in iwl_fill_data_tbs_amsdu() argument
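
The A-MSDU path writes 802.11/TCP headers into pages that are already mapped for DMA, so CPU and device ownership must be managed explicitly: iwl_pcie_get_page_hdr() maps the page with dma_map_page_attrs(), and once the headers are written iwl_fill_data_tbs_amsdu() pushes them to the device with dma_sync_single_for_device(). The idiom in isolation, assuming (as the explicit sync suggests, though the listing doesn't show the attrs) that the initial mapping skips the CPU sync:

    /* Map once, fill the page from the CPU, then sync the written
     * range for the device. Illustrative flow, not driver code. */
    dma_addr_t pa = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                                       DMA_TO_DEVICE,
                                       DMA_ATTR_SKIP_CPU_SYNC);
    if (dma_mapping_error(dev, pa))
        return -ENOMEM;

    /* ... CPU writes headers into page_address(page) ... */

    dma_sync_single_for_device(dev, pa, PAGE_SIZE, DMA_TO_DEVICE);
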
2065 static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, in iwl_txq_gen1_update_byte_cnt_tbl() argument
2069 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_txq_gen1_update_byte_cnt_tbl()
2110 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, in iwl_trans_pcie_tx() argument
2113 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_tx()
2148 if (iwl_txq_space(trans, txq) < txq->high_mark) { in iwl_trans_pcie_tx()
2149 iwl_txq_stop(trans, txq); in iwl_trans_pcie_tx()
2152 if (unlikely(iwl_txq_space(trans, txq) < 3)) { in iwl_trans_pcie_tx()
2221 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, in iwl_trans_pcie_tx()
2232 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); in iwl_trans_pcie_tx()
2233 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) in iwl_trans_pcie_tx()
2235 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); in iwl_trans_pcie_tx()
2237 trace_iwlwifi_dev_tx(trans->dev, skb, in iwl_trans_pcie_tx()
2238 iwl_txq_get_tfd(trans, txq, txq->write_ptr), in iwl_trans_pcie_tx()
2250 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, in iwl_trans_pcie_tx()
2257 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, in iwl_trans_pcie_tx()
2262 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, in iwl_trans_pcie_tx()
2271 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); in iwl_trans_pcie_tx()
2273 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), in iwl_trans_pcie_tx()
2294 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); in iwl_trans_pcie_tx()
2296 iwl_pcie_txq_inc_wr_ptr(trans, txq); in iwl_trans_pcie_tx()
2305 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); in iwl_trans_pcie_tx()
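
iwl_trans_pcie_tx() applies flow control before queuing: when free space falls below the queue's high mark it stops the corresponding mac80211 queue, and the reclaim path later wakes it once space climbs back above the low mark. Free space in a wrapping ring follows from the two pointers; a minimal sketch assuming a power-of-two ring that keeps one slot unused so "full" and "empty" stay distinguishable:

    static int demo_txq_space(const struct demo_txq *q)
    {
        /* slots in use, counted with wraparound */
        unsigned int used = (q->write_ptr - q->read_ptr) &
                            (q->n_entries - 1);

        return q->n_entries - 1 - used;
    }
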
2310 static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, in iwl_txq_gen1_inval_byte_cnt_tbl() argument
2314 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_txq_gen1_inval_byte_cnt_tbl()
2337 void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, in iwl_pcie_reclaim() argument
2340 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_reclaim()
2368 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", in iwl_pcie_reclaim()
2376 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n", in iwl_pcie_reclaim()
2382 last_to_free = iwl_txq_dec_wrap(trans, tfd_num); in iwl_pcie_reclaim()
2385 IWL_ERR(trans, in iwl_pcie_reclaim()
2388 trans->trans_cfg->base_params->max_tfd_queue_size, in iwl_pcie_reclaim()
2391 iwl_op_mode_time_point(trans->op_mode, in iwl_pcie_reclaim()
2402 txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr), in iwl_pcie_reclaim()
2411 iwl_pcie_free_tso_pages(trans, skb, cmd_meta); in iwl_pcie_reclaim()
2417 if (!trans->trans_cfg->gen2) in iwl_pcie_reclaim()
2418 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq, in iwl_pcie_reclaim()
2421 iwl_txq_free_tfd(trans, txq, txq_read_ptr); in iwl_pcie_reclaim()
2429 if (iwl_txq_space(trans, txq) > txq->low_mark && in iwl_pcie_reclaim()
2466 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); in iwl_pcie_reclaim()
2469 if (iwl_txq_space(trans, txq) > txq->low_mark) in iwl_pcie_reclaim()
2470 iwl_trans_pcie_wake_queue(trans, txq); in iwl_pcie_reclaim()
2482 void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) in iwl_pcie_set_q_ptrs() argument
2484 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_set_q_ptrs()
2495 void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans, in iwl_pcie_freeze_txq_timer() argument
2498 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_pcie_freeze_txq_timer()
2512 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", in iwl_pcie_freeze_txq_timer()
2550 static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, in iwl_trans_pcie_send_hcmd_sync() argument
2553 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); in iwl_trans_pcie_send_hcmd_sync()
2554 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); in iwl_trans_pcie_send_hcmd_sync()
2559 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); in iwl_trans_pcie_send_hcmd_sync()
2562 &trans->status), in iwl_trans_pcie_send_hcmd_sync()
2566 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); in iwl_trans_pcie_send_hcmd_sync()
2568 if (trans->trans_cfg->gen2) in iwl_trans_pcie_send_hcmd_sync()
2569 cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); in iwl_trans_pcie_send_hcmd_sync()
2571 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); in iwl_trans_pcie_send_hcmd_sync()
2575 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_pcie_send_hcmd_sync()
2576 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", in iwl_trans_pcie_send_hcmd_sync()
2581 ret = wait_event_timeout(trans->wait_command_queue, in iwl_trans_pcie_send_hcmd_sync()
2583 &trans->status), in iwl_trans_pcie_send_hcmd_sync()
2586 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", in iwl_trans_pcie_send_hcmd_sync()
2589 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", in iwl_trans_pcie_send_hcmd_sync()
2592 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); in iwl_trans_pcie_send_hcmd_sync()
2593 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", in iwl_trans_pcie_send_hcmd_sync()
2597 iwl_trans_sync_nmi(trans); in iwl_trans_pcie_send_hcmd_sync()
2601 if (test_bit(STATUS_FW_ERROR, &trans->status)) { in iwl_trans_pcie_send_hcmd_sync()
2603 &trans->status)) { in iwl_trans_pcie_send_hcmd_sync()
2604 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); in iwl_trans_pcie_send_hcmd_sync()
2612 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_pcie_send_hcmd_sync()
2613 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); in iwl_trans_pcie_send_hcmd_sync()
2619 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); in iwl_trans_pcie_send_hcmd_sync()
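
iwl_trans_pcie_send_hcmd_sync() is the classic active-bit/waitqueue pattern: set STATUS_SYNC_HCMD_ACTIVE, enqueue the command, then wait_event_timeout() until iwl_pcie_hcmd_complete() clears the bit and wakes wait_command_queue; on timeout the sender clears the bit itself and escalates. A stripped-down sketch with placeholder names and timeout:

    #include <linux/wait.h>
    #include <linux/jiffies.h>

    #define DEMO_HCMD_TIMEOUT_MS 2000    /* placeholder value */

    static int demo_send_hcmd_sync(struct demo_trans *t,
                                   struct demo_host_cmd *cmd)
    {
        long ret;

        set_bit(DEMO_SYNC_HCMD_ACTIVE, &t->status);

        ret = demo_enqueue_hcmd(t, cmd);
        if (ret < 0) {
            clear_bit(DEMO_SYNC_HCMD_ACTIVE, &t->status);
            return ret;
        }

        /* the completion path clears the bit and calls wake_up() */
        ret = wait_event_timeout(t->wait_command_queue,
                                 !test_bit(DEMO_SYNC_HCMD_ACTIVE,
                                           &t->status),
                                 msecs_to_jiffies(DEMO_HCMD_TIMEOUT_MS));
        if (!ret) {
            clear_bit(DEMO_SYNC_HCMD_ACTIVE, &t->status);
            return -ETIMEDOUT;    /* real driver also dumps state/NMIs */
        }

        return 0;
    }
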
2645 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, in iwl_trans_pcie_send_hcmd() argument
2649 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) in iwl_trans_pcie_send_hcmd()
2653 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { in iwl_trans_pcie_send_hcmd()
2654 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", in iwl_trans_pcie_send_hcmd()
2659 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && in iwl_trans_pcie_send_hcmd()
2661 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); in iwl_trans_pcie_send_hcmd()
2672 if (trans->trans_cfg->gen2) in iwl_trans_pcie_send_hcmd()
2673 ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); in iwl_trans_pcie_send_hcmd()
2675 ret = iwl_pcie_enqueue_hcmd(trans, cmd); in iwl_trans_pcie_send_hcmd()
2678 IWL_ERR(trans, in iwl_trans_pcie_send_hcmd()
2680 iwl_get_cmd_string(trans, cmd->id), ret); in iwl_trans_pcie_send_hcmd()
2686 return iwl_trans_pcie_send_hcmd_sync(trans, cmd); in iwl_trans_pcie_send_hcmd()