Lines Matching refs:trans

57 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
63 ptr->addr = dma_alloc_coherent(trans->dev, size,
71 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
76 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
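
The matches at 57-76 are the paired allocator/free helpers for a coherent DMA buffer descriptor. Below is a minimal userspace model of that pairing, with calloc/free standing in for dma_alloc_coherent()/dma_free_coherent(); the struct mirrors the addr/dma/size fields of struct iwl_dma_ptr, and the faked bus address is an assumption of the sketch, not driver behavior.

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Stand-in for struct iwl_dma_ptr: CPU address, bus address, size. */
struct dma_ptr {
	void     *addr;
	uint64_t  dma;   /* bus address; faked from the CPU pointer here */
	size_t    size;
};

/* Model of iwl_pcie_alloc_dma_ptr(): allocate, zero, record the size. */
static int alloc_dma_ptr(struct dma_ptr *ptr, size_t size)
{
	if (ptr->addr)            /* the driver WARN_ONs on double alloc */
		return -1;
	ptr->addr = calloc(1, size);
	if (!ptr->addr)
		return -1;        /* -ENOMEM in the driver */
	ptr->dma  = (uintptr_t)ptr->addr;
	ptr->size = size;
	return 0;
}

/* Model of iwl_pcie_free_dma_ptr(): free and clear so reuse is safe. */
static void free_dma_ptr(struct dma_ptr *ptr)
{
	free(ptr->addr);
	memset(ptr, 0, sizeof(*ptr));
}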
83 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
86 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
98 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
100 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
106 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
109 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
111 iwl_set_bit(trans, CSR_GP_CNTRL,
122 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
124 iwl_write32(trans, HBUS_TARG_WRPTR,
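
The matches at 83-124 show iwl_pcie_txq_inc_wr_ptr() choosing between a direct HBUS_TARG_WRPTR write and first requesting a NIC wakeup when shadow registers are disabled and the device may be in power-save. A hedged sketch of that decision shape follows; the register file, bit masks, and helper names here are illustrative stand-ins for the iwl_read32()/iwl_write32() accessors and CSR_*/HBUS_* offsets the matches reference.

#include <stdbool.h>
#include <stdint.h>

/* Faked register file; the driver reads/writes real MMIO offsets. */
enum { CSR_UCODE_DRV_GP1, CSR_GP_CNTRL, HBUS_TARG_WRPTR, NREGS };
static uint32_t regs[NREGS];
#define GP1_MAC_SLEEP       0x1   /* assumed bit layout for the sketch */
#define GP_CNTRL_MAC_ACCESS 0x8

struct txq { int id; uint32_t write_ptr; bool need_update; };

static void txq_inc_wr_ptr(struct txq *txq, bool shadow_reg_enable)
{
	/* With shadow registers off, the NIC may be asleep; check GP1. */
	if (!shadow_reg_enable && (regs[CSR_UCODE_DRV_GP1] & GP1_MAC_SLEEP)) {
		/* Request a wakeup and defer the write; it is replayed
		 * later, as the iwl_pcie_txq_check_wrptrs() matches show. */
		regs[CSR_GP_CNTRL] |= GP_CNTRL_MAC_ACCESS;
		txq->need_update = true;
		return;
	}
	/* NIC reachable: write queue id + write pointer directly,
	 * matching the write_ptr | (txq_id << 8) encoding at line 124. */
	regs[HBUS_TARG_WRPTR] = txq->write_ptr | (txq->id << 8);
}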
128 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
130 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
133 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
141 iwl_pcie_txq_inc_wr_ptr(trans, txq);
167 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
170 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
183 IWL_ERR(trans, "Error can not send more than %d chunks\n",
197 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
199 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
201 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
212 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
217 static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
224 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
231 void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
234 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
256 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
259 iwl_pcie_free_and_unmap_tso_page(trans, tmp);
285 static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
290 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
291 trans->invalid_tx_cmd.size);
294 static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
298 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
300 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
306 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
319 dma_unmap_page(trans->dev,
321 iwl_txq_gen1_tfd_tb_get_len(trans,
325 dma_unmap_single(trans->dev,
327 iwl_txq_gen1_tfd_tb_get_len(trans,
334 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
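
The matches at 294-334 walk a gen1 TFD's transfer buffers and unmap each one, picking dma_unmap_page() or dma_unmap_single() depending on how the buffer was mapped; a per-descriptor bitmap in the meta records which TBs were page-mapped, and TB 0 is skipped because it lives in the per-queue first_tb_bufs area. A compact model of that bookkeeping, with the callbacks standing in for the two DMA unmap calls:

#include <stdint.h>

#define MAX_TBS 20  /* gen1 TFDs hold up to 20 TBs */

struct tb_meta {
	uint32_t page_bitmap;  /* bit i set => TB i was mapped as a page */
	int      num_tbs;
};

/* Model: unmap TBs 1..num_tbs-1; TB 0 is never unmapped, matching
 * the driver's "first TB is in the pre-mapped bounce buffer" rule. */
static void tfd_unmap(struct tb_meta *meta,
		      void (*unmap_page)(int tb),
		      void (*unmap_single)(int tb))
{
	for (int i = 1; i < meta->num_tbs && i < MAX_TBS; i++) {
		if (meta->page_bitmap & (1u << i))
			unmap_page(i);    /* dma_unmap_page() in the driver */
		else
			unmap_single(i);  /* dma_unmap_single() */
	}
	meta->num_tbs = 0;
}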
339 * @trans: transport private data
346 static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
363 if (trans->trans_cfg->gen2)
364 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
365 iwl_txq_get_tfd(trans, txq, read_ptr));
367 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
378 iwl_op_mode_free_skb(trans->op_mode, skb);
386 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
388 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
392 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
399 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
410 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
412 iwl_txq_free_tfd(trans, txq, txq->read_ptr);
413 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
417 iwl_pcie_clear_cmd_in_flight(trans);
423 iwl_op_mode_free_skb(trans->op_mode, skb);
430 iwl_trans_pcie_wake_queue(trans, txq);
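
The iwl_pcie_txq_unmap() matches at 386-430 drain a queue by freeing the TFD at the read pointer and wrap-incrementing until it meets the write pointer. A self-contained model of that loop; queue sizes are powers of two, so the wrap is a mask, mirroring iwl_txq_inc_wrap():

/* Ring-index increment with wrap (q_size must be a power of two). */
static inline int inc_wrap(int idx, int q_size)
{
	return (idx + 1) & (q_size - 1);
}

struct ring { int read_ptr, write_ptr, size; };

/* Model of the unmap loop: drain every in-flight descriptor. */
static void ring_drain(struct ring *q, void (*free_entry)(int idx))
{
	while (q->read_ptr != q->write_ptr) {
		free_entry(q->read_ptr);   /* iwl_txq_free_tfd() analogue */
		q->read_ptr = inc_wrap(q->read_ptr, q->size);
	}
}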
441 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
443 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
445 struct device *dev = trans->dev;
451 iwl_pcie_txq_unmap(trans, txq_id);
464 trans->trans_cfg->base_params->max_tfd_queue_size,
483 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
485 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
486 int nq = trans->trans_cfg->base_params->num_of_queues;
499 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
505 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
509 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
515 if (trans->trans_cfg->base_params->scd_chain_ext_wa)
516 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
518 iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
523 iwl_scd_activate_fifos(trans);
527 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
532 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
533 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
537 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
538 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
542 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
544 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
548 * we should never get here in gen2 trans mode return early to avoid
551 if (WARN_ON_ONCE(trans->trans_cfg->gen2))
554 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
557 if (trans->trans_cfg->gen2)
558 iwl_write_direct64(trans,
559 FH_MEM_CBBC_QUEUE(trans, txq_id),
562 iwl_write_direct32(trans,
563 FH_MEM_CBBC_QUEUE(trans, txq_id),
565 iwl_pcie_txq_unmap(trans, txq_id);
571 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
579 iwl_pcie_tx_start(trans, 0);
582 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
584 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
590 if (!iwl_trans_grab_nic_access(trans))
595 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
600 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
602 IWL_ERR(trans,
604 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
606 iwl_trans_release_nic_access(trans);
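
The iwl_pcie_tx_stop_fh() matches at 582-606 zero each FH TX channel's config register and then poll FH_TSSR_TX_STATUS_REG until the idle bits assert or 5000 microseconds pass. A userspace model of that poll-with-timeout pattern, assuming a 10 us poll granularity (the real iwl_poll_bit() timing is internal to the driver):

#include <stdint.h>
#include <time.h>

/* Model of iwl_poll_bit(): spin until (read() & mask) == value or the
 * timeout (in microseconds) expires; returns elapsed us, or -1 for
 * what the driver reports as a timeout error. */
static int poll_bit(uint32_t (*read_reg)(void),
		    uint32_t mask, uint32_t value, int timeout_us)
{
	for (int t = 0; t < timeout_us; t += 10) {
		if ((read_reg() & mask) == value)
			return t;
		struct timespec ts = { 0, 10 * 1000 }; /* 10 us */
		nanosleep(&ts, NULL);
	}
	return -1;
}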
615 int iwl_pcie_tx_stop(struct iwl_trans *trans)
617 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
621 iwl_scd_deactivate_fifos(trans);
624 iwl_pcie_tx_stop_fh(trans);
641 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
643 iwl_pcie_txq_unmap(trans, txq_id);
653 void iwl_pcie_tx_free(struct iwl_trans *trans)
656 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
664 txq_id < trans->trans_cfg->base_params->num_of_queues;
666 iwl_pcie_txq_free(trans, txq_id);
674 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
676 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
679 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
686 if (trans->trans_cfg->gen2) {
687 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
693 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
697 IWL_ERR(trans,
702 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
703 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
704 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
705 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
706 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
712 struct iwl_trans *trans = txq->trans;
722 iwl_txq_log_scd_error(trans, txq);
724 iwl_force_nmi(trans);
727 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
730 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
731 size_t num_entries = trans->trans_cfg->gen2 ?
732 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
746 txq->trans = trans;
769 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
778 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
785 void *tfd = iwl_txq_get_tfd(trans, txq, i);
787 if (trans->trans_cfg->gen2)
788 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
790 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
795 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
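
The iwl_pcie_txq_alloc() matches at 727-795 show two coherent allocations (the TFD array at 769, the first-TB bounce buffers at 778) with a cleanup path that frees the TFD array if a later step fails. A minimal sketch of that goto-unwind pattern, with calloc/free again standing in for the DMA coherent API:

#include <stdlib.h>

struct txq_bufs { void *tfds; void *first_tb; };

/* Model of the two-step allocation with goto unwind: a failure in a
 * later step releases everything an earlier step allocated. */
static int txq_alloc_bufs(struct txq_bufs *q, size_t tfd_sz, size_t tb0_sz)
{
	q->tfds = calloc(1, tfd_sz);        /* dma_alloc_coherent() */
	if (!q->tfds)
		goto err;
	q->first_tb = calloc(1, tb0_sz);    /* dma_alloc_coherent() */
	if (!q->first_tb)
		goto err_free_tfds;
	return 0;

err_free_tfds:
	free(q->tfds);                      /* dma_free_coherent() */
	q->tfds = NULL;
err:
	return -1;                          /* -ENOMEM in the driver */
}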
811 static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
815 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
816 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
818 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
830 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
833 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
838 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
840 IWL_ERR(trans, "Keep Warm allocation failed\n");
845 kcalloc(trans->trans_cfg->base_params->num_of_queues,
848 IWL_ERR(trans, "Not enough memory for txq\n");
854 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
860 trans->cfg->min_txq_size);
863 trans->cfg->min_ba_txq_size);
865 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
868 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
877 iwl_pcie_tx_free(trans);
909 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
913 trans->trans_cfg->base_params->max_tfd_queue_size;
947 int iwl_pcie_tx_init(struct iwl_trans *trans)
949 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
955 ret = iwl_pcie_tx_alloc(trans);
964 iwl_scd_deactivate_fifos(trans);
967 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
973 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
979 trans->cfg->min_txq_size);
982 trans->cfg->min_ba_txq_size);
983 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
986 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
996 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
1000 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1001 if (trans->trans_cfg->base_params->num_of_queues > 20)
1002 iwl_set_bits_prph(trans, SCD_GP_CTRL,
1009 iwl_pcie_tx_free(trans);
1013 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1016 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1019 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1022 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
1031 if (!_iwl_trans_pcie_grab_nic_access(trans))
1088 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1090 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1100 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
1105 trans->trans_cfg->base_params->max_tfd_queue_size,
1110 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
1111 r = iwl_txq_inc_wrap(trans, r)) {
1112 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
1115 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1117 iwl_force_nmi(trans);
1122 iwl_pcie_clear_cmd_in_flight(trans);
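
The iwl_pcie_cmdq_reclaim() matches at 1088-1122 validate the completed index against the queue window and then step the read pointer forward until it passes that slot; host commands complete one at a time, so freeing more than one entry indicates a lost completion and the driver fires an NMI (line 1117). A hedged model, reusing inc_wrap() and struct ring from the drain sketch above; the exact window check in the driver is more involved than shown here.

/* Model of the command-queue reclaim loop. */
static int cmdq_reclaim(struct ring *q, int idx)
{
	int nfreed = 0;

	if (idx >= q->size)
		return -1;           /* out-of-window completion index */

	for (int stop = inc_wrap(idx, q->size), r = q->read_ptr;
	     r != stop; r = inc_wrap(r, q->size)) {
		q->read_ptr = inc_wrap(q->read_ptr, q->size);
		if (nfreed++ > 0)
			return -1;   /* "HCMD skipped": driver forces NMI */
	}
	return nfreed;
}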
1127 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1130 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1140 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
1147 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
1156 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1160 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1176 iwl_scd_enable_set_active(trans, 0);
1179 iwl_scd_txq_set_inactive(trans, txq_id);
1183 iwl_scd_txq_set_chain(trans, txq_id);
1189 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1192 iwl_scd_txq_enable_agg(trans, txq_id);
1200 iwl_scd_txq_disable_agg(trans, txq_id);
1216 scd_bug = !trans->trans_cfg->mq_rx_supported &&
1227 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1233 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1236 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1238 iwl_trans_write_mem32(trans,
1245 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1254 iwl_scd_enable_set_active(trans, BIT(txq_id));
1256 IWL_DEBUG_TX_QUEUES(trans,
1260 IWL_DEBUG_TX_QUEUES(trans,
1268 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
1271 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1277 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1280 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1295 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1301 iwl_scd_txq_set_inactive(trans, txq_id);
1303 iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
1307 iwl_pcie_txq_unmap(trans, txq_id);
1310 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1315 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
1317 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1320 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1332 iwl_write32(trans, HBUS_TARG_WRPTR,
1352 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1355 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1371 if (WARN(!trans->wide_cmd_header &&
1447 iwl_get_cmd_string(trans, cmd->id),
1455 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1458 IWL_ERR(trans, "No space in command queue\n");
1459 iwl_op_mode_cmd_queue_full(trans->op_mode);
1537 IWL_DEBUG_HC(trans,
1539 iwl_get_cmd_string(trans, cmd->id),
1547 iwl_pcie_txq_build_tfd(trans, txq,
1553 phys_addr = dma_map_single(trans->dev,
1557 if (dma_mapping_error(trans->dev, phys_addr)) {
1558 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1564 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1579 phys_addr = dma_map_single(trans->dev, data,
1581 if (dma_mapping_error(trans->dev, phys_addr)) {
1582 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1588 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1597 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1603 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1610 iwl_trans_pcie_block_txq_ptrs(trans, true);
1613 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1614 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1628 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1640 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1650 iwl_print_hex_error(trans, pkt, 32);
1662 if (trans->trans_cfg->gen2)
1663 iwl_txq_gen2_tfd_unmap(trans, meta,
1664 iwl_txq_get_tfd(trans, txq, index));
1666 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1682 iwl_trans_pcie_block_txq_ptrs(trans, false);
1684 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1687 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1688 IWL_WARN(trans,
1690 iwl_get_cmd_string(trans, cmd_id));
1692 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1693 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1694 iwl_get_cmd_string(trans, cmd_id));
1695 wake_up(&trans->wait_command_queue);
1703 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1717 dma_addr_t tb_phys = dma_map_single(trans->dev,
1720 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1722 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1724 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
1736 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1739 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1741 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1743 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1755 static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
1758 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1789 iwl_pcie_free_and_unmap_tso_page(trans, p->page);
1803 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
1805 if (unlikely(dma_mapping_error(trans->dev, phys))) {
1862 * @trans: transport private data
1874 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
1883 *hdr = iwl_pcie_get_page_hdr(trans,
1904 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
1913 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1919 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1936 trace_iwlwifi_dev_tx(trans->dev, skb,
1937 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1951 sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
2009 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
2011 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
2030 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2032 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
2041 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
2050 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2069 static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
2073 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2114 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2117 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2152 if (iwl_txq_space(trans, txq) < txq->high_mark) {
2153 iwl_txq_stop(trans, txq);
2156 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
2225 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
2236 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2237 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2239 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2241 trace_iwlwifi_dev_tx(trans->dev, skb,
2242 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
2254 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2261 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2266 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
2275 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
2277 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2298 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
2300 iwl_pcie_txq_inc_wr_ptr(trans, txq);
2309 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2314 static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
2318 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2341 void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
2344 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2368 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
2376 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
2382 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
2385 IWL_ERR(trans,
2388 trans->trans_cfg->base_params->max_tfd_queue_size,
2391 iwl_op_mode_time_point(trans->op_mode,
2402 txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
2411 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
2417 if (!trans->trans_cfg->gen2)
2418 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
2421 iwl_txq_free_tfd(trans, txq, txq_read_ptr);
2429 if (iwl_txq_space(trans, txq) > txq->low_mark &&
2466 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
2469 if (iwl_txq_space(trans, txq) > txq->low_mark)
2470 iwl_trans_pcie_wake_queue(trans, txq);
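
Together, the TX-path matches at 2152-2153 (stop when space drops below high_mark) and the reclaim matches at 2429 and 2469-2470 (wake once space rises past low_mark) form a stop/wake hysteresis on queue occupancy. A small model of that flow control, with the free-space counter simplified to a plain integer:

#include <stdbool.h>

struct txq_fc { int free, high_mark, low_mark; bool stopped; };

/* TX path (iwl_trans_pcie_tx() analogue): consume a slot, stop the
 * queue when free space falls below the high watermark. */
static void on_tx(struct txq_fc *q)
{
	q->free--;
	if (q->free < q->high_mark)
		q->stopped = true;        /* iwl_txq_stop() */
}

/* Reclaim path (iwl_pcie_reclaim() analogue): return n slots, wake
 * the queue only once free space exceeds the low watermark. */
static void on_reclaim(struct txq_fc *q, int n)
{
	q->free += n;
	if (q->stopped && q->free > q->low_mark)
		q->stopped = false;       /* iwl_trans_pcie_wake_queue() */
}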
2482 void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
2484 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2495 void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
2498 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2512 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
2550 static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
2553 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2554 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
2559 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
2562 &trans->status),
2566 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
2568 if (trans->trans_cfg->gen2)
2569 cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
2571 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
2575 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2576 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
2581 ret = wait_event_timeout(trans->wait_command_queue,
2583 &trans->status),
2586 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
2589 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
2592 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2593 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
2597 iwl_trans_sync_nmi(trans);
2601 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
2603 &trans->status)) {
2604 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
2612 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2613 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
2619 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
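
The iwl_trans_pcie_send_hcmd_sync() matches at 2550-2619, paired with the completion-side matches at 1687-1695, show the SYNC host-command handshake: the submitter sets STATUS_SYNC_HCMD_ACTIVE, enqueues, and sleeps on wait_command_queue with a timeout; iwl_pcie_hcmd_complete() clears the bit and wakes it. A pthread condition-variable model of that handshake; calling enqueue() under the lock is a simplification of this sketch, not how the driver orders its locking.

#include <pthread.h>
#include <stdbool.h>
#include <errno.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static bool hcmd_active;   /* models the STATUS_SYNC_HCMD_ACTIVE bit */

/* Submitter side: set active, enqueue, wait with a deadline. */
static int send_hcmd_sync(void (*enqueue)(void), int timeout_s)
{
	struct timespec dl;
	clock_gettime(CLOCK_REALTIME, &dl);
	dl.tv_sec += timeout_s;              /* the driver's HCMD timeout */

	pthread_mutex_lock(&lock);
	hcmd_active = true;
	enqueue();                           /* iwl_pcie_enqueue_hcmd() */
	int ret = 0;
	while (hcmd_active && ret != ETIMEDOUT)
		ret = pthread_cond_timedwait(&done, &lock, &dl);
	if (hcmd_active) {                   /* timed out: clear and fail */
		hcmd_active = false;
		ret = -1;
	} else {
		ret = 0;
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

/* Completion side (iwl_pcie_hcmd_complete() analogue). */
static void hcmd_complete(void)
{
	pthread_mutex_lock(&lock);
	hcmd_active = false;
	pthread_cond_broadcast(&done);       /* wake_up(&wait_command_queue) */
	pthread_mutex_unlock(&lock);
}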
2645 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
2649 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2653 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2654 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
2659 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
2661 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
2672 if (trans->trans_cfg->gen2)
2673 ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
2675 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
2678 IWL_ERR(trans,
2680 iwl_get_cmd_string(trans, cmd->id), ret);
2686 return iwl_trans_pcie_send_hcmd_sync(trans, cmd);