Lines matching refs: trans (iwlwifi PCIe gen2 TX path; each match is prefixed with its line number in the source file)

20 static struct page *get_workaround_page(struct iwl_trans *trans,
23 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
38 phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
40 if (unlikely(dma_mapping_error(trans->dev, phys))) {
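
The get_workaround_page() lines above follow the standard allocate-then-map DMA pattern: grab a page, map it with dma_map_page_attrs(), and verify the handle with dma_mapping_error() before use. A minimal sketch of that pattern using only core DMA-API calls; the DMA_ATTR_SKIP_CPU_SYNC attribute and the error path are assumptions here, and the real driver additionally chains the page into per-skb bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static struct page *demo_alloc_dma_page(struct device *dev, dma_addr_t *phys)
{
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return NULL;

        /* map for device reads; the CPU sync is deferred until data is
         * actually written into the page */
        *phys = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE,
                                   DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(dev, *phys))) {
                __free_page(page);
                return NULL;
        }
        return page;
}
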
62 static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
73 if (unlikely(dma_mapping_error(trans->dev, phys)))
77 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
103 page = get_workaround_page(trans, skb);
122 phys = dma_map_single(trans->dev, page_address(page), len,
124 if (unlikely(dma_mapping_error(trans->dev, phys)))
128 dma_sync_single_for_device(trans->dev, phys, len,
132 ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
140 IWL_DEBUG_TX(trans,
151 dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
153 dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
155 trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
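
iwl_txq_gen2_set_tb_with_wa() (lines 62 to 155 above) falls back to a bounce copy when a buffer cannot be used as-is: the data is copied into a workaround page, the copy is mapped and synced to the device, and the original mapping is released with the unmap call that matches how it was created. A condensed sketch, assuming the copy fits in one page; 'was_page' stands in for the driver's per-TB bookkeeping:

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/types.h>

static dma_addr_t demo_remap_via_bounce(struct device *dev, void *page_va,
                                        const void *virt, size_t len,
                                        dma_addr_t oldphys, bool was_page)
{
        dma_addr_t phys;

        memcpy(page_va, virt, len);             /* bounce copy */

        phys = dma_map_single(dev, page_va, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(dev, phys)))
                return DMA_MAPPING_ERROR;

        /* make the CPU copy visible to the device */
        dma_sync_single_for_device(dev, phys, len, DMA_TO_DEVICE);

        /* release the mapping being replaced, matching how it was made */
        if (was_page)
                dma_unmap_page(dev, oldphys, len, DMA_TO_DEVICE);
        else
                dma_unmap_single(dev, oldphys, len, DMA_TO_DEVICE);

        return phys;
}
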
160 static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
180 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
193 sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
252 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
253 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
272 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
284 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
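
The A-MSDU builder writes all subframe headers into one region that was mapped ahead of time (via iwl_pcie_prep_tso(), line 193) and publishes them with a single dma_sync_single_for_device() over the whole header room (line 284). A sketch of that batched-sync idea; the layout and names are illustrative:

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/types.h>

static void demo_publish_hdrs(struct device *dev, u8 *start_hdr,
                              dma_addr_t start_hdr_phys,
                              const u8 *hdr, int hdr_len, int nsubframes)
{
        u8 *pos = start_hdr;
        int i;

        for (i = 0; i < nsubframes; i++) {
                memcpy(pos, hdr, hdr_len);      /* per-subframe headers */
                pos += hdr_len;
        }

        /* one sync covers every header written above */
        dma_sync_single_for_device(dev, start_hdr_phys, pos - start_hdr,
                                   DMA_TO_DEVICE);
}
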
298 iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
307 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
319 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
334 tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
335 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
341 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
343 if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
352 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
356 static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
372 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
374 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
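
iwl_txq_gen2_tx_add_frags() walks the skb's paged fragments, mapping each with skb_frag_dma_map() and handing the result to the workaround-aware TB helper (line 374). A minimal sketch with a callback standing in for iwl_txq_gen2_set_tb_with_wa(), which performs its own dma_mapping_error() check:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int demo_map_frags(struct device *dev, struct sk_buff *skb,
                          int (*add_tb)(dma_addr_t phys, unsigned int len))
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                unsigned int len = skb_frag_size(frag);
                dma_addr_t phys;
                int ret;

                if (!len)
                        continue;

                phys = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
                /* the real helper validates the mapping itself */
                ret = add_tb(phys, len);
                if (ret)
                        return ret;
        }
        return 0;
}
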
385 iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
395 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
411 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
429 tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
430 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
436 iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
437 trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
446 tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
448 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
455 if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
461 tb_phys = dma_map_single(trans->dev, frag->data,
463 ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
469 if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
476 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
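
iwl_txq_gen2_build_tx() maps three kinds of payload after the inline first TB and the tb1 header block: the remainder of the linear area past the 802.11 header (line 446), the paged fragments (line 455), and each skb on the frag_list plus that skb's own fragments (lines 461 and 469). A reduced sketch of the linear and frag_list steps, with error handling collapsed to a return and the TB hand-off left as comments:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_map_linear_and_fraglist(struct device *dev,
                                        struct sk_buff *skb,
                                        unsigned int hdr_len)
{
        unsigned int len = skb_headlen(skb) - hdr_len;
        struct sk_buff *frag;
        dma_addr_t phys;

        if (len) {
                phys = dma_map_single(dev, skb->data + hdr_len, len,
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, phys)))
                        return -ENOMEM;
                /* hand (phys, len) to the TB helper here */
        }

        skb_walk_frags(skb, frag) {
                if (!skb_headlen(frag))
                        continue;
                phys = dma_map_single(dev, frag->data, skb_headlen(frag),
                                      DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(dev, phys)))
                        return -ENOMEM;
                /* hand (phys, skb_headlen(frag)) to the TB helper here */
        }
        return 0;
}
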
481 struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
489 struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
505 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
522 return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
524 return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
528 int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
539 if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
542 max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
549 (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
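
iwl_txq_space() computes the free slots in the TFD ring. max_tfd_queue_size is a power of two, so occupancy is (write - read) masked by size - 1, and one slot stays reserved to distinguish a full ring from an empty one unless the usable window (n_window) is already smaller than the ring. Equivalent standalone arithmetic:

static unsigned int demo_ring_space(unsigned int write_ptr,
                                    unsigned int read_ptr,
                                    unsigned int ring_size, /* power of 2 */
                                    unsigned int n_window)
{
        unsigned int max = (n_window < ring_size) ? n_window : ring_size - 1;
        unsigned int used = (write_ptr - read_ptr) & (ring_size - 1);

        return (used > max) ? 0 : max - used;
}
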
560 static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
564 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
585 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
610 int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
613 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
633 IWL_ERR(trans, "Error: cannot send more than %d chunks\n",
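
iwl_txq_gen2_set_tb() appends one (DMA address, length) pair to the TFD and fails once the per-TFD TB limit is reached, which is what the "cannot send more than %d chunks" error above guards. A sketch with an illustrative descriptor layout; the real struct iwl_tfh_tfd differs:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_tb {
        __le64 addr;
        __le16 tb_len;
} __packed;

struct demo_tfd {
        __le16 num_tbs;
        struct demo_tb tbs[25];         /* illustrative per-TFD TB limit */
};

static int demo_set_tb(struct demo_tfd *tfd, dma_addr_t addr, u16 len)
{
        unsigned int idx = le16_to_cpu(tfd->num_tbs);

        if (idx >= ARRAY_SIZE(tfd->tbs))
                return -EINVAL; /* "cannot send more than %d chunks" */

        tfd->tbs[idx].addr = cpu_to_le64(addr);
        tfd->tbs[idx].tb_len = cpu_to_le16(len);
        tfd->num_tbs = cpu_to_le16(idx + 1);
        return idx;
}
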
646 void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
650 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
657 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
668 dma_unmap_page(trans->dev,
673 dma_unmap_single(trans->dev,
679 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
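
iwl_txq_gen2_tfd_unmap() releases every TB except TB 0 (which lives in coherent "first TB" memory), using dma_unmap_page() or dma_unmap_single() according to a per-TB bit in the command meta (lines 668 to 673), then stamps the TFD invalid. A sketch of that walk; the bitmap and the address/length arrays are illustrative stand-ins for the meta and TFD accessors:

#include <linux/bits.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

static void demo_tfd_unmap(struct device *dev, unsigned long page_bits,
                           int num_tbs, const dma_addr_t *addr,
                           const u16 *len)
{
        int i;

        /* TB 0 is in coherent memory and needs no unmap */
        for (i = 1; i < num_tbs; i++) {
                if (page_bits & BIT(i))
                        dma_unmap_page(dev, addr[i], len[i], DMA_TO_DEVICE);
                else
                        dma_unmap_single(dev, addr[i], len[i], DMA_TO_DEVICE);
        }
}
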
682 static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
695 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
696 iwl_txq_get_tfd(trans, txq, idx));
705 iwl_op_mode_free_skb(trans->op_mode, skb);
713 static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
717 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
723 iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
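
The doorbell write in iwl_txq_inc_wr_ptr() (line 723) packs the queue id and the new write pointer into one 32-bit register write, with the id in the upper half. A sketch using a plain MMIO write; writel() and the register pointer stand in for the driver's iwl_write32()/HBUS_TARG_WRPTR:

#include <linux/io.h>
#include <linux/types.h>

static void demo_ring_doorbell(void __iomem *hbus_targ_wrptr,
                               u16 write_ptr, u16 queue_id)
{
        /* queue id in the upper 16 bits, new write pointer in the lower */
        writel(write_ptr | ((u32)queue_id << 16), hbus_targ_wrptr);
}
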
726 int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
729 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
751 if (iwl_txq_space(trans, txq) < txq->high_mark) {
752 iwl_txq_stop(trans, txq);
755 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
782 tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
788 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
801 iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
809 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
810 iwl_txq_inc_wr_ptr(trans, txq);
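
The iwl_txq_gen2_tx() lines (751 to 810) show the per-frame flow: stop the queue near the high mark, keep a small reserve of slots, build the TFD, update the scheduler byte-count table, then advance and publish the write pointer. A condensed, self-contained outline under a simplified queue type; descriptor building and the doorbell are left as comments:

#include <linux/errno.h>
#include <linux/types.h>

struct demo_txq {
        unsigned int write_ptr, read_ptr;
        unsigned int size;              /* power of two */
        unsigned int high_mark;
        bool stopped;
};

static unsigned int demo_space(const struct demo_txq *q)
{
        return (q->size - 1) -
               ((q->write_ptr - q->read_ptr) & (q->size - 1));
}

static int demo_tx_flow(struct demo_txq *q)
{
        if (demo_space(q) < q->high_mark)
                q->stopped = true;              /* iwl_txq_stop() */

        if (demo_space(q) < 3)
                return -ENOSPC;                 /* keep a small reserve */

        /* build the TFD (map all TBs) and update the byte-count table */

        q->write_ptr = (q->write_ptr + 1) & (q->size - 1);
        /* doorbell: HBUS_TARG_WRPTR = write_ptr | (queue id << 16) */
        return 0;
}
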
824 static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
826 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
832 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
841 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
843 iwl_txq_gen2_free_tfd(trans, txq);
844 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
850 iwl_op_mode_free_skb(trans->op_mode, skb);
857 iwl_trans_pcie_wake_queue(trans, txq);
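
iwl_txq_gen2_unmap() (lines 824 to 857) drains a queue: every descriptor between read_ptr and write_ptr is unmapped and its skb freed, read_ptr advances with the same power-of-two wrap as the TX path, and the queue is woken afterwards. A sketch reusing the demo_txq type from the previous block, with the per-descriptor teardown elided:

static void demo_unmap_queue(struct demo_txq *q)
{
        while (q->read_ptr != q->write_ptr) {
                /* iwl_txq_gen2_free_tfd(): unmap all TBs, free the skb */
                q->read_ptr = (q->read_ptr + 1) & (q->size - 1);
        }
        q->stopped = false;     /* iwl_trans_pcie_wake_queue() */
}
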
860 static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
863 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
864 struct device *dev = trans->dev;
891 static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
893 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
906 iwl_txq_gen2_unmap(trans, txq_id);
916 iwl_txq_gen2_free_memory(trans, txq);
924 iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
926 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
946 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
951 ret = iwl_pcie_txq_alloc(trans, txq, size, false);
953 IWL_ERR(trans, "Tx queue alloc failed\n");
956 ret = iwl_txq_init(trans, txq, size, false);
958 IWL_ERR(trans, "Tx queue init failed\n");
967 iwl_txq_gen2_free_memory(trans, txq);
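
iwl_txq_dyn_alloc_dma() (lines 924 to 967) uses the classic single-error-label unwind: the byte-count table, queue allocation, and queue init each jump to one label that frees everything built so far, and the result travels as an ERR_PTR. A sketch of that shape; the demo_txq_* helpers are hypothetical stand-ins for iwl_pcie_txq_alloc(), iwl_txq_init(), and iwl_txq_gen2_free_memory():

#include <linux/slab.h>
#include <linux/err.h>

static int demo_txq_alloc(struct demo_txq *txq, int size);
static int demo_txq_init(struct demo_txq *txq, int size);
static void demo_txq_free(struct demo_txq *txq);

static struct demo_txq *demo_dyn_alloc(int size)
{
        struct demo_txq *txq;
        int ret;

        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
        if (!txq)
                return ERR_PTR(-ENOMEM);

        ret = demo_txq_alloc(txq, size);        /* rings, byte-count table */
        if (ret)
                goto error;

        ret = demo_txq_init(txq, size);         /* pointers, locks */
        if (ret)
                goto error;

        return txq;

error:
        demo_txq_free(txq);                     /* also frees txq itself */
        return ERR_PTR(ret);
}
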
971 static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
975 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1010 wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
1016 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
1023 iwl_txq_gen2_free_memory(trans, txq);
1027 int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1030 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1046 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1047 trans->hw_rev_step == SILICON_A_STEP) {
1049 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1052 txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1056 IWL_DEBUG_TX_QUEUES(trans,
1102 ret = iwl_trans_send_cmd(trans, &hcmd);
1106 return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
1109 iwl_txq_gen2_free_memory(trans, txq);
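
iwl_txq_dyn_alloc() (lines 1027 to 1109) adds the firmware step on top: after the DMA resources exist (with a hardware-specific path selection on BZ A-step silicon, lines 1046 to 1052), a host command is sent with iwl_trans_send_cmd() and the response supplies the queue id and the masked write pointer (line 1010); on failure everything is freed again. An outline reusing demo_dyn_alloc()/demo_txq_free() from the previous block; the command helpers are hypothetical:

static int demo_send_txq_alloc_cmd(struct demo_txq *txq);
static int demo_adopt_alloc_response(struct demo_txq *txq);

static int demo_dyn_alloc_and_activate(int size)
{
        struct demo_txq *txq = demo_dyn_alloc(size);
        int ret;

        if (IS_ERR(txq))
                return PTR_ERR(txq);

        ret = demo_send_txq_alloc_cmd(txq);     /* iwl_trans_send_cmd() */
        if (ret)
                goto error;

        /* adopt the firmware-assigned qid and masked write pointer */
        return demo_adopt_alloc_response(txq);

error:
        demo_txq_free(txq);
        return ret;
}
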
1113 void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1115 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1128 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1133 iwl_txq_gen2_free(trans, queue);
1135 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1138 void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1140 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1151 iwl_txq_gen2_free(trans, i);
1155 int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1157 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1165 IWL_ERR(trans, "Not enough memory for tx queue\n");
1169 ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
1171 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1178 ret = iwl_txq_init(trans, queue, queue_size,
1181 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1190 iwl_txq_gen2_tx_free(trans);
1205 int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
1208 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1288 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
1296 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
1299 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1302 IWL_ERR(trans, "No space in command queue\n");
1303 iwl_op_mode_cmd_queue_full(trans->op_mode);
1368 IWL_DEBUG_HC(trans,
1370 iwl_get_cmd_string(trans, cmd->id), group_id,
1377 iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
1382 phys_addr = dma_map_single(trans->dev,
1386 if (dma_mapping_error(trans->dev, phys_addr)) {
1388 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
1391 iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
1406 phys_addr = dma_map_single(trans->dev, data,
1408 if (dma_mapping_error(trans->dev, phys_addr)) {
1410 iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
1413 iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
1422 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1430 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1431 iwl_txq_inc_wr_ptr(trans, txq);
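
iwl_pcie_gen2_enqueue_hcmd() (lines 1205 to 1431) reserves at least one slot (two for async commands, line 1299), places the start of the command in the pre-mapped coherent "first TB" (line 1377), maps the copied remainder of the command as one buffer (line 1382), then maps each NOCOPY/DUP fragment separately (line 1406); any mapping failure unwinds through iwl_txq_gen2_tfd_unmap(). A sketch of the copied-part mapping; the size split and the set-TB hook are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_map_hcmd_tail(struct device *dev, void *cmd,
                              unsigned int cmd_size,
                              unsigned int first_tb_size)
{
        dma_addr_t phys;

        /* the first first_tb_size bytes ride in coherent, pre-mapped
         * memory (TB 0); only the tail needs a streaming mapping */
        if (cmd_size <= first_tb_size)
                return 0;

        phys = dma_map_single(dev, (u8 *)cmd + first_tb_size,
                              cmd_size - first_tb_size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, phys))
                return -ENOMEM; /* real code unwinds via tfd_unmap */

        /* hand (phys, cmd_size - first_tb_size) to the TB helper here */
        return 0;
}
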