Lines matching full:iq (uses of the Tx input queue pointer iq, struct octep_iq, in the octep driver)

65 		ioq_vector->iq = oct->iq[i];  in octep_alloc_ioq_vectors()
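
The hit above is where octep_alloc_ioq_vectors() binds an interrupt vector to its Tx queue by index (the matching Rx queue binding is assumed to sit in nearby code this search does not show). A minimal sketch of that per-queue pairing, using simplified stand-in types rather than the driver's real structures:

#include <stdio.h>

/* Simplified stand-in types; the driver's real octep_iq/octep_oq carry far more state. */
struct octep_iq { int q_no; };
struct octep_oq { int q_no; };

struct octep_ioq_vector {
        struct octep_iq *iq;    /* Tx queue serviced by this interrupt vector */
        struct octep_oq *oq;    /* Rx queue serviced by this interrupt vector */
};

int main(void)
{
        enum { NUM_IOQS = 4 };
        static struct octep_iq iq[NUM_IOQS];
        static struct octep_oq oq[NUM_IOQS];
        struct octep_ioq_vector vec[NUM_IOQS];
        int i;

        for (i = 0; i < NUM_IOQS; i++) {
                iq[i].q_no = i;
                oq[i].q_no = i;
                /* Bind queue pair i to vector i, as octep_alloc_ioq_vectors() does. */
                vec[i].iq = &iq[i];
                vec[i].oq = &oq[i];
        }

        for (i = 0; i < NUM_IOQS; i++)
                printf("vector %d -> iq %d, oq %d\n",
                       i, vec[i].iq->q_no, vec[i].oq->q_no);
        return 0;
}
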
560 * @iq: Octeon Tx queue data structure.
563 static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) in octep_enable_ioq_irq() argument
567 netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); in octep_enable_ioq_irq()
568 if (iq->pkts_processed) { in octep_enable_ioq_irq()
569 writel(iq->pkts_processed, iq->inst_cnt_reg); in octep_enable_ioq_irq()
570 iq->pkt_in_done -= iq->pkts_processed; in octep_enable_ioq_irq()
571 iq->pkts_processed = 0; in octep_enable_ioq_irq()
581 writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); in octep_enable_ioq_irq()
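
The octep_enable_ioq_irq() hits show the Tx side of interrupt re-arming: the number of completions consumed is written back to the instruction-count register, the host-side counters are adjusted, and the RESEND bit is set so the hardware raises a fresh interrupt if more completions landed in the meantime. A small userspace model of that sequence follows; the register variables and the RESEND bit position are illustrative stand-ins, not the device layout.

#include <stdio.h>
#include <stdint.h>

#define OCTEP_IQ_INTR_RESEND_BIT  59    /* illustrative bit position, not the device's */

/* Stand-ins for the memory-mapped instruction-count register (writel/writeq targets). */
static uint32_t inst_cnt_reg32;
static uint64_t inst_cnt_reg64;

struct model_iq {
        uint32_t pkts_processed;        /* completions consumed in the last poll */
        uint32_t pkt_in_done;           /* running completion count mirrored from HW */
};

/* Model of the write-back/re-arm step done just before re-enabling the IRQ. */
static void model_enable_iq_irq(struct model_iq *iq)
{
        if (iq->pkts_processed) {
                /* Tell the hardware how many completions the host consumed. */
                inst_cnt_reg32 = iq->pkts_processed;
                iq->pkt_in_done -= iq->pkts_processed;
                iq->pkts_processed = 0;
        }
        /* Ask the hardware to resend the interrupt if more work arrived meanwhile. */
        inst_cnt_reg64 = 1ULL << OCTEP_IQ_INTR_RESEND_BIT;
}

int main(void)
{
        struct model_iq iq = { .pkts_processed = 16, .pkt_in_done = 20 };

        model_enable_iq_irq(&iq);
        printf("acked=%u still_pending=%u resend_mask=%#llx\n",
               inst_cnt_reg32, iq.pkt_in_done,
               (unsigned long long)inst_cnt_reg64);
        return 0;
}
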
596 tx_pending = octep_iq_process_completions(ioq_vector->iq, budget); in octep_napi_poll()
606 octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); in octep_napi_poll()
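
These two octep_napi_poll() hits reflect the NAPI contract: process up to budget Tx completions, and only when nothing is pending complete the poll and call octep_enable_ioq_irq() to turn the per-queue interrupt back on. A simplified sketch of that control flow (Rx handling and napi_complete_done() are omitted, and the helpers are hypothetical):

#include <stdio.h>

/* Hypothetical stand-in: "process" up to budget completions, return how many remain. */
static int process_tx_completions(int *pending, int budget)
{
        int done = (*pending < budget) ? *pending : budget;

        *pending -= done;
        return *pending;
}

static void enable_ioq_irq(void)
{
        printf("IRQ re-enabled\n");
}

/* Poll decision: stay in polling mode while Tx work remains, else re-arm the IRQ. */
static int napi_poll(int *tx_work, int budget)
{
        int tx_pending = process_tx_completions(tx_work, budget);

        if (tx_pending) {
                printf("work remains, keep polling\n");
                return budget;          /* NAPI will call us again */
        }
        printf("all done, complete poll\n");
        enable_ioq_irq();
        return 0;
}

int main(void)
{
        int pending = 100;

        while (napi_poll(&pending, 64))
                ;
        return 0;
}
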
798 * @iq: Octeon Tx queue data structure.
803 static inline int octep_iq_full_check(struct octep_iq *iq) in octep_iq_full_check() argument
805 if (likely((IQ_INSTR_SPACE(iq)) > in octep_iq_full_check()
810 netif_stop_subqueue(iq->netdev, iq->q_no); in octep_iq_full_check()
822 if (unlikely(IQ_INSTR_SPACE(iq) > in octep_iq_full_check()
824 netif_start_subqueue(iq->netdev, iq->q_no); in octep_iq_full_check()
825 iq->stats->restart_cnt++; in octep_iq_full_check()
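
octep_iq_full_check() is the usual Tx stop/wake pattern with a recheck: if free descriptor space exceeds the wake threshold the queue keeps running; otherwise the subqueue is stopped, space is rechecked to close the race with a completion that may have just freed descriptors, and the queue is restarted (counting restart_cnt) if the recheck passes. A self-contained model of that hysteresis, with made-up ring size and threshold and a generic ring-space calculation in place of IQ_INSTR_SPACE():

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE               256     /* power of two, illustrative */
#define WAKE_QUEUE_THRESHOLD    32      /* illustrative, not OCTEP_WAKE_QUEUE_THRESHOLD */

struct model_iq {
        unsigned int host_write_index;  /* next descriptor the host fills */
        unsigned int flush_index;       /* next descriptor the completion path frees */
        unsigned int restart_cnt;
        bool stopped;
};

/* Generic free-descriptor count on a power-of-two ring (stands in for IQ_INSTR_SPACE). */
static unsigned int iq_space(const struct model_iq *iq)
{
        return RING_SIZE - 1 -
               ((iq->host_write_index - iq->flush_index) & (RING_SIZE - 1));
}

/* Returns 1 if the queue had to stay stopped, 0 if transmission can continue. */
static int iq_full_check(struct model_iq *iq)
{
        if (iq_space(iq) > WAKE_QUEUE_THRESHOLD)
                return 0;

        iq->stopped = true;             /* netif_stop_subqueue() in the driver */

        /* Recheck: a completion may have freed space between the test and the stop. */
        if (iq_space(iq) > WAKE_QUEUE_THRESHOLD) {
                iq->stopped = false;    /* netif_start_subqueue() */
                iq->restart_cnt++;
                return 0;
        }
        return 1;
}

int main(void)
{
        struct model_iq iq = { .host_write_index = 230, .flush_index = 0 };
        int full;

        printf("free descriptors: %u\n", iq_space(&iq));
        full = iq_full_check(&iq);
        printf("full=%d stopped=%d restarts=%u\n", full, iq.stopped, iq.restart_cnt);
        return 0;
}
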
851 struct octep_iq *iq; in octep_start_xmit() local
866 iq = oct->iq[q_no]; in octep_start_xmit()
871 wi = iq->host_write_index; in octep_start_xmit()
872 hw_desc = &iq->desc_ring[wi]; in octep_start_xmit()
875 tx_buffer = iq->buff_info + wi; in octep_start_xmit()
885 tx_buffer->dma = dma_map_single(iq->dev, skb->data, in octep_start_xmit()
887 if (dma_mapping_error(iq->dev, tx_buffer->dma)) in octep_start_xmit()
902 dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); in octep_start_xmit()
903 if (dma_mapping_error(iq->dev, dma)) in octep_start_xmit()
914 dma = skb_frag_dma_map(iq->dev, frag, 0, in octep_start_xmit()
916 if (dma_mapping_error(iq->dev, dma)) in octep_start_xmit()
943 __netdev_tx_sent_queue(iq->netdev_q, skb->len, xmit_more); in octep_start_xmit()
946 iq->fill_cnt++; in octep_start_xmit()
948 iq->host_write_index = wi & iq->ring_size_mask; in octep_start_xmit()
955 if (!octep_iq_full_check(iq) && xmit_more && in octep_start_xmit()
956 iq->fill_cnt < iq->fill_threshold) in octep_start_xmit()
962 writel(iq->fill_cnt, iq->doorbell_reg); in octep_start_xmit()
963 iq->stats->instr_posted += iq->fill_cnt; in octep_start_xmit()
964 iq->fill_cnt = 0; in octep_start_xmit()
969 dma_unmap_single(iq->dev, sglist[0].dma_ptr[0], in octep_start_xmit()
974 dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], in octep_start_xmit()
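
The octep_start_xmit() hits trace the transmit hot path: the skb head is mapped with dma_map_single() and each fragment with skb_frag_dma_map(), the descriptor at host_write_index is filled, fill_cnt is bumped and the write index wraps via ring_size_mask, the doorbell register is written only when xmit_more is clear, the batch reaches fill_threshold, or the queue is getting full, and already-made mappings are unwound on a mapping error. A compact sketch of just the batching and wrap-around logic; descriptor contents, DMA mapping, the full check, and the error unwind are left out, and the ring size and threshold are invented:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE       8       /* power of two, illustrative */
#define FILL_THRESHOLD  4       /* illustrative stand-in for iq->fill_threshold */

struct model_iq {
        uint32_t host_write_index;
        uint32_t ring_size_mask;        /* RING_SIZE - 1 */
        uint32_t fill_cnt;              /* descriptors posted since the last doorbell */
        uint64_t instr_posted;          /* stats counter */
        uint32_t doorbell_reg;          /* stand-in for the MMIO doorbell register */
};

/* Post one packet's descriptor; ring the doorbell only when the batch is flushed. */
static void model_xmit(struct model_iq *iq, bool xmit_more)
{
        uint32_t wi = iq->host_write_index;

        /* ... fill iq->desc_ring[wi] and record DMA mappings here ... */
        iq->fill_cnt++;
        wi++;
        iq->host_write_index = wi & iq->ring_size_mask;

        /* Defer the doorbell while the stack promises more packets and the batch is small. */
        if (xmit_more && iq->fill_cnt < FILL_THRESHOLD)
                return;

        iq->doorbell_reg = iq->fill_cnt;        /* writel(iq->fill_cnt, iq->doorbell_reg) */
        iq->instr_posted += iq->fill_cnt;
        iq->fill_cnt = 0;
}

int main(void)
{
        struct model_iq iq = { .ring_size_mask = RING_SIZE - 1 };
        int i;

        /* Six packets with xmit_more set, then one final packet without it. */
        for (i = 0; i < 6; i++)
                model_xmit(&iq, true);
        model_xmit(&iq, false);

        printf("write_index=%u posted=%llu pending_batch=%u\n",
               iq.host_write_index, (unsigned long long)iq.instr_posted, iq.fill_cnt);
        return 0;
}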