Lines Matching +full:sw +full:- +full:managed (matched lines below are from the idpf Ethernet driver's Tx/Rx header, drivers/net/ethernet/intel/idpf/idpf_txrx.h; the leading number on each line is its line number in that file)

1 /* SPDX-License-Identifier: GPL-2.0-only */
75 * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
77 * be gone forever and SW has no reasonable way to tell that this has happened.
78 * From SW perspective, when we finally get an interrupt, it looks like we're
83 #define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
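The fragments at lines 75-78 explain why the buffer-queue working set is capped at desc_count - 1: one descriptor of slack keeps HW from consuming the whole ring and flipping the gen bit before SW has seen the wrap. A minimal sketch of a refill loop that honors the cap; everything except IDPF_RX_BUFQ_WORKING_SET and struct idpf_buf_queue is hypothetical:

/* Never post the last descriptor: if HW could consume the entire ring
 * and wrap, the gen bit would flip without SW noticing, and the queue
 * would look permanently "not yet done" from SW's point of view.
 */
static void example_refill_bufq(struct idpf_buf_queue *bufq)
{
	u16 budget = IDPF_RX_BUFQ_WORKING_SET(bufq);	/* desc_count - 1 */

	while (budget--)
		if (!example_post_one_buf(bufq))	/* hypothetical helper */
			break;
}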
87 if (unlikely(++(ntc) == (rxq)->desc_count)) { \
95 if (unlikely(++(idx) == (q)->desc_count)) \
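Lines 87 and 95 are only the wrap tests of two ring-index bump helpers. A sketch of the full do/while forms they plausibly belong to; the macro names and the GEN_CHK toggle are assumptions, though the idpf_queue_change() helper itself appears at line 327 below:

/* Splitq RX: wrap next_to_clean and toggle the SW gen-check bit so the
 * next lap of the ring expects the opposite gen value from HW (assumed form).
 */
#define IDPF_RX_BUMP_NTC(rxq, ntc)				\
do {								\
	if (unlikely(++(ntc) == (rxq)->desc_count)) {		\
		(ntc) = 0;					\
		idpf_queue_change(GEN_CHK, rxq);		\
	}							\
} while (0)

/* Singleq: plain wrap, no gen bit to track (assumed form). */
#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx)			\
do {								\
	if (unlikely(++(idx) == (q)->desc_count))		\
		(idx) = 0;					\
} while (0)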
117 ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
118 (txq)->next_to_clean - (txq)->next_to_use - 1)
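Worked example for the free-count expression at lines 117-118: with desc_count = 512, next_to_clean = 10 and next_to_use = 100, the ntc > ntu test fails, so desc_count is added in: 512 + 10 - 100 - 1 = 421 free descriptors. The trailing "- 1" keeps one slot permanently unused, so a completely full ring can never look identical to an empty one (both would otherwise have next_to_clean == next_to_use).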
120 #define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
122 (txq)->desc_count >> 2)
124 #define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
129 (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
131 (txq)->num_completions_pending - (txq)->complq->num_completions)
136 ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
137 0 : (txq)->compl_tag_cur_gen)
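The generation bump at lines 136-137 pre-increments compl_tag_cur_gen and wraps it once it reaches compl_tag_gen_max. Worked example, assuming the default layout shown at lines 610-612 (GEN = 0-1023, i.e. compl_tag_gen_max = 1024): with compl_tag_cur_gen = 1023 the pre-increment yields 1024, the >= test is true, and the macro evaluates to 0, which the caller presumably stores back as the new generation.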
154 * struct idpf_buf_lifo - LIFO for managing OOO completions
166 * struct idpf_tx_offload_params - Offload parameters for a given packet
246 /* The size limit for a transmit buffer in a descriptor is (16K - 1).
251 #define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
262 DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
265 #define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
298 * 0->1 or 1->0 on each ring wrap. SW maintains its own
304 * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
305 * bit and Q_RFL_GEN is the SW bit.
325 #define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
326 #define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
327 #define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
328 #define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
331 __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
333 __assign_bit(__IDPF_Q_##f, (q)->flags, v)
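The helpers at lines 325-333 paste the __IDPF_Q_ prefix onto the flag token, so callers name flags bare. A sketch of the gen-bit comparison the GEN_CHK flag exists for; the exact call site is an assumption:

	/* Splitq RX clean loop: a descriptor is valid only when its gen bit
	 * matches the SW-tracked generation for this lap of the ring.
	 */
	if (gen_bit != idpf_queue_has(GEN_CHK, rxq))
		break;		/* HW has not written this descriptor back yet */

	/* ... consume the descriptor, then bump ntc, flipping GEN_CHK on wrap ... */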
357 * @dyn_ctl_sw_itridx_ena_m: Mask for SW ITR index
358 * @dyn_ctl_swint_trig_m: Mask for dyn_ctl SW triggered interrupt enable
478 /* Index used for 'SW ITR' update in DYN_CTL register */
486 * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
497 * struct idpf_rx_queue - software structure representing a receive queue
588 * struct idpf_tx_queue - software structure representing a transmit queue
610 * --------------------------------
611 * | GEN=0-1023 |IDX = 0-63|
612 * --------------------------------
618 * --------------------------------
619 * |GEN | IDX = 0-8159 |
620 * --------------------------------
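Both layouts pack a generation count and a buffer index into a single completion tag; only the split point between the fields differs. A sketch of composing a tag under the first layout (lines 610-612); compl_tag_gen_s is an assumed name for the shift separating the two fields:

	/* GEN in the upper bits, per-queue buffer index in the lower bits */
	u16 compl_tag = ((u16)(txq)->compl_tag_cur_gen << (txq)->compl_tag_gen_s) |
			buf_idx;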
636 * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
637 * @stash: Tx buffer stash for Flow-based scheduling mode
707 * struct idpf_buf_queue - software structure representing a buffer queue
766 * struct idpf_compl_queue - software structure representing a completion queue
870 * managed by at most two bufqs (depending on performance configuration).
943 cpu = cpumask_first(q_vector->affinity_mask); in idpf_q_vector_to_mem()
949 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
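Given the (16K - 1) per-descriptor data limit at line 251, a large fragment must be split across several descriptors. A sketch of what idpf_size_to_txd_count() plausibly does; IDPF_TX_MAX_DESC_DATA_ALIGNED is an assumed aligned-down companion of that limit:

static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	/* e.g. a 32 KiB fragment with, say, a 12 KiB usable chunk -> 3 descriptors */
	return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
}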
962 * idpf_tx_singleq_build_ctob - populate command tag offset and size
987 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
997 if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2) in idpf_tx_splitq_build_desc()
1004 * idpf_vport_intr_set_wb_on_itr - enable descriptor writeback on disabled interrupts
1011 if (q_vector->wb_on_itr) in idpf_vport_intr_set_wb_on_itr()
1014 q_vector->wb_on_itr = true; in idpf_vport_intr_set_wb_on_itr()
1015 reg = &q_vector->intr_reg; in idpf_vport_intr_set_wb_on_itr()
1017 writel(reg->dyn_ctl_wb_on_itr_m | reg->dyn_ctl_intena_msk_m | in idpf_vport_intr_set_wb_on_itr()
1018 (IDPF_NO_ITR_UPDATE_IDX << reg->dyn_ctl_itridx_s), in idpf_vport_intr_set_wb_on_itr()
1019 reg->dyn_ctl); in idpf_vport_intr_set_wb_on_itr()
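The write at lines 1017-1019 arms write-back-on-ITR: HW keeps writing back completed descriptors while the vector's interrupt stays masked. A sketch of the typical trigger, assuming it is called from the NAPI poll path when the budget is exhausted and the interrupt is therefore not re-enabled:

	/* Budget used up: stay in polling mode, but let HW keep writing
	 * back completions in the meantime (assumed call site).
	 */
	if (work_done >= budget) {
		idpf_vport_intr_set_wb_on_itr(q_vector);
		return budget;
	}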
1064 return !netif_subqueue_maybe_stop(tx_q->netdev, tx_q->idx, in idpf_tx_maybe_stop_common()
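idpf_tx_maybe_stop_common() wraps netif_subqueue_maybe_stop(), presumably feeding it the free-descriptor count from lines 117-118, and returns nonzero when the subqueue is too full and had to be stopped. A sketch of typical use on the xmit path; the descs_needed estimate and the call site are assumptions:

	unsigned int descs_needed = skb_shinfo(skb)->nr_frags + 1;	/* rough estimate */

	if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
		return -EBUSY;	/* not enough room for this frame */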