/* Search-result excerpts from the Intel i40e driver Tx/Rx ring header */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
 * the value of the rate limit is non-zero
#define I40E_MAX_INTRL	0x3B	/* reg uses 4 usec resolution */
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
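/* The kernel-doc line above belongs to the usec-to-register conversion
 * helper. A minimal hedged sketch of that conversion, assuming the
 * register counts in 4 usec units and that INTRL_ENA (name assumed from
 * the enable-bit comment fragment above) must be set for non-zero limits:
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	/* divide by 4 usec per register unit; OR in the enable bit */
	if (intrl >> 2)
		return (intrl >> 2) | INTRL_ENA;
	return 0;
}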
	(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
 * cache-line alignment.
	rx_buf_len -= NET_IP_ALIGN;
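/* The fragments above come from the Rx padding helpers. A hedged sketch
 * of the pad computation they feed, assuming half-page buffer packing:
 * round the buffer length up to a half page, and whatever room
 * SKB_WITH_OVERHEAD() leaves beyond the buffer itself becomes headroom
 * padding. (The caller subtracts NET_IP_ALIGN from the buffer length
 * first, as the last fragment shows.)
 */
static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}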
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
	return !!(rx_desc->wb.qword1.status_error_len &
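/* The return statement above is truncated. A hedged completion: the
 * descriptor field is little-endian, so byte-swap the constant test mask
 * once rather than swapping the descriptor field on every check.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}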
		if ((i) == (r)->count)		\
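/* The line above is the wrap-around check from a next-descriptor helper.
 * A hedged sketch of the full pattern (the macro and I40E_RX_DESC names
 * are assumed): bump the index, wrap to zero when it reaches the ring
 * size, then fetch the descriptor at the new index.
 */
#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		n = I40E_RX_DESC((r), (i));	\
	} while (0)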
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * the nearest 4K which represents our maximum read request size.
#define I40E_MAX_DATA_PER_TXD	(16 * 1024 - 1)
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
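/* The continuation line above belongs to an alignment macro. A hedged
 * reconstruction, assuming I40E_MAX_READ_REQ_SIZE is the 4096-byte read
 * request size the comment describes: mask the 16K - 1 limit down to a
 * 4K boundary, leaving 12K of usable data per descriptor.
 */
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))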
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * Due to hardware alignment restrictions (4K alignment), we need to
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
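/* A hedged sketch of the one-liner the comment above derives: size/12K
 * decomposes as (size >> 12) / 3, and /3 becomes (* 85) >> 8; folding
 * the two shifts together gives (size * 85) >> 20, plus one to round up.
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}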
#define I40E_TX_FLAGS_IPV4	BIT(4)
/* struct that defines a descriptor ring, associated with a VSI */
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct net_device *netdev;	/* netdev ring maps to */
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	/* Storing xdp_buff on ring helps in saving the state of partially built
	 * and to resume packet building for this ring in the next call to
	u16 reg_idx;			/* HW register index of the ring */
	bool ring_active;		/* is ring online or not */
	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}
	struct i40e_ring *ring;	/* pointer to linked list of ring(s) */
	u16 target_itr;		/* target ITR setting for ring(s) */
	u16 current_itr;	/* current ITR setting for ring(s) */
/* iterator for handling rings in ring container */
	for (pos = (head).ring; pos != NULL; pos = pos->next)
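/* The for-loop above is the body of the container iterator. A hedged
 * sketch of the full macro and a typical caller (the i40e_for_each_ring
 * name and the q_vector->tx container are assumptions), illustrative
 * only:
 */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static void example_poll_tx(struct i40e_q_vector *q_vector)
{
	struct i40e_ring *ring;

	i40e_for_each_ring(ring, q_vector->tx)
		;	/* clean Tx completions on each linked ring */
}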
static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
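/* A hedged completion of the helper above: when an Rx buffer takes more
 * than half a page (e.g. a 3K buffer on a 4K-page system), two buffers
 * no longer fit per page, so fall back to an order-1 (two-page)
 * allocation.
 */
static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}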
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
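/* A hedged completion of i40e_get_head(): the head write-back slot sits
 * one descriptor past the end of the ring (index tx_ring->count) and is
 * read as a live little-endian 32-bit value.
 */
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}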
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * there are not enough descriptors available in this ring since we need at least
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
		if (!nr_frags--)
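/* A hedged sketch of the full routine: sum the per-buffer descriptor
 * estimate for the linear head and then each page fragment of the skb.
 */
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}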
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
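/* A hedged sketch of the two-level check: the inline fast path returns 0
 * while enough descriptors are free; otherwise it falls back to a slow
 * path (__i40e_maybe_stop_tx and I40E_DESC_UNUSED are assumed helpers)
 * that stops the queue and re-checks under the proper memory barriers.
 */
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}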
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * Note: Our HW can't scatter-gather more than 8 fragments to build
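/* A hedged sketch of the check: packets under the 8-descriptor hardware
 * limit pass as-is; GSO packets get a deeper per-segment check
 * (__i40e_chk_linearize, assumed out-of-line helper); anything else over
 * the limit must be linearized. I40E_MAX_BUFFER_TXD is assumed to be 8.
 */
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data descriptors */
	return count != I40E_MAX_BUFFER_TXD;
}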
/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}