xref: /linux/include/net/tso.h (revision 05e352444b2430de4b183b4a988085381e5fd6ad)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _TSO_H
3 #define _TSO_H
4 
5 #include <linux/skbuff.h>
6 #include <linux/dma-mapping.h>
7 #include <net/ip.h>
8 
9 #define TSO_HEADER_SIZE		256
10 
/*
 * struct tso_t - per-skb TSO segmentation cursor
 *
 * Tracks the walk through an skb's payload (linear data then frags)
 * while a driver emits one TSO segment at a time via tso_build_hdr()
 * and tso_build_data().
 */
struct tso_t {
	int	next_frag_idx;	/* next skb frag to consume */
	int	size;		/* bytes left in the current region */
	void	*data;		/* current payload position */
	u16	ip_id;		/* IPv4 ID, incremented per segment */
	u8	tlen; /* transport header len */
	bool	ipv6;		/* true if the skb is IPv6 (no IP ID fixup) */
	u32	tcp_seq;	/* TCP sequence number for the next segment */
};
20 
21 /* Calculate the worst case buffer count */
22 static inline int tso_count_descs(const struct sk_buff *skb)
23 {
24 	return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
25 }
26 
/* Build the (IP + transport) header for one segment of @size bytes into @hdr. */
void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
		   int size, bool is_last);
/* Advance the payload cursor in @tso by @size bytes. */
void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
/* Initialize @tso from @skb; returns the header length. */
int tso_start(struct sk_buff *skb, struct tso_t *tso);
31 
/**
 * struct tso_dma_map - DMA mapping state for GSO payload
 * @dev: device used for DMA mapping
 * @skb: the GSO skb being mapped
 * @hdr_len: per-segment header length
 * @iova_state: DMA IOVA state (when IOMMU available)
 * @iova_offset: global byte offset into IOVA range (IOVA path only)
 * @total_len: total payload length
 * @frag_idx: current region (-1 = linear, 0..nr_frags-1 = frag)
 * @offset: byte offset within current region
 * @linear_dma: DMA address of the linear payload
 * @linear_len: length of the linear payload
 * @nr_frags: number of frags successfully DMA-mapped
 * @frags: per-frag DMA address and length
 *
 * DMA-maps the payload regions of a GSO skb (linear data + frags).
 * Prefers the DMA IOVA API for a single contiguous mapping with one
 * IOTLB sync; falls back to per-region dma_map_phys() otherwise.
 * Which path was taken can be told apart at completion time via
 * dma_use_iova(&iova_state).
 */
struct tso_dma_map {
	struct device		*dev;
	const struct sk_buff	*skb;
	unsigned int		hdr_len;
	/* IOVA path */
	struct dma_iova_state	iova_state;
	size_t			iova_offset;
	size_t			total_len;
	/* Fallback path if IOVA path fails */
	int			frag_idx;
	unsigned int		offset;
	dma_addr_t		linear_dma;
	unsigned int		linear_len;
	unsigned int		nr_frags;
	struct {
		dma_addr_t	dma;
		unsigned int	len;
	} frags[MAX_SKB_FRAGS];
};
70 
/**
 * struct tso_dma_map_completion_state - Completion-time cleanup state
 * @iova_state: DMA IOVA state (when IOMMU available)
 * @total_len: total payload length of the IOVA mapping
 *
 * A compact copy of the two &struct tso_dma_map fields needed to tear
 * down an IOVA mapping after transmit completes.  Drivers store this on
 * their SW ring at xmit time via tso_dma_map_completion_save(), then
 * call tso_dma_map_complete() at completion time.
 */
struct tso_dma_map_completion_state {
	struct dma_iova_state iova_state;
	size_t total_len;
};
84 
/* Map @skb's payload for DMA; returns 0 on success or a negative errno. */
int tso_dma_map_init(struct tso_dma_map *map, struct device *dev,
		     const struct sk_buff *skb, unsigned int hdr_len);
/* Undo tso_dma_map_init() at xmit time (e.g. on a later xmit failure). */
void tso_dma_map_cleanup(struct tso_dma_map *map);
/* Number of DMA chunks needed to cover the next @len payload bytes. */
unsigned int tso_dma_map_count(struct tso_dma_map *map, unsigned int len);
/* Fetch the next DMA chunk; returns false when the payload is exhausted. */
bool tso_dma_map_next(struct tso_dma_map *map, dma_addr_t *addr,
		      unsigned int *chunk_len, unsigned int *mapping_len,
		      unsigned int seg_remaining);
92 
93 /**
94  * tso_dma_map_completion_save - save state needed for completion-time cleanup
95  * @map: the xmit-time DMA map
96  * @cstate: driver-owned storage that persists until completion
97  *
98  * Should be called at xmit time to update the completion state and later passed
99  * to tso_dma_map_complete().
100  */
101 static inline void
102 tso_dma_map_completion_save(const struct tso_dma_map *map,
103 			    struct tso_dma_map_completion_state *cstate)
104 {
105 	cstate->iova_state = map->iova_state;
106 	cstate->total_len = map->total_len;
107 }
108 
109 /**
110  * tso_dma_map_complete - tear down mapping at completion time
111  * @dev: the device that owns the mapping
112  * @cstate: state saved by tso_dma_map_completion_save()
113  *
114  * Return: true if the IOVA path was used and the mapping has been
115  * destroyed; false if the fallback per-region path was used and the
116  * driver must unmap via its normal completion path.
117  */
118 static inline bool
119 tso_dma_map_complete(struct device *dev,
120 		     struct tso_dma_map_completion_state *cstate)
121 {
122 	if (dma_use_iova(&cstate->iova_state)) {
123 		dma_iova_destroy(dev, &cstate->iova_state, cstate->total_len,
124 				 DMA_TO_DEVICE, 0);
125 		return true;
126 	}
127 
128 	return false;
129 }
130 
131 #endif	/* _TSO_H */
132