// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Intel Corporation */

#include <net/libeth/rx.h>

/* Rx buffer management */

/**
 * libeth_rx_hw_len - get the actual buffer size to be passed to HW
 * @pp: &page_pool_params of the netdev to calculate the size for
 * @max_len: maximum buffer size for a single descriptor
 *
 * Return: HW-writeable length per one buffer to pass to the HW, accounting
 * for: the MTU of @pp->netdev, the HW-required alignment, the minimum and
 * maximum allowed values, and the system's page size.
 */
static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len)
{
	u32 len;

	len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
	len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
	len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
		   pp->max_len);

	return len;
}
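
/* A worked example of the above (a sketch assuming the defaults from
 * <net/libeth/rx.h>): for a 1500-byte MTU, LIBETH_RX_LL_LEN adds the L2
 * overhead on top, and ALIGN() rounds the sum up to a multiple of the
 * 128-byte LIBETH_RX_BUF_STRIDE, giving 1536. min3() then caps the result
 * by the per-descriptor @max_len (treated as unlimited when 0, via the
 * ``max_len ? : U32_MAX`` fallback) and by the HW-syncable per-page length
 * in pp->max_len.
 */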

/**
 * libeth_rx_fq_create - create a PP with the default libeth settings
 * @fq: buffer queue struct to fill
 * @napi: &napi_struct covering this PP (no usage outside its poll loops)
 *
 * Return: %0 on success, -%errno on failure.
 */
int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= LIBETH_RX_PAGE_ORDER,
		.pool_size	= fq->count,
		.nid		= fq->nid,
		.dev		= napi->dev->dev.parent,
		.netdev		= napi->dev,
		.napi		= napi,
		.dma_dir	= DMA_FROM_DEVICE,
		.offset		= LIBETH_SKB_HEADROOM,
	};
	struct libeth_fqe *fqes;
	struct page_pool *pool;

	/* HW-writeable / syncable length per one page */
	pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset);

	/* HW-writeable length per buffer */
	fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len);
	/* Buffer size to allocate: headroom + HW-writeable length, with the
	 * skb_shared_info overhead added by SKB_HEAD_ALIGN(), rounded up to
	 * a power of two
	 */
	fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset +
							 fq->buf_len));

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
	if (!fqes)
		goto err_buf;

	fq->fqes = fqes;
	fq->pp = pool;

	return 0;

err_buf:
	page_pool_destroy(pool);

	return -ENOMEM;
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
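
/* Example usage (a minimal sketch; the queue length and the driver's
 * q_vector holding the NAPI context are hypothetical):
 *
 *	struct libeth_fq fq = {
 *		.count	 = 512,
 *		.buf_len = 0,	// 0 == no per-descriptor length limit
 *		.nid	 = NUMA_NO_NODE,
 *	};
 *	int err;
 *
 *	err = libeth_rx_fq_create(&fq, &q_vector->napi);
 *	if (err)
 *		return err;
 *
 * On success, fq.pp, fq.fqes, fq.buf_len and fq.truesize are filled in;
 * libeth_rx_fq_destroy(&fq) frees everything again on teardown.
 */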

/**
 * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
 * @fq: buffer queue to process
 */
void libeth_rx_fq_destroy(struct libeth_fq *fq)
{
	kvfree(fq->fqes);
	page_pool_destroy(fq->pp);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);

/**
 * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
 * @page: page to recycle
 *
 * To be used on exception or other slow paths which don't require fast
 * inline recycling.
 */
void libeth_rx_recycle_slow(struct page *page)
{
	page_pool_recycle_direct(page->pp, page);
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
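
/* Example (a sketch; the descriptor error check and the fqe naming are
 * hypothetical driver code):
 *
 *	// Drop a buffer the HW flagged as erroneous instead of building
 *	// an skb around it:
 *	if (unlikely(rx_desc_error(desc))) {
 *		libeth_rx_recycle_slow(fqe->page);
 *		continue;
 *	}
 */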

/* Converting abstract packet type numbers into a software structure with
 * the packet parameters to do O(1) lookup on Rx.
 */

static const u16 libeth_rx_pt_xdp_oip[] = {
	[LIBETH_RX_PT_OUTER_L2]		= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_OUTER_IPV4]	= XDP_RSS_L3_IPV4,
	[LIBETH_RX_PT_OUTER_IPV6]	= XDP_RSS_L3_IPV6,
};

static const u16 libeth_rx_pt_xdp_iprot[] = {
	[LIBETH_RX_PT_INNER_NONE]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_INNER_UDP]	= XDP_RSS_L4_UDP,
	[LIBETH_RX_PT_INNER_TCP]	= XDP_RSS_L4_TCP,
	[LIBETH_RX_PT_INNER_SCTP]	= XDP_RSS_L4_SCTP,
	[LIBETH_RX_PT_INNER_ICMP]	= XDP_RSS_L4_ICMP,
	[LIBETH_RX_PT_INNER_TIMESYNC]	= XDP_RSS_TYPE_NONE,
};

static const u16 libeth_rx_pt_xdp_pl[] = {
	[LIBETH_RX_PT_PAYLOAD_NONE]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L2]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L3]	= XDP_RSS_TYPE_NONE,
	[LIBETH_RX_PT_PAYLOAD_L4]	= XDP_RSS_L4,
};

/**
 * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
 * @pt: PT structure to evaluate
 *
 * Generates the ``hash_type`` field with XDP RSS type values from the parsed
 * packet parameters when they're obtained dynamically at runtime.
 */
void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
{
	pt->hash_type = 0;
	pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
	pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
	pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
}
EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
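
/* Example (a sketch combining the three lookup tables above): for a plain
 * IPv4/TCP packet type,
 *
 *	struct libeth_rx_pt pt = {
 *		.outer_ip	= LIBETH_RX_PT_OUTER_IPV4,
 *		.inner_prot	= LIBETH_RX_PT_INNER_TCP,
 *		.payload_layer	= LIBETH_RX_PT_PAYLOAD_L4,
 *	};
 *
 *	libeth_rx_pt_gen_hash_type(&pt);
 *
 * should yield XDP_RSS_L3_IPV4 | XDP_RSS_L4 | XDP_RSS_L4_TCP, i.e. the
 * composite XDP_RSS_TYPE_L4_IPV4_TCP from &enum xdp_rss_hash_type.
 */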

/* Module */

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Common Ethernet library");
MODULE_LICENSE("GPL");