1 // SPDX-License-Identifier: GPL-2.0-only
2 /* net/core/xdp.c
3  *
4  * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
5  */
6 #include <linux/bpf.h>
7 #include <linux/btf.h>
8 #include <linux/btf_ids.h>
9 #include <linux/filter.h>
10 #include <linux/types.h>
11 #include <linux/mm.h>
12 #include <linux/netdevice.h>
13 #include <linux/slab.h>
14 #include <linux/idr.h>
15 #include <linux/rhashtable.h>
16 #include <linux/bug.h>
17 #include <net/page_pool/helpers.h>
18 
19 #include <net/hotdata.h>
20 #include <net/xdp.h>
21 #include <net/xdp_priv.h> /* struct xdp_mem_allocator */
22 #include <trace/events/xdp.h>
23 #include <net/xdp_sock_drv.h>
24 
25 #define REG_STATE_NEW		0x0
26 #define REG_STATE_REGISTERED	0x1
27 #define REG_STATE_UNREGISTERED	0x2
28 #define REG_STATE_UNUSED	0x3
29 
30 static DEFINE_IDA(mem_id_pool);
31 static DEFINE_MUTEX(mem_id_lock);
32 #define MEM_ID_MAX 0xFFFE
33 #define MEM_ID_MIN 1
34 static int mem_id_next = MEM_ID_MIN;
35 
36 static bool mem_id_init; /* false */
37 static struct rhashtable *mem_id_ht;
38 
39 static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
40 {
41 	const u32 *k = data;
42 	const u32 key = *k;
43 
44 	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
45 		     != sizeof(u32));
46 
47 	/* Use the cyclically increasing ID directly as the hash key */
48 	return key;
49 }
50 
51 static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
52 			  const void *ptr)
53 {
54 	const struct xdp_mem_allocator *xa = ptr;
55 	u32 mem_id = *(u32 *)arg->key;
56 
57 	return xa->mem.id != mem_id;
58 }
59 
60 static const struct rhashtable_params mem_id_rht_params = {
61 	.nelem_hint = 64,
62 	.head_offset = offsetof(struct xdp_mem_allocator, node),
63 	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
64 	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
65 	.max_size = MEM_ID_MAX,
66 	.min_size = 8,
67 	.automatic_shrinking = true,
68 	.hashfn    = xdp_mem_id_hashfn,
69 	.obj_cmpfn = xdp_mem_id_cmp,
70 };
71 
72 static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
73 {
74 	struct xdp_mem_allocator *xa;
75 
76 	xa = container_of(rcu, struct xdp_mem_allocator, rcu);
77 
78 	/* Allow this ID to be reused */
79 	ida_free(&mem_id_pool, xa->mem.id);
80 
81 	kfree(xa);
82 }
83 
84 static void mem_xa_remove(struct xdp_mem_allocator *xa)
85 {
86 	trace_mem_disconnect(xa);
87 
88 	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
89 		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
90 }
91 
92 static void mem_allocator_disconnect(void *allocator)
93 {
94 	struct xdp_mem_allocator *xa;
95 	struct rhashtable_iter iter;
96 
97 	mutex_lock(&mem_id_lock);
98 
99 	rhashtable_walk_enter(mem_id_ht, &iter);
100 	do {
101 		rhashtable_walk_start(&iter);
102 
103 		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
104 			if (xa->allocator == allocator)
105 				mem_xa_remove(xa);
106 		}
107 
108 		rhashtable_walk_stop(&iter);
109 
110 	} while (xa == ERR_PTR(-EAGAIN));
111 	rhashtable_walk_exit(&iter);
112 
113 	mutex_unlock(&mem_id_lock);
114 }
115 
116 void xdp_unreg_mem_model(struct xdp_mem_info *mem)
117 {
118 	struct xdp_mem_allocator *xa;
119 	int type = mem->type;
120 	int id = mem->id;
121 
122 	/* Reset mem info to defaults */
123 	mem->id = 0;
124 	mem->type = 0;
125 
126 	if (id == 0)
127 		return;
128 
129 	if (type == MEM_TYPE_PAGE_POOL) {
130 		rcu_read_lock();
131 		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
132 		page_pool_destroy(xa->page_pool);
133 		rcu_read_unlock();
134 	}
135 }
136 EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
137 
138 void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
139 {
140 	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
141 		WARN(1, "Missing register, driver bug");
142 		return;
143 	}
144 
145 	xdp_unreg_mem_model(&xdp_rxq->mem);
146 }
147 EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
148 
149 void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
150 {
151 	/* Simplify driver cleanup code paths, allow unreg "unused" */
152 	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
153 		return;
154 
155 	xdp_rxq_info_unreg_mem_model(xdp_rxq);
156 
157 	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
158 	xdp_rxq->dev = NULL;
159 }
160 EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);
161 
162 static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
163 {
164 	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
165 }
166 
167 /* Returns 0 on success, negative on failure */
168 int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
169 		       struct net_device *dev, u32 queue_index,
170 		       unsigned int napi_id, u32 frag_size)
171 {
172 	if (!dev) {
173 		WARN(1, "Missing net_device from driver");
174 		return -ENODEV;
175 	}
176 
177 	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
178 		WARN(1, "Driver promised not to register this");
179 		return -EINVAL;
180 	}
181 
182 	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
183 		WARN(1, "Missing unregister, handled but fix driver");
184 		xdp_rxq_info_unreg(xdp_rxq);
185 	}
186 
187 	/* State either UNREGISTERED or NEW */
188 	xdp_rxq_info_init(xdp_rxq);
189 	xdp_rxq->dev = dev;
190 	xdp_rxq->queue_index = queue_index;
191 	xdp_rxq->napi_id = napi_id;
192 	xdp_rxq->frag_size = frag_size;
193 
194 	xdp_rxq->reg_state = REG_STATE_REGISTERED;
195 	return 0;
196 }
197 EXPORT_SYMBOL_GPL(__xdp_rxq_info_reg);
198 
199 void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
200 {
201 	xdp_rxq->reg_state = REG_STATE_UNUSED;
202 }
203 EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);
204 
205 bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
206 {
207 	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
208 }
209 EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);
210 
211 static int __mem_id_init_hash_table(void)
212 {
213 	struct rhashtable *rht;
214 	int ret;
215 
216 	if (unlikely(mem_id_init))
217 		return 0;
218 
219 	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
220 	if (!rht)
221 		return -ENOMEM;
222 
223 	ret = rhashtable_init(rht, &mem_id_rht_params);
224 	if (ret < 0) {
225 		kfree(rht);
226 		return ret;
227 	}
228 	mem_id_ht = rht;
229 	smp_mb(); /* mutex lock should provide enough pairing */
230 	mem_id_init = true;
231 
232 	return 0;
233 }
234 
235 /* Allocate a cyclic ID that maps to allocator pointer.
236  * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
237  *
238  * Caller must lock mem_id_lock.
239  */
240 static int __mem_id_cyclic_get(gfp_t gfp)
241 {
242 	int retries = 1;
243 	int id;
244 
245 again:
246 	id = ida_alloc_range(&mem_id_pool, mem_id_next, MEM_ID_MAX - 1, gfp);
247 	if (id < 0) {
248 		if (id == -ENOSPC) {
249 			/* Cyclic allocator, reset next id */
250 			if (retries--) {
251 				mem_id_next = MEM_ID_MIN;
252 				goto again;
253 			}
254 		}
255 		return id; /* errno */
256 	}
257 	mem_id_next = id + 1;
258 
259 	return id;
260 }
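
/* Worked example (using the constants above): if mem_id_next has advanced
 * close to MEM_ID_MAX and every ID in [mem_id_next, MEM_ID_MAX - 1] is
 * still in use, ida_alloc_range() returns -ENOSPC; the single retry then
 * resets mem_id_next to MEM_ID_MIN and hands out the lowest free ID in
 * [MEM_ID_MIN, MEM_ID_MAX - 1].
 */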
261 
262 static bool __is_supported_mem_type(enum xdp_mem_type type)
263 {
264 	if (type == MEM_TYPE_PAGE_POOL)
265 		return is_page_pool_compiled_in();
266 
267 	if (type >= MEM_TYPE_MAX)
268 		return false;
269 
270 	return true;
271 }
272 
273 static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
274 						     enum xdp_mem_type type,
275 						     void *allocator)
276 {
277 	struct xdp_mem_allocator *xdp_alloc;
278 	gfp_t gfp = GFP_KERNEL;
279 	int id, errno, ret;
280 	void *ptr;
281 
282 	if (!__is_supported_mem_type(type))
283 		return ERR_PTR(-EOPNOTSUPP);
284 
285 	mem->type = type;
286 
287 	if (!allocator) {
288 		if (type == MEM_TYPE_PAGE_POOL)
289 			return ERR_PTR(-EINVAL); /* Setup-time check: page_pool needs allocator */
290 		return NULL;
291 	}
292 
293 	/* Delay init of rhashtable to save memory if feature isn't used */
294 	if (!mem_id_init) {
295 		mutex_lock(&mem_id_lock);
296 		ret = __mem_id_init_hash_table();
297 		mutex_unlock(&mem_id_lock);
298 		if (ret < 0)
299 			return ERR_PTR(ret);
300 	}
301 
302 	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
303 	if (!xdp_alloc)
304 		return ERR_PTR(-ENOMEM);
305 
306 	mutex_lock(&mem_id_lock);
307 	id = __mem_id_cyclic_get(gfp);
308 	if (id < 0) {
309 		errno = id;
310 		goto err;
311 	}
312 	mem->id = id;
313 	xdp_alloc->mem = *mem;
314 	xdp_alloc->allocator = allocator;
315 
316 	/* Insert allocator into ID lookup table */
317 	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
318 	if (IS_ERR(ptr)) {
319 		ida_free(&mem_id_pool, mem->id);
320 		mem->id = 0;
321 		errno = PTR_ERR(ptr);
322 		goto err;
323 	}
324 
325 	if (type == MEM_TYPE_PAGE_POOL)
326 		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);
327 
328 	mutex_unlock(&mem_id_lock);
329 
330 	return xdp_alloc;
331 err:
332 	mutex_unlock(&mem_id_lock);
333 	kfree(xdp_alloc);
334 	return ERR_PTR(errno);
335 }
336 
337 int xdp_reg_mem_model(struct xdp_mem_info *mem,
338 		      enum xdp_mem_type type, void *allocator)
339 {
340 	struct xdp_mem_allocator *xdp_alloc;
341 
342 	xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
343 	if (IS_ERR(xdp_alloc))
344 		return PTR_ERR(xdp_alloc);
345 	return 0;
346 }
347 EXPORT_SYMBOL_GPL(xdp_reg_mem_model);
348 
349 int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
350 			       enum xdp_mem_type type, void *allocator)
351 {
352 	struct xdp_mem_allocator *xdp_alloc;
353 
354 	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
355 		WARN(1, "Missing register, driver bug");
356 		return -EFAULT;
357 	}
358 
359 	xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
360 	if (IS_ERR(xdp_alloc))
361 		return PTR_ERR(xdp_alloc);
362 
363 	if (trace_mem_connect_enabled() && xdp_alloc)
364 		trace_mem_connect(xdp_alloc, xdp_rxq);
365 	return 0;
366 }
368 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
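
/* Usage sketch (hypothetical driver; "ring" and its members are
 * illustrative names, not an in-tree API): a typical RX ring setup
 * registers the rxq info first, then attaches a page_pool backed memory
 * model; teardown goes through xdp_rxq_info_unreg(), which also
 * unregisters the memory model.
 *
 *	err = xdp_rxq_info_reg(&ring->xdp_rxq, dev, queue_index, napi_id);
 *	if (err)
 *		return err;
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, ring->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *		return err;
 *	}
 *	...
 *	xdp_rxq_info_unreg(&ring->xdp_rxq);	(on ring teardown)
 */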
369 
370 /* XDP RX runs under NAPI protection, and in different delivery error
371  * scenarios (e.g. queue full), it is possible to return the xdp_frame
372  * while still leveraging this protection.  The @napi_direct boolean
373  * is used for those call sites, thus allowing faster recycling
374  * of xdp_frames/pages in those cases.
375  */
376 void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
377 		  struct xdp_buff *xdp)
378 {
379 	struct page *page;
380 
381 	switch (mem->type) {
382 	case MEM_TYPE_PAGE_POOL:
383 		page = virt_to_head_page(data);
384 		if (napi_direct && xdp_return_frame_no_direct())
385 			napi_direct = false;
386 		/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
387 		 * as mem->type knows this is a page_pool page
388 		 */
389 		page_pool_put_full_page(page->pp, page, napi_direct);
390 		break;
391 	case MEM_TYPE_PAGE_SHARED:
392 		page_frag_free(data);
393 		break;
394 	case MEM_TYPE_PAGE_ORDER0:
395 		page = virt_to_page(data); /* Assumes order-0 page */
396 		put_page(page);
397 		break;
398 	case MEM_TYPE_XSK_BUFF_POOL:
399 		/* NB! Only valid from an xdp_buff! */
400 		xsk_buff_free(xdp);
401 		break;
402 	default:
403 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
404 		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
405 		break;
406 	}
407 }
408 
409 void xdp_return_frame(struct xdp_frame *xdpf)
410 {
411 	struct skb_shared_info *sinfo;
412 	int i;
413 
414 	if (likely(!xdp_frame_has_frags(xdpf)))
415 		goto out;
416 
417 	sinfo = xdp_get_shared_info_from_frame(xdpf);
418 	for (i = 0; i < sinfo->nr_frags; i++) {
419 		struct page *page = skb_frag_page(&sinfo->frags[i]);
420 
421 		__xdp_return(page_address(page), &xdpf->mem, false, NULL);
422 	}
423 out:
424 	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
425 }
426 EXPORT_SYMBOL_GPL(xdp_return_frame);
427 
428 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
429 {
430 	struct skb_shared_info *sinfo;
431 	int i;
432 
433 	if (likely(!xdp_frame_has_frags(xdpf)))
434 		goto out;
435 
436 	sinfo = xdp_get_shared_info_from_frame(xdpf);
437 	for (i = 0; i < sinfo->nr_frags; i++) {
438 		struct page *page = skb_frag_page(&sinfo->frags[i]);
439 
440 		__xdp_return(page_address(page), &xdpf->mem, true, NULL);
441 	}
442 out:
443 	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
444 }
445 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
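
/* Usage sketch (hypothetical driver, illustrative condition name): the
 * _rx_napi variant maps to napi_direct == true and is only safe when the
 * caller runs under the NAPI protection described above; any other
 * context should use the plain variant.
 *
 *	if (completing_in_napi_context)
 *		xdp_return_frame_rx_napi(xdpf);
 *	else
 *		xdp_return_frame(xdpf);
 */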
446 
447 /* XDP bulk APIs introduce a defer/flush mechanism to return
448  * pages belonging to the same xdp_mem_allocator object
449  * (identified via the mem.id field) in bulk to optimize
450  * I-cache and D-cache.
451  * The bulk queue size is set to 16 to match how XDP_REDIRECT
452  * bulking works. The bulk is flushed when it is full or when
453  * mem.id changes. xdp_frame_bulk is usually stored/allocated
454  * on the function call-stack to avoid locking penalties.
455  * (A usage sketch follows xdp_return_frame_bulk() below.)
456  */
457 void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
458 {
459 	struct xdp_mem_allocator *xa = bq->xa;
460 
461 	if (unlikely(!xa || !bq->count))
462 		return;
463 
464 	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
465 	/* Keep bq->xa to save a lookup if the next bulk has the same mem.id */
466 	bq->count = 0;
467 }
468 EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
469 
470 /* Must be called with rcu_read_lock held */
471 void xdp_return_frame_bulk(struct xdp_frame *xdpf,
472 			   struct xdp_frame_bulk *bq)
473 {
474 	struct xdp_mem_info *mem = &xdpf->mem;
475 	struct xdp_mem_allocator *xa;
476 
477 	if (mem->type != MEM_TYPE_PAGE_POOL) {
478 		xdp_return_frame(xdpf);
479 		return;
480 	}
481 
482 	xa = bq->xa;
483 	if (unlikely(!xa)) {
484 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
485 		bq->count = 0;
486 		bq->xa = xa;
487 	}
488 
489 	if (bq->count == XDP_BULK_QUEUE_SIZE)
490 		xdp_flush_frame_bulk(bq);
491 
492 	if (unlikely(mem->id != xa->mem.id)) {
493 		xdp_flush_frame_bulk(bq);
494 		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
495 	}
496 
497 	if (unlikely(xdp_frame_has_frags(xdpf))) {
498 		struct skb_shared_info *sinfo;
499 		int i;
500 
501 		sinfo = xdp_get_shared_info_from_frame(xdpf);
502 		for (i = 0; i < sinfo->nr_frags; i++) {
503 			skb_frag_t *frag = &sinfo->frags[i];
504 
505 			bq->q[bq->count++] = skb_frag_address(frag);
506 			if (bq->count == XDP_BULK_QUEUE_SIZE)
507 				xdp_flush_frame_bulk(bq);
508 		}
509 	}
510 	bq->q[bq->count++] = xdpf->data;
511 }
512 EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
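
/* Usage sketch (hypothetical caller; "frames"/"n" are illustrative): the
 * bulk queue lives on the stack, is set up with xdp_frame_bulk_init() and
 * must be flushed before leaving the RCU read-side section.
 *
 *	struct xdp_frame_bulk bq;
 *	int i;
 *
 *	xdp_frame_bulk_init(&bq);
 *
 *	rcu_read_lock();
 *	for (i = 0; i < n; i++)
 *		xdp_return_frame_bulk(frames[i], &bq);
 *	xdp_flush_frame_bulk(&bq);
 *	rcu_read_unlock();
 */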
513 
514 void xdp_return_buff(struct xdp_buff *xdp)
515 {
516 	struct skb_shared_info *sinfo;
517 	int i;
518 
519 	if (likely(!xdp_buff_has_frags(xdp)))
520 		goto out;
521 
522 	sinfo = xdp_get_shared_info_from_buff(xdp);
523 	for (i = 0; i < sinfo->nr_frags; i++) {
524 		struct page *page = skb_frag_page(&sinfo->frags[i]);
525 
526 		__xdp_return(page_address(page), &xdp->rxq->mem, true, xdp);
527 	}
528 out:
529 	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
530 }
531 EXPORT_SYMBOL_GPL(xdp_return_buff);
532 
533 void xdp_attachment_setup(struct xdp_attachment_info *info,
534 			  struct netdev_bpf *bpf)
535 {
536 	if (info->prog)
537 		bpf_prog_put(info->prog);
538 	info->prog = bpf->prog;
539 	info->flags = bpf->flags;
540 }
541 EXPORT_SYMBOL_GPL(xdp_attachment_setup);
542 
543 struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
544 {
545 	unsigned int metasize, totsize;
546 	void *addr, *data_to_copy;
547 	struct xdp_frame *xdpf;
548 	struct page *page;
549 
550 	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
551 	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
552 		   xdp->data - xdp->data_meta;
553 	totsize = xdp->data_end - xdp->data + metasize;
554 
555 	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
556 		return NULL;
557 
558 	page = dev_alloc_page();
559 	if (!page)
560 		return NULL;
561 
562 	addr = page_to_virt(page);
563 	xdpf = addr;
564 	memset(xdpf, 0, sizeof(*xdpf));
565 
566 	addr += sizeof(*xdpf);
567 	data_to_copy = metasize ? xdp->data_meta : xdp->data;
568 	memcpy(addr, data_to_copy, totsize);
569 
570 	xdpf->data = addr + metasize;
571 	xdpf->len = totsize - metasize;
572 	xdpf->headroom = 0;
573 	xdpf->metasize = metasize;
574 	xdpf->frame_sz = PAGE_SIZE;
575 	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
576 
577 	xsk_buff_free(xdp);
578 	return xdpf;
579 }
580 EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
581 
582 /* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
583 void xdp_warn(const char *msg, const char *func, const int line)
584 {
585 	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
586 }
587 EXPORT_SYMBOL_GPL(xdp_warn);
588 
589 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
590 {
591 	n_skb = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, gfp, n_skb, skbs);
592 	if (unlikely(!n_skb))
593 		return -ENOMEM;
594 
595 	return 0;
596 }
597 EXPORT_SYMBOL_GPL(xdp_alloc_skb_bulk);
598 
599 struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
600 					   struct sk_buff *skb,
601 					   struct net_device *dev)
602 {
603 	struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
604 	unsigned int headroom, frame_size;
605 	void *hard_start;
606 	u8 nr_frags;
607 
608 	/* xdp frags frame */
609 	if (unlikely(xdp_frame_has_frags(xdpf)))
610 		nr_frags = sinfo->nr_frags;
611 
612 	/* Part of headroom was reserved for xdpf */
613 	headroom = sizeof(*xdpf) + xdpf->headroom;
614 
615 	/* Memory size backing xdp_frame data already has reserved
616 	 * room for build_skb to place skb_shared_info in tailroom.
617 	 */
618 	frame_size = xdpf->frame_sz;
619 
620 	hard_start = xdpf->data - headroom;
621 	skb = build_skb_around(skb, hard_start, frame_size);
622 	if (unlikely(!skb))
623 		return NULL;
624 
625 	skb_reserve(skb, headroom);
626 	__skb_put(skb, xdpf->len);
627 	if (xdpf->metasize)
628 		skb_metadata_set(skb, xdpf->metasize);
629 
630 	if (unlikely(xdp_frame_has_frags(xdpf)))
631 		xdp_update_skb_shared_info(skb, nr_frags,
632 					   sinfo->xdp_frags_size,
633 					   nr_frags * xdpf->frame_sz,
634 					   xdp_frame_is_frag_pfmemalloc(xdpf));
635 
636 	/* Essential SKB info: protocol and skb->dev */
637 	skb->protocol = eth_type_trans(skb, dev);
638 
639 	/* Optional SKB info, currently missing:
640 	 * - HW checksum info		(skb->ip_summed)
641 	 * - HW RX hash			(skb_set_hash)
642 	 * - RX ring dev queue index	(skb_record_rx_queue)
643 	 */
644 
645 	if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
646 		skb_mark_for_recycle(skb);
647 
648 	/* Allow SKB to reuse area used by xdp_frame */
649 	xdp_scrub_frame(xdpf);
650 
651 	return skb;
652 }
653 EXPORT_SYMBOL_GPL(__xdp_build_skb_from_frame);
654 
655 struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
656 					 struct net_device *dev)
657 {
658 	struct sk_buff *skb;
659 
660 	skb = kmem_cache_alloc(net_hotdata.skbuff_cache, GFP_ATOMIC);
661 	if (unlikely(!skb))
662 		return NULL;
663 
664 	memset(skb, 0, offsetof(struct sk_buff, tail));
665 
666 	return __xdp_build_skb_from_frame(xdpf, skb, dev);
667 }
668 EXPORT_SYMBOL_GPL(xdp_build_skb_from_frame);
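
/* Usage sketch (hypothetical redirect consumer, illustrative only): build
 * an sk_buff from a received xdp_frame and hand it to the stack, giving
 * the frame back on failure.
 *
 *	skb = xdp_build_skb_from_frame(xdpf, dev);
 *	if (!skb) {
 *		xdp_return_frame(xdpf);
 *		return;
 *	}
 *	netif_receive_skb(skb);
 */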
669 
670 struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
671 {
672 	unsigned int headroom, totalsize;
673 	struct xdp_frame *nxdpf;
674 	struct page *page;
675 	void *addr;
676 
677 	headroom = xdpf->headroom + sizeof(*xdpf);
678 	totalsize = headroom + xdpf->len;
679 
680 	if (unlikely(totalsize > PAGE_SIZE))
681 		return NULL;
682 	page = dev_alloc_page();
683 	if (!page)
684 		return NULL;
685 	addr = page_to_virt(page);
686 
687 	memcpy(addr, xdpf, totalsize);
688 
689 	nxdpf = addr;
690 	nxdpf->data = addr + headroom;
691 	nxdpf->frame_sz = PAGE_SIZE;
692 	nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
693 	nxdpf->mem.id = 0;
694 
695 	return nxdpf;
696 }
697 
698 __bpf_kfunc_start_defs();
699 
700 /**
701  * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
702  * @ctx: XDP context pointer.
703  * @timestamp: Return value pointer.
704  *
705  * Return:
706  * * Returns 0 on success or ``-errno`` on error.
707  * * ``-EOPNOTSUPP`` : means device driver does not implement kfunc
708  * * ``-ENODATA``    : means no RX-timestamp available for this frame
709  */
710 __bpf_kfunc int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
711 {
712 	return -EOPNOTSUPP;
713 }
714 
715 /**
716  * bpf_xdp_metadata_rx_hash - Read XDP frame RX hash.
717  * @ctx: XDP context pointer.
718  * @hash: Return value pointer.
719  * @rss_type: Return value pointer for RSS type.
720  *
721  * The RSS hash type (@rss_type) specifies what portion of packet headers NIC
722  * hardware used when calculating RSS hash value.  The RSS type can be decoded
723  * via &enum xdp_rss_hash_type either matching on individual L3/L4 bits
724  * ``XDP_RSS_L*`` or by combined traditional *RSS Hashing Types*
725  * ``XDP_RSS_TYPE_L*``.
726  *
727  * Return:
728  * * Returns 0 on success or ``-errno`` on error.
729  * * ``-EOPNOTSUPP`` : means device driver doesn't implement kfunc
730  * * ``-ENODATA``    : means no RX-hash available for this frame
731  */
732 __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
733 					 enum xdp_rss_hash_type *rss_type)
734 {
735 	return -EOPNOTSUPP;
736 }
737 
738 /**
739  * bpf_xdp_metadata_rx_vlan_tag - Get XDP packet outermost VLAN tag
740  * @ctx: XDP context pointer.
741  * @vlan_proto: Destination pointer for VLAN Tag protocol identifier (TPID).
742  * @vlan_tci: Destination pointer for VLAN TCI (VID + DEI + PCP)
743  *
744  * In case of success, ``vlan_proto`` contains *Tag protocol identifier (TPID)*,
745  * usually ``ETH_P_8021Q`` or ``ETH_P_8021AD``, but some networks can use
746  * custom TPIDs. ``vlan_proto`` is stored in **network byte order (BE)**
747  * and should be used as follows:
748  * ``if (vlan_proto == bpf_htons(ETH_P_8021Q)) do_something();``
749  *
750  * ``vlan_tci`` contains the remaining 16 bits of a VLAN tag.
751  * Driver is expected to provide those in **host byte order (usually LE)**,
752  * so the bpf program should not perform byte conversion.
753  * According to 802.1Q standard, *VLAN TCI (Tag control information)*
754  * is a bit field that contains:
755  * *VLAN identifier (VID)* that can be read with ``vlan_tci & 0xfff``,
756  * *Drop eligible indicator (DEI)* - 1 bit,
757  * *Priority code point (PCP)* - 3 bits.
758  * For detailed meaning of DEI and PCP, please refer to other sources.
759  *
760  * Return:
761  * * Returns 0 on success or ``-errno`` on error.
762  * * ``-EOPNOTSUPP`` : device driver doesn't implement kfunc
763  * * ``-ENODATA``    : VLAN tag was not stripped or is not available
764  */
765 __bpf_kfunc int bpf_xdp_metadata_rx_vlan_tag(const struct xdp_md *ctx,
766 					     __be16 *vlan_proto, u16 *vlan_tci)
767 {
768 	return -EOPNOTSUPP;
769 }
770 
771 __bpf_kfunc_end_defs();
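
/* Usage sketch (BPF program side; assumes the libbpf __ksym declaration
 * style described in Documentation/networking/xdp-rx-metadata.rst): the
 * stubs above are what such a call resolves to when the attached device
 * does not provide its own implementation.
 *
 *	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
 *						 __u64 *timestamp) __ksym;
 *
 *	SEC("xdp")
 *	int rx_prog(struct xdp_md *ctx)
 *	{
 *		__u64 ts;
 *
 *		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
 *			bpf_printk("rx hw timestamp: %llu", ts);
 *		return XDP_PASS;
 *	}
 */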
772 
773 BTF_KFUNCS_START(xdp_metadata_kfunc_ids)
774 #define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
775 XDP_METADATA_KFUNC_xxx
776 #undef XDP_METADATA_KFUNC
777 BTF_KFUNCS_END(xdp_metadata_kfunc_ids)
778 
779 static const struct btf_kfunc_id_set xdp_metadata_kfunc_set = {
780 	.owner = THIS_MODULE,
781 	.set   = &xdp_metadata_kfunc_ids,
782 };
783 
784 BTF_ID_LIST(xdp_metadata_kfunc_ids_unsorted)
785 #define XDP_METADATA_KFUNC(name, _, str, __) BTF_ID(func, str)
786 XDP_METADATA_KFUNC_xxx
787 #undef XDP_METADATA_KFUNC
788 
789 u32 bpf_xdp_metadata_kfunc_id(int id)
790 {
791 	/* xdp_metadata_kfunc_ids is sorted and can't be used */
792 	return xdp_metadata_kfunc_ids_unsorted[id];
793 }
794 
795 bool bpf_dev_bound_kfunc_id(u32 btf_id)
796 {
797 	return btf_id_set8_contains(&xdp_metadata_kfunc_ids, btf_id);
798 }
799 
800 static int __init xdp_metadata_init(void)
801 {
802 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &xdp_metadata_kfunc_set);
803 }
804 late_initcall(xdp_metadata_init);
805 
806 void xdp_set_features_flag(struct net_device *dev, xdp_features_t val)
807 {
808 	val &= NETDEV_XDP_ACT_MASK;
809 	if (dev->xdp_features == val)
810 		return;
811 
812 	dev->xdp_features = val;
813 
814 	if (dev->reg_state == NETREG_REGISTERED)
815 		call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev);
816 }
817 EXPORT_SYMBOL_GPL(xdp_set_features_flag);
818 
819 void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg)
820 {
821 	xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT);
822 
823 	if (support_sg)
824 		val |= NETDEV_XDP_ACT_NDO_XMIT_SG;
825 	xdp_set_features_flag(dev, val);
826 }
827 EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target);
828 
829 void xdp_features_clear_redirect_target(struct net_device *dev)
830 {
831 	xdp_features_t val = dev->xdp_features;
832 
833 	val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG);
834 	xdp_set_features_flag(dev, val);
835 }
836 EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target);
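
/* Usage sketch (hypothetical driver, illustrative only): advertise base
 * XDP support at probe time, then flip the redirect-target bits once
 * ndo_xdp_xmit is operational (and clear them again when it is not, e.g.
 * on teardown).
 *
 *	xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC |
 *				   NETDEV_XDP_ACT_REDIRECT |
 *				   NETDEV_XDP_ACT_RX_SG);
 *	...
 *	xdp_features_set_redirect_target(dev, true);
 *	...
 *	xdp_features_clear_redirect_target(dev);
 */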
837