// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * have spent some effort to ensure the datapath with redirect maps does not
 * use any locking. This is a quick note on the details.
 *
 * There are three possible paths into the devmap control plane: bpf
 * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used
 * to modify the netdev_map array. Then, because the datapath does a lookup
 * into the netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an RCU grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using a per-cpu flush list. The bpf_dtab_netdev
 * object will not be destroyed until this list is empty, indicating that all
 * outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed. This is a two-step process:
 * (a) dereference the bpf_dtab_netdev object in netdev_map and (b) check
 * whether its ifindex is the same as that of the net_device being removed.
 * When removing the dev, a cmpxchg() is used to ensure the correct dev is
 * removed; in the case of a concurrent update or delete operation it is
 * possible that the initially referenced dev is no longer in the map. While
 * the notifier hook walks the map, we know that no new dev references can be
 * added by the user, because core infrastructure ensures that
 * dev_get_by_index() calls will fail at this point.
 *
 * The devmap_hash type is a map type which interprets keys as ifindexes and
 * indexes these using a hashmap. This allows maps that use ifindex as key to
 * be densely packed instead of having holes in the lookup array for unused
 * ifindexes. The setup and packet enqueue/send code is shared between the
 * two types of devmap; only lookup and insertion differ.
 */
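
/* Example (illustrative sketch, not part of this file): the BPF side of a
 * redirect through a devmap. The map name "tx_ports" and the SEC()/.maps
 * conventions are libbpf-style assumptions; error handling is elided.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(struct bpf_devmap_val));
 *		__uint(max_entries, 64);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		// The low bits of flags hold the action returned on a
 *		// failed lookup; here such packets fall back to XDP_PASS.
 *		return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *	}
 */
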
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

struct xdp_dev_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct net_device *dev;
	struct net_device *dev_rx;
	struct bpf_prog *xdp_prog;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct hlist_node index_hlist;
	struct bpf_prog *xdp_prog;
	struct rcu_head rcu;
	unsigned int idx;
	struct bpf_devmap_val val;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
	struct list_head list;

	/* these are only used for DEVMAP_HASH type maps */
	struct hlist_head *dev_index_head;
	spinlock_t index_lock;
	unsigned int items;
	u32 n_buckets;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static struct hlist_head *dev_map_create_hash(unsigned int entries,
					      int numa_node)
{
	int i;
	struct hlist_head *hash;

	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
	if (hash != NULL)
		for (i = 0; i < entries; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
						    int idx)
{
	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
}

static int dev_map_alloc_check(union bpf_attr *attr)
{
	u32 valsize = attr->value_size;

	/* Check sanity of attributes. Two value sizes are supported:
	 * 4 bytes: ifindex
	 * 8 bytes: ifindex + prog fd
	 */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return -EINVAL;

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		/* Hash table size must be a power of 2; roundup_pow_of_two()
		 * can overflow into UB on 32-bit arches
		 */
		if (attr->max_entries > 1UL << 31)
			return -EINVAL;
	}

	return 0;
}
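
/* Example (illustrative sketch, not part of this file): a user space update
 * using the 8-byte value layout checked above, via libbpf. "map_fd" and
 * "prog_fd" are assumed to be valid descriptors; error handling is elided.
 *
 *	struct bpf_devmap_val val = {
 *		.ifindex = 4,		// egress net device
 *		.bpf_prog.fd = prog_fd,	// BPF_XDP_DEVMAP program, or 0
 *	};
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */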

static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
{
	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
	 * verifier prevents writes from the BPF side
	 */
	attr->map_flags |= BPF_F_RDONLY_PROG;
	bpf_map_init_from_attr(&dtab->map, attr);

	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		/* Hash table size must be a power of 2 */
		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
							   dtab->map.numa_node);
		if (!dtab->dev_index_head)
			return -ENOMEM;

		spin_lock_init(&dtab->index_lock);
	} else {
		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
						      sizeof(struct bpf_dtab_netdev *),
						      dtab->map.numa_node);
		if (!dtab->netdev_map)
			return -ENOMEM;
	}

	return 0;
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err;

	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	err = dev_map_init_map(dtab, attr);
	if (err) {
		bpf_map_area_free(dtab);
		return ERR_PTR(err);
	}

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. The following synchronize_rcu()
	 * guarantees that all RCU read critical sections have completed and
	 * waits for preempt-disable regions (NAPI being the relevant context
	 * here), so we are certain there will be no further reads against the
	 * netdev_map and all flush operations are complete. Flush operations
	 * can only be done from NAPI context for this reason.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	/* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
	 * during the NAPI callback and cleared after the XDP redirect. There
	 * is no explicit RCU read section which protects
	 * bpf_redirect_info->map, but local_bh_disable() also marks the
	 * beginning of an RCU read section. This makes the complete softirq
	 * callback RCU protected. Thus, after the following
	 * synchronize_rcu(), no bpf_redirect_info->map == map assignment can
	 * remain.
	 */
	synchronize_rcu();

	/* Make sure prior __dev_map_entry_free() calls have completed. */
	rcu_barrier();

	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
		for (i = 0; i < dtab->n_buckets; i++) {
			struct bpf_dtab_netdev *dev;
			struct hlist_head *head;
			struct hlist_node *next;

			head = dev_map_index_hash(dtab, i);

			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
				hlist_del_rcu(&dev->index_hlist);
				if (dev->xdp_prog)
					bpf_prog_put(dev->xdp_prog);
				dev_put(dev->dev);
				kfree(dev);
			}
		}

		bpf_map_area_free(dtab->dev_index_head);
	} else {
		for (i = 0; i < dtab->map.max_entries; i++) {
			struct bpf_dtab_netdev *dev;

			dev = rcu_dereference_raw(dtab->netdev_map[i]);
			if (!dev)
				continue;

			if (dev->xdp_prog)
				bpf_prog_put(dev->xdp_prog);
			dev_put(dev->dev);
			kfree(dev);
		}

		bpf_map_area_free(dtab->netdev_map);
	}

	bpf_map_area_free(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
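
/* Example (illustrative sketch, not part of this file): walking all keys
 * from user space with libbpf. Per the logic above, a NULL or out-of-range
 * key restarts the walk at index 0; "map_fd" is an assumed valid descriptor.
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *		key = next;
 *		// ... look up or print 'key' here ...
 *	}
 */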

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct hlist_head *head = dev_map_index_hash(dtab, key);
	struct bpf_dtab_netdev *dev;

	hlist_for_each_entry_rcu(dev, head, index_hlist,
				 lockdep_is_held(&dtab->index_lock))
		if (dev->idx == key)
			return dev;

	return NULL;
}

static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
				    void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 idx, *next = next_key;
	struct bpf_dtab_netdev *dev, *next_dev;
	struct hlist_head *head;
	int i = 0;

	if (!key)
		goto find_first;

	idx = *(u32 *)key;

	dev = __dev_map_hash_lookup_elem(map, idx);
	if (!dev)
		goto find_first;

	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
				    struct bpf_dtab_netdev, index_hlist);

	if (next_dev) {
		*next = next_dev->idx;
		return 0;
	}

	i = idx & (dtab->n_buckets - 1);
	i++;

 find_first:
	for (; i < dtab->n_buckets; i++) {
		head = dev_map_index_hash(dtab, i);

		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
					    struct bpf_dtab_netdev,
					    index_hlist);
		if (next_dev) {
			*next = next_dev->idx;
			return 0;
		}
	}

	return -ENOENT;
}

static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
				struct xdp_frame **frames, int n,
				struct net_device *dev)
{
	struct xdp_txq_info txq = { .dev = dev };
	struct xdp_buff xdp;
	int i, nframes = 0;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		xdp_convert_frame_to_buff(xdpf, &xdp);
		xdp.txq = &txq;

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (unlikely(err < 0))
				xdp_return_frame_rx_napi(xdpf);
			else
				frames[nframes++] = xdpf;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(dev, xdp_prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame_rx_napi(xdpf);
			break;
		}
	}
	return nframes; /* sent frames count */
}
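
/* Example (illustrative sketch, not part of this file): a second-level
 * program of the kind run above, loaded with expected attach type
 * BPF_XDP_DEVMAP. The "xdp/devmap" section name follows libbpf conventions,
 * and BLOCKED_IFINDEX is a hypothetical constant.
 *
 *	SEC("xdp/devmap")
 *	int xdp_devmap_filter(struct xdp_md *ctx)
 *	{
 *		// egress_ifindex is only populated for devmap programs
 *		if (ctx->egress_ifindex == BLOCKED_IFINDEX)
 *			return XDP_DROP;
 *		return XDP_PASS;
 *	}
 */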

static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = bq->dev;
	unsigned int cnt = bq->count;
	int sent = 0, err = 0;
	int to_send = cnt;
	int i;

	if (unlikely(!cnt))
		return;

	for (i = 0; i < cnt; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	if (bq->xdp_prog) {
		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!to_send)
			goto out;
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
	if (sent < 0) {
		/* If ndo_xdp_xmit fails with an errno, no frames have
		 * been xmit'ed.
		 */
		err = sent;
		sent = 0;
	}

	/* If not all frames have been transmitted, it is our
	 * responsibility to free them
	 */
	for (i = sent; unlikely(i < to_send); i++)
		xdp_return_frame_rx_napi(bq->q[i]);

out:
	bq->count = 0;
	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
}

/* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
 * driver before returning from its napi->poll() routine. See the comment above
 * xdp_do_flush() in filter.c.
 */
void __dev_flush(struct list_head *flush_list)
{
	struct xdp_dev_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_xmit_all(bq, XDP_XMIT_FLUSH);
		bq->dev_rx = NULL;
		bq->xdp_prog = NULL;
		__list_del_clearprev(&bq->flush_node);
	}
}
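
/* Example (illustrative sketch, not part of this file): where the flush
 * contract lands in a hypothetical driver. mydrv_clean_rx() stands in for an
 * RX loop that may call xdp_do_redirect() and thereby bq_enqueue() below.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = mydrv_clean_rx(napi, budget);
 *
 *		xdp_do_flush();	// drains per-CPU bulk queues via __dev_flush()
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */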

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = rcu_dereference_check(dtab->netdev_map[key],
				    rcu_read_lock_bh_held());
	return obj;
}

/* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, percpu variable
 * access is safe and map elements stick around. See comment above
 * xdp_do_flush() in filter.c.
 */
static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
{
	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the bulk
	 * queue, because bq is stored per-CPU and must be flushed at the end
	 * of the net_device driver's NAPI function.
	 *
	 * Do the same with xdp_prog and flush_list since these fields
	 * are only ever modified together.
	 */
	if (!bq->dev_rx) {
		struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();

		bq->dev_rx = dev_rx;
		bq->xdp_prog = xdp_prog;
		list_add(&bq->flush_node, flush_list);
	}

	bq->q[bq->count++] = xdpf;
}

static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
				struct net_device *dev_rx,
				struct bpf_prog *xdp_prog)
{
	int err;

	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return -EOPNOTSUPP;

	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
	if (unlikely(err))
		return err;

	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
	return 0;
}

static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
{
	struct xdp_txq_info txq = { .dev = dst->dev };
	struct xdp_buff xdp;
	u32 act;

	if (!dst->xdp_prog)
		return XDP_PASS;

	__skb_pull(skb, skb->mac_len);
	xdp.txq = &txq;

	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
	switch (act) {
	case XDP_PASS:
		__skb_push(skb, skb->mac_len);
		break;
	default:
		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		kfree_skb(skb);
		break;
	}

	return act;
}

int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;

	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
}

static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
{
	if (!obj)
		return false;

	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
		return false;

	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
		     xdp_frame_has_frags(xdpf)))
		return false;

	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
		return false;

	return true;
}

static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
				 struct net_device *dev_rx,
				 struct xdp_frame *xdpf)
{
	struct xdp_frame *nxdpf;

	nxdpf = xdpf_clone(xdpf);
	if (!nxdpf)
		return -ENOMEM;

	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);

	return 0;
}

static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
{
	while (num_excluded--) {
		if (ifindex == excluded[num_excluded])
			return true;
	}
	return false;
}

/* Get ifindex of each upper device. 'indexes' must be able to hold at
 * least MAX_NEST_DEV elements.
 * Returns the number of ifindexes added.
 */
static int get_upper_ifindexes(struct net_device *dev, int *indexes)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		indexes[n++] = upper->ifindex;
	}
	return n;
}

int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
			  struct bpf_map *map, bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices);
		excluded_devices[num_excluded++] = dev_rx->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!is_valid_dst(dst, xdpf))
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
			if (err)
				return err;

			last_dst = dst;
		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_rcu(dst, head, index_hlist,
						 lockdep_is_held(&dtab->index_lock)) {
				if (!is_valid_dst(dst, xdpf))
					continue;

				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the last copy of the frame */
	if (last_dst)
		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
	else
		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */

	return 0;
}
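
/* Example (illustrative sketch, not part of this file): the BPF-side call
 * that reaches this broadcast path. With BPF_F_BROADCAST the key is ignored
 * and the frame is cloned to every valid, non-excluded entry; "fwd_ports" is
 * a hypothetical DEVMAP declared as in the earlier sketch.
 *
 *	SEC("xdp")
 *	int xdp_broadcast(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&fwd_ports, 0,
 *					BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *	}
 */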

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;

	/* Redirect has already succeeded semantically at this point, so we
	 * just return 0 even if the packet is dropped. The helper below takes
	 * care of freeing the skb.
	 */
	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
		return 0;

	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
				  struct sk_buff *skb,
				  struct bpf_prog *xdp_prog)
{
	struct sk_buff *nskb;
	int err;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;

	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
	if (unlikely(err)) {
		consume_skb(nskb);
		return err;
	}

	return 0;
}

int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
			   struct bpf_prog *xdp_prog, struct bpf_map *map,
			   bool exclude_ingress)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dst, *last_dst = NULL;
	int excluded_devices[1+MAX_NEST_DEV];
	struct hlist_head *head;
	struct hlist_node *next;
	int num_excluded = 0;
	unsigned int i;
	int err;

	if (exclude_ingress) {
		num_excluded = get_upper_ifindexes(dev, excluded_devices);
		excluded_devices[num_excluded++] = dev->ifindex;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		for (i = 0; i < map->max_entries; i++) {
			dst = rcu_dereference_check(dtab->netdev_map[i],
						    rcu_read_lock_bh_held());
			if (!dst)
				continue;

			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
				continue;

			/* we only need n-1 clones; last_dst enqueued below */
			if (!last_dst) {
				last_dst = dst;
				continue;
			}

			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
			if (err)
				return err;

			last_dst = dst;

		}
	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
		for (i = 0; i < dtab->n_buckets; i++) {
			head = dev_map_index_hash(dtab, i);
			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
				if (is_ifindex_excluded(excluded_devices, num_excluded,
							dst->dev->ifindex))
					continue;

				/* we only need n-1 clones; last_dst enqueued below */
				if (!last_dst) {
					last_dst = dst;
					continue;
				}

				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
				if (err)
					return err;

				last_dst = dst;
			}
		}
	}

	/* consume the first skb and return */
	if (last_dst)
		return dev_map_generic_redirect(last_dst, skb, xdp_prog);

	/* dtab is empty */
	consume_skb(skb);
	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);

	return obj ? &obj->val : NULL;
}

static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
								*(u32 *)key);
	return obj ? &obj->val : NULL;
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
}

static long dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
	if (old_dev) {
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		atomic_dec((atomic_t *)&dtab->items);
	}
	return 0;
}

static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, k);
	if (old_dev) {
		dtab->items--;
		hlist_del_init_rcu(&old_dev->index_hlist);
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
		ret = 0;
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	return ret;
}

static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
						    struct bpf_dtab *dtab,
						    struct bpf_devmap_val *val,
						    unsigned int idx)
{
	struct bpf_prog *prog = NULL;
	struct bpf_dtab_netdev *dev;

	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
				   GFP_NOWAIT | __GFP_NOWARN,
				   dtab->map.numa_node);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	dev->dev = dev_get_by_index(net, val->ifindex);
	if (!dev->dev)
		goto err_out;

	if (val->bpf_prog.fd > 0) {
		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
					     BPF_PROG_TYPE_XDP, false);
		if (IS_ERR(prog))
			goto err_put_dev;
		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
		    !bpf_prog_map_compatible(&dtab->map, prog))
			goto err_put_prog;
	}

	dev->idx = idx;
	if (prog) {
		dev->xdp_prog = prog;
		dev->val.bpf_prog.id = prog->aux->id;
	} else {
		dev->xdp_prog = NULL;
		dev->val.bpf_prog.id = 0;
	}
	dev->val.ifindex = val->ifindex;

	return dev;
err_put_prog:
	bpf_prog_put(prog);
err_put_dev:
	dev_put(dev->dev);
err_out:
	kfree(dev);
	return ERR_PTR(-EINVAL);
}

static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
				  void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 i = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (!val.ifindex) {
		dev = NULL;
		/* cannot specify fd if ifindex is 0 */
		if (val.bpf_prog.fd > 0)
			return -EINVAL;
	} else {
		dev = __dev_map_alloc_node(net, dtab, &val, i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}

	/* Use call_rcu() here to ensure that rcu critical sections have
	 * completed. Remember that the driver-side flush operation will
	 * happen before the net device is removed.
	 */
	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	else
		atomic_inc((atomic_t *)&dtab->items);

	return 0;
}

static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	return __dev_map_update_elem(current->nsproxy->net_ns,
				     map, key, value, map_flags);
}

static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
				       void *key, void *value, u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *dev, *old_dev;
	struct bpf_devmap_val val = {};
	u32 idx = *(u32 *)key;
	unsigned long flags;
	int err = -EEXIST;

	/* already verified value_size <= sizeof val */
	memcpy(&val, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
		return -EINVAL;

	spin_lock_irqsave(&dtab->index_lock, flags);

	old_dev = __dev_map_hash_lookup_elem(map, idx);
	if (old_dev && (map_flags & BPF_NOEXIST))
		goto out_err;

	dev = __dev_map_alloc_node(net, dtab, &val, idx);
	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		goto out_err;
	}

	if (old_dev) {
		hlist_del_rcu(&old_dev->index_hlist);
	} else {
		if (dtab->items >= dtab->map.max_entries) {
			spin_unlock_irqrestore(&dtab->index_lock, flags);
			call_rcu(&dev->rcu, __dev_map_entry_free);
			return -E2BIG;
		}
		dtab->items++;
	}

	hlist_add_head_rcu(&dev->index_hlist,
			   dev_map_index_hash(dtab, idx));
	spin_unlock_irqrestore(&dtab->index_lock, flags);

	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;

out_err:
	spin_unlock_irqrestore(&dtab->index_lock, flags);
	return err;
}

static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
				     u64 map_flags)
{
	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
					 map, key, value, map_flags);
}

static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_lookup_elem);
}

static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags,
				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
				      __dev_map_hash_lookup_elem);
}

static u64 dev_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u64 usage = sizeof(struct bpf_dtab);

	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
	else
		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
	usage += atomic_read((atomic_t *)&dtab->items) *
			 (u64)sizeof(struct bpf_dtab_netdev);
	return usage;
}

BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
const struct bpf_map_ops dev_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = dev_map_alloc_check,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_map_redirect,
};

const struct bpf_map_ops dev_map_hash_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = dev_map_alloc_check,
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_hash_get_next_key,
	.map_lookup_elem = dev_map_hash_lookup_elem,
	.map_update_elem = dev_map_hash_update_elem,
	.map_delete_elem = dev_map_hash_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = dev_map_mem_usage,
	.map_btf_id = &dev_map_btf_ids[0],
	.map_redirect = dev_hash_map_redirect,
};

static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
				       struct net_device *netdev)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&dtab->index_lock, flags);
	for (i = 0; i < dtab->n_buckets; i++) {
		struct bpf_dtab_netdev *dev;
		struct hlist_head *head;
		struct hlist_node *next;

		head = dev_map_index_hash(dtab, i);

		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
			if (netdev != dev->dev)
				continue;

			dtab->items--;
			hlist_del_rcu(&dev->index_hlist);
			call_rcu(&dev->rcu, __dev_map_entry_free);
		}
	}
	spin_unlock_irqrestore(&dtab->index_lock, flags);
}

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i, cpu;

	switch (event) {
	case NETDEV_REGISTER:
		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
			break;

		/* will be freed in free_netdev() */
		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
		if (!netdev->xdp_bulkq)
			return NOTIFY_BAD;

		for_each_possible_cpu(cpu)
			per_cpu_ptr(netdev->xdp_bulkq, cpu)->dev = netdev;
		break;
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed both because
		 * dev_map_list is an RCU list and to ensure that a delete
		 * operation does not free a netdev_map entry while we are
		 * comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
				dev_map_hash_remove_netdev(dtab, netdev);
				continue;
			}

			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = rcu_dereference(dtab->netdev_map[i]);
				if (!dev || netdev != dev->dev)
					continue;
				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
				if (dev == odev) {
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
					atomic_dec((atomic_t *)&dtab->items);
				}
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Ensure the tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);

	return 0;
}

subsys_initcall(dev_map_init);