xref: /linux/kernel/bpf/devmap.c (revision eb71ab2bf72260054677e348498ba995a057c463)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
3  */
4 
5 /* A devmap's primary use is as a backend map for the XDP BPF helper call
6  * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
7  * spent some effort to ensure the datapath with redirect maps does not use
8  * any locking. This is a quick note on the details.
9  *
10  * We have three possible paths to get into the devmap control plane: bpf
11  * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall
12  * will invoke an update, delete, or lookup operation. To ensure updates and
13  * deletes appear atomic from the datapath side, xchg() is used to modify the
14  * netdev_map array. Then because the datapath does a lookup into the netdev_map
15  * array (read-only) from an RCU critical section we use call_rcu() to wait for
16  * an rcu grace period before free'ing the old data structures. This ensures the
17  * datapath always has a valid copy. However, the datapath does a "flush"
18  * operation that pushes any pending packets in the driver outside the RCU
19  * critical section. Each bpf_dtab_netdev tracks these pending operations using
20  * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until
21  * this list is empty, indicating outstanding flush operations have completed.
22  *
23  * BPF syscalls may race with BPF program calls on any of the update, delete
24  * or lookup operations. As noted above, the xchg() operation also keeps the
25  * netdev_map consistent in this case. From the devmap side BPF programs
26  * calling into these operations are the same as multiple user space threads
27  * making system calls.
28  *
29  * Finally, any of the above may race with a netdev_unregister notifier. The
30  * unregister notifier must search for net devices in the map structure that
31  * contain a reference to the net device and remove them. This is a two step
32  * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b)
33  * check to see if the ifindex is the same as the net_device being removed.
34  * When removing the dev a cmpxchg() is used to ensure the correct dev is
35  * removed; in the case of a concurrent update or delete operation it is
36  * possible that the initially referenced dev is no longer in the map. As the
37  * notifier hook walks the map we know that new dev references can not be
38  * added by the user because core infrastructure ensures dev_get_by_index()
39  * calls will fail at this point.
40  *
41  * The devmap_hash type is a map type which interprets keys as ifindexes and
42  * indexes these using a hashmap. This allows maps that use ifindex as key to be
43  * densely packed instead of having holes in the lookup array for unused
44  * ifindexes. The setup and packet enqueue/send code is shared between the two
45  * types of devmap; only the lookup and insertion is different.
46  */
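/* For illustration only (not part of this file): a hedged sketch of the
 * receive-side XDP program that such a devmap typically backs. The map name,
 * size and index are assumptions, and the usual <bpf/bpf_helpers.h>
 * boilerplate is assumed.
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_devmap_val);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_example(struct xdp_md *ctx)
 *	{
 *		__u32 idx = 0;
 *
 *		return bpf_redirect_map(&tx_ports, idx, XDP_PASS);
 *	}
 */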
47 #include <linux/bpf.h>
48 #include <linux/local_lock.h>
49 #include <net/xdp.h>
50 #include <linux/filter.h>
51 #include <trace/events/xdp.h>
52 #include <linux/btf_ids.h>
53 
54 #define DEV_CREATE_FLAG_MASK \
55 	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
56 
57 struct xdp_dev_bulk_queue {
58 	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
59 	struct list_head flush_node;
60 	struct net_device *dev;
61 	struct net_device *dev_rx;
62 	struct bpf_prog *xdp_prog;
63 	unsigned int count;
64 	local_lock_t bq_lock;
65 };
66 
67 struct bpf_dtab_netdev {
68 	struct net_device *dev; /* must be first member, due to tracepoint */
69 	struct hlist_node index_hlist;
70 	struct bpf_prog *xdp_prog;
71 	struct rcu_head rcu;
72 	unsigned int idx;
73 	struct bpf_devmap_val val;
74 };
75 
76 struct bpf_dtab {
77 	struct bpf_map map;
78 	struct bpf_dtab_netdev __rcu **netdev_map; /* DEVMAP type only */
79 	struct list_head list;
80 
81 	/* these are only used for DEVMAP_HASH type maps */
82 	struct hlist_head *dev_index_head;
83 	spinlock_t index_lock;
84 	unsigned int items;
85 	u32 n_buckets;
86 };
87 
88 static DEFINE_SPINLOCK(dev_map_lock);
89 static LIST_HEAD(dev_map_list);
90 
91 static struct hlist_head *dev_map_create_hash(unsigned int entries,
92 					      int numa_node)
93 {
94 	int i;
95 	struct hlist_head *hash;
96 
97 	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
98 	if (hash != NULL)
99 		for (i = 0; i < entries; i++)
100 			INIT_HLIST_HEAD(&hash[i]);
101 
102 	return hash;
103 }
104 
105 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,
106 						    int idx)
107 {
108 	return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)];
109 }
110 
111 static int dev_map_alloc_check(union bpf_attr *attr)
112 {
113 	u32 valsize = attr->value_size;
114 
115 	/* check sanity of attributes. 2 value sizes supported:
116 	 * 4 bytes: ifindex
117 	 * 8 bytes: ifindex + prog fd
118 	 */
119 	if (attr->max_entries == 0 || attr->key_size != 4 ||
120 	    (valsize != offsetofend(struct bpf_devmap_val, ifindex) &&
121 	     valsize != offsetofend(struct bpf_devmap_val, bpf_prog.fd)) ||
122 	    attr->map_flags & ~DEV_CREATE_FLAG_MASK)
123 		return -EINVAL;
124 
125 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
126 		/* Hash table size must be power of 2; roundup_pow_of_two()
127 		 * can overflow into UB on 32-bit arches
128 		 */
129 		if (attr->max_entries > 1UL << 31)
130 			return -EINVAL;
131 	}
132 
133 	return 0;
134 }
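/* A userspace sketch of creating a map that satisfies the checks above:
 * key_size must be 4 and value_size either 4 (ifindex only) or
 * sizeof(struct bpf_devmap_val) (ifindex + prog fd). Illustration only, not
 * part of this file; the map name and size are assumptions.
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_DEVMAP_HASH, "tx_ports",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_devmap_val),
 *				    64, NULL);
 */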
135 
136 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
137 {
138 	/* Lookup returns a pointer straight to dev->ifindex, so make sure the
139 	 * verifier prevents writes from the BPF side
140 	 */
141 	attr->map_flags |= BPF_F_RDONLY_PROG;
142 	bpf_map_init_from_attr(&dtab->map, attr);
143 
144 	if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
145 		/* Hash table size must be power of 2 */
146 		dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries);
147 		dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
148 							   dtab->map.numa_node);
149 		if (!dtab->dev_index_head)
150 			return -ENOMEM;
151 
152 		spin_lock_init(&dtab->index_lock);
153 	} else {
154 		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
155 						      sizeof(struct bpf_dtab_netdev *),
156 						      dtab->map.numa_node);
157 		if (!dtab->netdev_map)
158 			return -ENOMEM;
159 	}
160 
161 	return 0;
162 }
163 
164 static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
165 {
166 	struct bpf_dtab *dtab;
167 	int err;
168 
169 	dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE);
170 	if (!dtab)
171 		return ERR_PTR(-ENOMEM);
172 
173 	err = dev_map_init_map(dtab, attr);
174 	if (err) {
175 		bpf_map_area_free(dtab);
176 		return ERR_PTR(err);
177 	}
178 
179 	spin_lock(&dev_map_lock);
180 	list_add_tail_rcu(&dtab->list, &dev_map_list);
181 	spin_unlock(&dev_map_lock);
182 
183 	return &dtab->map;
184 }
185 
186 static void dev_map_free(struct bpf_map *map)
187 {
188 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
189 	u32 i;
190 
191 	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
192  * so the programs (there can be more than one that used this map) were
193 	 * disconnected from events. The following synchronize_rcu() guarantees
194 	 * both rcu read critical sections complete and waits for
195 	 * preempt-disable regions (NAPI being the relevant context here) so we
196 	 * are certain there will be no further reads against the netdev_map and
197 	 * all flush operations are complete. Flush operations can only be done
198 	 * from NAPI context for this reason.
199 	 */
200 
201 	spin_lock(&dev_map_lock);
202 	list_del_rcu(&dtab->list);
203 	spin_unlock(&dev_map_lock);
204 
205 	/* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map()
206 	 * during NAPI callback and cleared after the XDP redirect. There is no
207 	 * explicit RCU read section which protects bpf_redirect_info->map but
208  * local_bh_disable() also marks the beginning of an RCU section. This
209  * makes the complete softirq callback RCU protected. Thus, after the
210  * following synchronize_rcu(), there is no bpf_redirect_info->map == map
211 	 * assignment.
212 	 */
213 	synchronize_rcu();
214 
215 	/* Make sure prior __dev_map_entry_free() have completed. */
216 	rcu_barrier();
217 
218 	if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
219 		for (i = 0; i < dtab->n_buckets; i++) {
220 			struct bpf_dtab_netdev *dev;
221 			struct hlist_head *head;
222 			struct hlist_node *next;
223 
224 			head = dev_map_index_hash(dtab, i);
225 
226 			hlist_for_each_entry_safe(dev, next, head, index_hlist) {
227 				hlist_del_rcu(&dev->index_hlist);
228 				if (dev->xdp_prog)
229 					bpf_prog_put(dev->xdp_prog);
230 				dev_put(dev->dev);
231 				kfree(dev);
232 			}
233 		}
234 
235 		bpf_map_area_free(dtab->dev_index_head);
236 	} else {
237 		for (i = 0; i < dtab->map.max_entries; i++) {
238 			struct bpf_dtab_netdev *dev;
239 
240 			dev = rcu_dereference_raw(dtab->netdev_map[i]);
241 			if (!dev)
242 				continue;
243 
244 			if (dev->xdp_prog)
245 				bpf_prog_put(dev->xdp_prog);
246 			dev_put(dev->dev);
247 			kfree(dev);
248 		}
249 
250 		bpf_map_area_free(dtab->netdev_map);
251 	}
252 
253 	bpf_map_area_free(dtab);
254 }
255 
256 static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
257 {
258 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
259 	u32 index = key ? *(u32 *)key : U32_MAX;
260 	u32 *next = next_key;
261 
262 	if (index >= dtab->map.max_entries) {
263 		*next = 0;
264 		return 0;
265 	}
266 
267 	if (index == dtab->map.max_entries - 1)
268 		return -ENOENT;
269 	*next = index + 1;
270 	return 0;
271 }
272 
273 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
274  * by local_bh_disable() (from XDP calls inside NAPI). The
275  * rcu_read_lock_bh_held() below makes lockdep accept both.
276  */
277 static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
278 {
279 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
280 	struct hlist_head *head = dev_map_index_hash(dtab, key);
281 	struct bpf_dtab_netdev *dev;
282 
283 	hlist_for_each_entry_rcu(dev, head, index_hlist,
284 				 lockdep_is_held(&dtab->index_lock))
285 		if (dev->idx == key)
286 			return dev;
287 
288 	return NULL;
289 }
290 
291 static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
292 				    void *next_key)
293 {
294 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
295 	u32 idx, *next = next_key;
296 	struct bpf_dtab_netdev *dev, *next_dev;
297 	struct hlist_head *head;
298 	int i = 0;
299 
300 	if (!key)
301 		goto find_first;
302 
303 	idx = *(u32 *)key;
304 
305 	dev = __dev_map_hash_lookup_elem(map, idx);
306 	if (!dev)
307 		goto find_first;
308 
309 	next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)),
310 				    struct bpf_dtab_netdev, index_hlist);
311 
312 	if (next_dev) {
313 		*next = next_dev->idx;
314 		return 0;
315 	}
316 
317 	i = idx & (dtab->n_buckets - 1);
318 	i++;
319 
320  find_first:
321 	for (; i < dtab->n_buckets; i++) {
322 		head = dev_map_index_hash(dtab, i);
323 
324 		next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
325 					    struct bpf_dtab_netdev,
326 					    index_hlist);
327 		if (next_dev) {
328 			*next = next_dev->idx;
329 			return 0;
330 		}
331 	}
332 
333 	return -ENOENT;
334 }
335 
336 static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
337 				struct xdp_frame **frames, int n,
338 				struct net_device *tx_dev,
339 				struct net_device *rx_dev)
340 {
341 	struct xdp_txq_info txq = { .dev = tx_dev };
342 	struct xdp_rxq_info rxq = { .dev = rx_dev };
343 	struct xdp_buff xdp;
344 	int i, nframes = 0;
345 
346 	for (i = 0; i < n; i++) {
347 		struct xdp_frame *xdpf = frames[i];
348 		u32 act;
349 		int err;
350 
351 		xdp_convert_frame_to_buff(xdpf, &xdp);
352 		xdp.txq = &txq;
353 		xdp.rxq = &rxq;
354 
355 		act = bpf_prog_run_xdp(xdp_prog, &xdp);
356 		switch (act) {
357 		case XDP_PASS:
358 			err = xdp_update_frame_from_buff(&xdp, xdpf);
359 			if (unlikely(err < 0))
360 				xdp_return_frame_rx_napi(xdpf);
361 			else
362 				frames[nframes++] = xdpf;
363 			break;
364 		default:
365 			bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
366 			fallthrough;
367 		case XDP_ABORTED:
368 			trace_xdp_exception(tx_dev, xdp_prog, act);
369 			fallthrough;
370 		case XDP_DROP:
371 			xdp_return_frame_rx_napi(xdpf);
372 			break;
373 		}
374 	}
375 	return nframes; /* sent frames count */
376 }
377 
378 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
379 {
380 	struct net_device *dev = bq->dev;
381 	unsigned int cnt = bq->count;
382 	int sent = 0, err = 0;
383 	int to_send = cnt;
384 	int i;
385 
386 	lockdep_assert_held(&bq->bq_lock);
387 
388 	if (unlikely(!cnt))
389 		return;
390 
391 	for (i = 0; i < cnt; i++) {
392 		struct xdp_frame *xdpf = bq->q[i];
393 
394 		prefetch(xdpf);
395 	}
396 
397 	if (bq->xdp_prog) {
398 		to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
399 		if (!to_send)
400 			goto out;
401 	}
402 
403 	sent = dev->netdev_ops->ndo_xdp_xmit(dev, to_send, bq->q, flags);
404 	if (sent < 0) {
405 		/* If ndo_xdp_xmit fails with an errno, no frames have
406 		 * been xmit'ed.
407 		 */
408 		err = sent;
409 		sent = 0;
410 	}
411 
412 	/* If not all frames have been transmitted, it is our
413 	 * responsibility to free them
414 	 */
415 	for (i = sent; unlikely(i < to_send); i++)
416 		xdp_return_frame_rx_napi(bq->q[i]);
417 
418 out:
419 	bq->count = 0;
420 	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, cnt - sent, err);
421 }
422 
423 /* __dev_flush is called from xdp_do_flush() which _must_ be signalled from the
424  * driver before returning from its napi->poll() routine. See the comment above
425  * xdp_do_flush() in filter.c.
426  */
427 void __dev_flush(struct list_head *flush_list)
428 {
429 	struct xdp_dev_bulk_queue *bq, *tmp;
430 
431 	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
432 		local_lock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
433 		bq_xmit_all(bq, XDP_XMIT_FLUSH);
434 		bq->dev_rx = NULL;
435 		bq->xdp_prog = NULL;
436 		__list_del_clearprev(&bq->flush_node);
437 		local_unlock_nested_bh(&bq->dev->xdp_bulkq->bq_lock);
438 	}
439 }
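/* Driver-side usage, sketched for illustration (not part of this file): a
 * NAPI poll routine that may have redirected frames is expected to end with
 * xdp_do_flush(), which is what reaches __dev_flush() above. The driver
 * function and its RX processing are assumptions.
 *
 *	static int example_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		... process the RX ring, possibly returning XDP_REDIRECT ...
 *
 *		xdp_do_flush();
 *		return work;
 *	}
 */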
440 
441 /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
442  * by local_bh_disable() (from XDP calls inside NAPI). The
443  * rcu_read_lock_bh_held() below makes lockdep accept both.
444  */
445 static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
446 {
447 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
448 	struct bpf_dtab_netdev *obj;
449 
450 	if (key >= map->max_entries)
451 		return NULL;
452 
453 	obj = rcu_dereference_check(dtab->netdev_map[key],
454 				    rcu_read_lock_bh_held());
455 	return obj;
456 }
457 
458 /* Runs in NAPI, i.e., softirq under local_bh_disable(). Thus, per-CPU
459  * variable access is safe and map elements stick around. See comment above
460  * xdp_do_flush() in filter.c. PREEMPT_RT relies on local_lock_nested_bh()
461  * to serialise access to the per-CPU bq.
462  */
463 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
464 		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
465 {
466 	struct xdp_dev_bulk_queue *bq;
467 
468 	local_lock_nested_bh(&dev->xdp_bulkq->bq_lock);
469 	bq = this_cpu_ptr(dev->xdp_bulkq);
470 
471 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
472 		bq_xmit_all(bq, 0);
473 
474 	/* Ingress dev_rx will be the same for all xdp_frames in the
475 	 * bulk_queue, because the bq is stored per-CPU and must be flushed
476 	 * at the end of the net_device driver's NAPI function.
477 	 *
478 	 * Do the same with xdp_prog and flush_list since these fields
479 	 * are only ever modified together.
480 	 */
481 	if (!bq->dev_rx) {
482 		struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list();
483 
484 		bq->dev_rx = dev_rx;
485 		bq->xdp_prog = xdp_prog;
486 		list_add(&bq->flush_node, flush_list);
487 	}
488 
489 	bq->q[bq->count++] = xdpf;
490 
491 	local_unlock_nested_bh(&dev->xdp_bulkq->bq_lock);
492 }
493 
494 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
495 				struct net_device *dev_rx,
496 				struct bpf_prog *xdp_prog)
497 {
498 	int err;
499 
500 	if (!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
501 		return -EOPNOTSUPP;
502 
503 	if (unlikely(!(dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
504 		     xdp_frame_has_frags(xdpf)))
505 		return -EOPNOTSUPP;
506 
507 	err = xdp_ok_fwd_dev(dev, xdp_get_frame_len(xdpf));
508 	if (unlikely(err))
509 		return err;
510 
511 	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
512 	return 0;
513 }
514 
515 static u32 dev_map_bpf_prog_run_skb(struct sk_buff *skb, struct bpf_dtab_netdev *dst)
516 {
517 	struct xdp_txq_info txq = { .dev = dst->dev };
518 	struct xdp_buff xdp;
519 	u32 act;
520 
521 	if (!dst->xdp_prog)
522 		return XDP_PASS;
523 
524 	__skb_pull(skb, skb->mac_len);
525 	xdp.txq = &txq;
526 
527 	act = bpf_prog_run_generic_xdp(skb, &xdp, dst->xdp_prog);
528 	switch (act) {
529 	case XDP_PASS:
530 		__skb_push(skb, skb->mac_len);
531 		break;
532 	default:
533 		bpf_warn_invalid_xdp_action(NULL, dst->xdp_prog, act);
534 		fallthrough;
535 	case XDP_ABORTED:
536 		trace_xdp_exception(dst->dev, dst->xdp_prog, act);
537 		fallthrough;
538 	case XDP_DROP:
539 		kfree_skb(skb);
540 		break;
541 	}
542 
543 	return act;
544 }
545 
546 int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
547 		    struct net_device *dev_rx)
548 {
549 	return __xdp_enqueue(dev, xdpf, dev_rx, NULL);
550 }
551 
552 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf,
553 		    struct net_device *dev_rx)
554 {
555 	struct net_device *dev = dst->dev;
556 
557 	return __xdp_enqueue(dev, xdpf, dev_rx, dst->xdp_prog);
558 }
559 
560 static bool is_valid_dst(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf)
561 {
562 	if (!obj)
563 		return false;
564 
565 	if (!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT))
566 		return false;
567 
568 	if (unlikely(!(obj->dev->xdp_features & NETDEV_XDP_ACT_NDO_XMIT_SG) &&
569 		     xdp_frame_has_frags(xdpf)))
570 		return false;
571 
572 	if (xdp_ok_fwd_dev(obj->dev, xdp_get_frame_len(xdpf)))
573 		return false;
574 
575 	return true;
576 }
577 
578 static int dev_map_enqueue_clone(struct bpf_dtab_netdev *obj,
579 				 struct net_device *dev_rx,
580 				 struct xdp_frame *xdpf)
581 {
582 	struct xdp_frame *nxdpf;
583 
584 	nxdpf = xdpf_clone(xdpf);
585 	if (!nxdpf)
586 		return -ENOMEM;
587 
588 	bq_enqueue(obj->dev, nxdpf, dev_rx, obj->xdp_prog);
589 
590 	return 0;
591 }
592 
593 static inline bool is_ifindex_excluded(int *excluded, int num_excluded, int ifindex)
594 {
595 	while (num_excluded--) {
596 		if (ifindex == excluded[num_excluded])
597 			return true;
598 	}
599 	return false;
600 }
601 
602 /* Get ifindex of each upper device. 'indexes' must be able to hold at
603  * least 'max' elements.
604  * Returns the number of ifindexes added, or -EOVERFLOW if there are too
605  * many upper devices.
606  */
607 static int get_upper_ifindexes(struct net_device *dev, int *indexes, int max)
608 {
609 	struct net_device *upper;
610 	struct list_head *iter;
611 	int n = 0;
612 
613 	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
614 		if (n >= max)
615 			return -EOVERFLOW;
616 		indexes[n++] = upper->ifindex;
617 	}
618 
619 	return n;
620 }
621 
622 int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx,
623 			  struct bpf_map *map, bool exclude_ingress)
624 {
625 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
626 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
627 	int excluded_devices[1+MAX_NEST_DEV];
628 	struct hlist_head *head;
629 	int num_excluded = 0;
630 	unsigned int i;
631 	int err;
632 
633 	if (exclude_ingress) {
634 		num_excluded = get_upper_ifindexes(dev_rx, excluded_devices,
635 						   ARRAY_SIZE(excluded_devices) - 1);
636 		if (num_excluded < 0)
637 			return num_excluded;
638 
639 		excluded_devices[num_excluded++] = dev_rx->ifindex;
640 	}
641 
642 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
643 		for (i = 0; i < map->max_entries; i++) {
644 			dst = rcu_dereference_check(dtab->netdev_map[i],
645 						    rcu_read_lock_bh_held());
646 			if (!is_valid_dst(dst, xdpf))
647 				continue;
648 
649 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
650 				continue;
651 
652 			/* we only need n-1 clones; last_dst enqueued below */
653 			if (!last_dst) {
654 				last_dst = dst;
655 				continue;
656 			}
657 
658 			err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
659 			if (err)
660 				return err;
661 
662 			last_dst = dst;
663 		}
664 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
665 		for (i = 0; i < dtab->n_buckets; i++) {
666 			head = dev_map_index_hash(dtab, i);
667 			hlist_for_each_entry_rcu(dst, head, index_hlist,
668 						 lockdep_is_held(&dtab->index_lock)) {
669 				if (!is_valid_dst(dst, xdpf))
670 					continue;
671 
672 				if (is_ifindex_excluded(excluded_devices, num_excluded,
673 							dst->dev->ifindex))
674 					continue;
675 
676 				/* we only need n-1 clones; last_dst enqueued below */
677 				if (!last_dst) {
678 					last_dst = dst;
679 					continue;
680 				}
681 
682 				err = dev_map_enqueue_clone(last_dst, dev_rx, xdpf);
683 				if (err)
684 					return err;
685 
686 				last_dst = dst;
687 			}
688 		}
689 	}
690 
691 	/* consume the last copy of the frame */
692 	if (last_dst)
693 		bq_enqueue(last_dst->dev, xdpf, dev_rx, last_dst->xdp_prog);
694 	else
695 		xdp_return_frame_rx_napi(xdpf); /* dtab is empty */
696 
697 	return 0;
698 }
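/* For illustration (not part of this file): the multicast path above is what
 * a BPF program reaches by passing BPF_F_BROADCAST to bpf_redirect_map();
 * adding BPF_F_EXCLUDE_INGRESS also skips the ingress device and its upper
 * devices. The map name is an assumption, and the key is ignored when
 * broadcasting.
 *
 *	SEC("xdp")
 *	int xdp_broadcast_example(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0,
 *					BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS);
 *	}
 */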
699 
700 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
701 			     const struct bpf_prog *xdp_prog)
702 {
703 	int err;
704 
705 	err = xdp_ok_fwd_dev(dst->dev, skb->len);
706 	if (unlikely(err))
707 		return err;
708 
709 	/* Redirect has already succeeded semantically at this point, so we just
710 	 * return 0 even if packet is dropped. Helper below takes care of
711 	 * freeing skb.
712 	 */
713 	if (dev_map_bpf_prog_run_skb(skb, dst) != XDP_PASS)
714 		return 0;
715 
716 	skb->dev = dst->dev;
717 	generic_xdp_tx(skb, xdp_prog);
718 
719 	return 0;
720 }
721 
722 static int dev_map_redirect_clone(struct bpf_dtab_netdev *dst,
723 				  struct sk_buff *skb,
724 				  const struct bpf_prog *xdp_prog)
725 {
726 	struct sk_buff *nskb;
727 	int err;
728 
729 	nskb = skb_clone(skb, GFP_ATOMIC);
730 	if (!nskb)
731 		return -ENOMEM;
732 
733 	err = dev_map_generic_redirect(dst, nskb, xdp_prog);
734 	if (unlikely(err)) {
735 		consume_skb(nskb);
736 		return err;
737 	}
738 
739 	return 0;
740 }
741 
742 int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
743 			   const struct bpf_prog *xdp_prog,
744 			   struct bpf_map *map, bool exclude_ingress)
745 {
746 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
747 	struct bpf_dtab_netdev *dst, *last_dst = NULL;
748 	int excluded_devices[1+MAX_NEST_DEV];
749 	struct hlist_head *head;
750 	struct hlist_node *next;
751 	int num_excluded = 0;
752 	unsigned int i;
753 	int err;
754 
755 	if (exclude_ingress) {
756 		num_excluded = get_upper_ifindexes(dev, excluded_devices,
757 						   ARRAY_SIZE(excluded_devices) - 1);
758 		if (num_excluded < 0)
759 			return num_excluded;
760 
761 		excluded_devices[num_excluded++] = dev->ifindex;
762 	}
763 
764 	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
765 		for (i = 0; i < map->max_entries; i++) {
766 			dst = rcu_dereference_check(dtab->netdev_map[i],
767 						    rcu_read_lock_bh_held());
768 			if (!dst)
769 				continue;
770 
771 			if (is_ifindex_excluded(excluded_devices, num_excluded, dst->dev->ifindex))
772 				continue;
773 
774 			/* we only need n-1 clones; last_dst enqueued below */
775 			if (!last_dst) {
776 				last_dst = dst;
777 				continue;
778 			}
779 
780 			err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
781 			if (err)
782 				return err;
783 
784 			last_dst = dst;
785 
786 		}
787 	} else { /* BPF_MAP_TYPE_DEVMAP_HASH */
788 		for (i = 0; i < dtab->n_buckets; i++) {
789 			head = dev_map_index_hash(dtab, i);
790 			hlist_for_each_entry_safe(dst, next, head, index_hlist) {
791 				if (is_ifindex_excluded(excluded_devices, num_excluded,
792 							dst->dev->ifindex))
793 					continue;
794 
795 				/* we only need n-1 clones; last_dst enqueued below */
796 				if (!last_dst) {
797 					last_dst = dst;
798 					continue;
799 				}
800 
801 				err = dev_map_redirect_clone(last_dst, skb, xdp_prog);
802 				if (err)
803 					return err;
804 
805 				last_dst = dst;
806 			}
807 		}
808 	}
809 
810 	/* consume the first skb and return */
811 	if (last_dst)
812 		return dev_map_generic_redirect(last_dst, skb, xdp_prog);
813 
814 	/* dtab is empty */
815 	consume_skb(skb);
816 	return 0;
817 }
818 
819 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
820 {
821 	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
822 
823 	return obj ? &obj->val : NULL;
824 }
825 
826 static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
827 {
828 	struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map,
829 								*(u32 *)key);
830 	return obj ? &obj->val : NULL;
831 }
832 
833 static void __dev_map_entry_free(struct rcu_head *rcu)
834 {
835 	struct bpf_dtab_netdev *dev;
836 
837 	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
838 	if (dev->xdp_prog)
839 		bpf_prog_put(dev->xdp_prog);
840 	dev_put(dev->dev);
841 	kfree(dev);
842 }
843 
844 static long dev_map_delete_elem(struct bpf_map *map, void *key)
845 {
846 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
847 	struct bpf_dtab_netdev *old_dev;
848 	u32 k = *(u32 *)key;
849 
850 	if (k >= map->max_entries)
851 		return -EINVAL;
852 
853 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL));
854 	if (old_dev) {
855 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
856 		atomic_dec((atomic_t *)&dtab->items);
857 	}
858 	return 0;
859 }
860 
861 static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
862 {
863 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
864 	struct bpf_dtab_netdev *old_dev;
865 	u32 k = *(u32 *)key;
866 	unsigned long flags;
867 	int ret = -ENOENT;
868 
869 	spin_lock_irqsave(&dtab->index_lock, flags);
870 
871 	old_dev = __dev_map_hash_lookup_elem(map, k);
872 	if (old_dev) {
873 		dtab->items--;
874 		hlist_del_init_rcu(&old_dev->index_hlist);
875 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
876 		ret = 0;
877 	}
878 	spin_unlock_irqrestore(&dtab->index_lock, flags);
879 
880 	return ret;
881 }
882 
883 static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
884 						    struct bpf_dtab *dtab,
885 						    struct bpf_devmap_val *val,
886 						    unsigned int idx)
887 {
888 	struct bpf_prog *prog = NULL;
889 	struct bpf_dtab_netdev *dev;
890 
891 	dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev),
892 				   GFP_NOWAIT,
893 				   dtab->map.numa_node);
894 	if (!dev)
895 		return ERR_PTR(-ENOMEM);
896 
897 	dev->dev = dev_get_by_index(net, val->ifindex);
898 	if (!dev->dev)
899 		goto err_out;
900 
901 	if (val->bpf_prog.fd > 0) {
902 		prog = bpf_prog_get_type_dev(val->bpf_prog.fd,
903 					     BPF_PROG_TYPE_XDP, false);
904 		if (IS_ERR(prog))
905 			goto err_put_dev;
906 		if (prog->expected_attach_type != BPF_XDP_DEVMAP ||
907 		    !bpf_prog_map_compatible(&dtab->map, prog))
908 			goto err_put_prog;
909 	}
910 
911 	dev->idx = idx;
912 	if (prog) {
913 		dev->xdp_prog = prog;
914 		dev->val.bpf_prog.id = prog->aux->id;
915 	} else {
916 		dev->xdp_prog = NULL;
917 		dev->val.bpf_prog.id = 0;
918 	}
919 	dev->val.ifindex = val->ifindex;
920 
921 	return dev;
922 err_put_prog:
923 	bpf_prog_put(prog);
924 err_put_dev:
925 	dev_put(dev->dev);
926 err_out:
927 	kfree(dev);
928 	return ERR_PTR(-EINVAL);
929 }
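/* A hedged sketch (not part of this file) of a program that would pass the
 * checks above: BPF_PROG_TYPE_XDP with expected_attach_type BPF_XDP_DEVMAP,
 * which recent libbpf derives from the section name. Names are assumptions
 * for the example.
 *
 *	SEC("xdp/devmap")
 *	int xdp_devmap_egress(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;
 *	}
 *
 * Its fd is then supplied as val->bpf_prog.fd when updating the map element.
 */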
930 
931 static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
932 				  void *key, void *value, u64 map_flags)
933 {
934 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
935 	struct bpf_dtab_netdev *dev, *old_dev;
936 	struct bpf_devmap_val val = {};
937 	u32 i = *(u32 *)key;
938 
939 	if (unlikely(map_flags > BPF_EXIST))
940 		return -EINVAL;
941 	if (unlikely(i >= dtab->map.max_entries))
942 		return -E2BIG;
943 	if (unlikely(map_flags == BPF_NOEXIST))
944 		return -EEXIST;
945 
946 	/* already verified value_size <= sizeof val */
947 	memcpy(&val, value, map->value_size);
948 
949 	if (!val.ifindex) {
950 		dev = NULL;
951 		/* can not specify fd if ifindex is 0 */
952 		if (val.bpf_prog.fd > 0)
953 			return -EINVAL;
954 	} else {
955 		dev = __dev_map_alloc_node(net, dtab, &val, i);
956 		if (IS_ERR(dev))
957 			return PTR_ERR(dev);
958 	}
959 
960 	/* Use call_rcu() here to ensure rcu critical sections have completed.
961 	 * Remember that the driver side flush operation will happen before the
962 	 * net device is removed.
963 	 */
964 	old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev)));
965 	if (old_dev)
966 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
967 	else
968 		atomic_inc((atomic_t *)&dtab->items);
969 
970 	return 0;
971 }
972 
973 static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
974 				u64 map_flags)
975 {
976 	return __dev_map_update_elem(current->nsproxy->net_ns,
977 				     map, key, value, map_flags);
978 }
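/* Userspace update sketch (illustration only, not part of this file): with
 * the 8-byte value layout, both the egress ifindex and an optional
 * BPF_XDP_DEVMAP program fd are written in one update. The fds and the
 * interface name are assumptions for the example.
 *
 *	__u32 key = 0;
 *	struct bpf_devmap_val val = {
 *		.ifindex = if_nametoindex("eth1"),
 *		.bpf_prog.fd = devmap_prog_fd,
 *	};
 *
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */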
979 
980 static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
981 				       void *key, void *value, u64 map_flags)
982 {
983 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
984 	struct bpf_dtab_netdev *dev, *old_dev;
985 	struct bpf_devmap_val val = {};
986 	u32 idx = *(u32 *)key;
987 	unsigned long flags;
988 	int err = -EEXIST;
989 
990 	/* already verified value_size <= sizeof val */
991 	memcpy(&val, value, map->value_size);
992 
993 	if (unlikely(map_flags > BPF_EXIST || !val.ifindex))
994 		return -EINVAL;
995 
996 	spin_lock_irqsave(&dtab->index_lock, flags);
997 
998 	old_dev = __dev_map_hash_lookup_elem(map, idx);
999 	if (old_dev && (map_flags & BPF_NOEXIST))
1000 		goto out_err;
1001 
1002 	dev = __dev_map_alloc_node(net, dtab, &val, idx);
1003 	if (IS_ERR(dev)) {
1004 		err = PTR_ERR(dev);
1005 		goto out_err;
1006 	}
1007 
1008 	if (old_dev) {
1009 		hlist_del_rcu(&old_dev->index_hlist);
1010 	} else {
1011 		if (dtab->items >= dtab->map.max_entries) {
1012 			spin_unlock_irqrestore(&dtab->index_lock, flags);
1013 			call_rcu(&dev->rcu, __dev_map_entry_free);
1014 			return -E2BIG;
1015 		}
1016 		dtab->items++;
1017 	}
1018 
1019 	hlist_add_head_rcu(&dev->index_hlist,
1020 			   dev_map_index_hash(dtab, idx));
1021 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1022 
1023 	if (old_dev)
1024 		call_rcu(&old_dev->rcu, __dev_map_entry_free);
1025 
1026 	return 0;
1027 
1028 out_err:
1029 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1030 	return err;
1031 }
1032 
1033 static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
1034 				     u64 map_flags)
1035 {
1036 	return __dev_map_hash_update_elem(current->nsproxy->net_ns,
1037 					 map, key, value, map_flags);
1038 }
1039 
1040 static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1041 {
1042 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1043 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1044 				      __dev_map_lookup_elem);
1045 }
1046 
1047 static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
1048 {
1049 	return __bpf_xdp_redirect_map(map, ifindex, flags,
1050 				      BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
1051 				      __dev_map_hash_lookup_elem);
1052 }
1053 
1054 static u64 dev_map_mem_usage(const struct bpf_map *map)
1055 {
1056 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
1057 	u64 usage = sizeof(struct bpf_dtab);
1058 
1059 	if (map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)
1060 		usage += (u64)dtab->n_buckets * sizeof(struct hlist_head);
1061 	else
1062 		usage += (u64)map->max_entries * sizeof(struct bpf_dtab_netdev *);
1063 	usage += atomic_read((atomic_t *)&dtab->items) *
1064 			 (u64)sizeof(struct bpf_dtab_netdev);
1065 	return usage;
1066 }
1067 
1068 BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab)
1069 const struct bpf_map_ops dev_map_ops = {
1070 	.map_meta_equal = bpf_map_meta_equal,
1071 	.map_alloc_check = dev_map_alloc_check,
1072 	.map_alloc = dev_map_alloc,
1073 	.map_free = dev_map_free,
1074 	.map_get_next_key = dev_map_get_next_key,
1075 	.map_lookup_elem = dev_map_lookup_elem,
1076 	.map_update_elem = dev_map_update_elem,
1077 	.map_delete_elem = dev_map_delete_elem,
1078 	.map_check_btf = map_check_no_btf,
1079 	.map_mem_usage = dev_map_mem_usage,
1080 	.map_btf_id = &dev_map_btf_ids[0],
1081 	.map_redirect = dev_map_redirect,
1082 };
1083 
1084 const struct bpf_map_ops dev_map_hash_ops = {
1085 	.map_meta_equal = bpf_map_meta_equal,
1086 	.map_alloc_check = dev_map_alloc_check,
1087 	.map_alloc = dev_map_alloc,
1088 	.map_free = dev_map_free,
1089 	.map_get_next_key = dev_map_hash_get_next_key,
1090 	.map_lookup_elem = dev_map_hash_lookup_elem,
1091 	.map_update_elem = dev_map_hash_update_elem,
1092 	.map_delete_elem = dev_map_hash_delete_elem,
1093 	.map_check_btf = map_check_no_btf,
1094 	.map_mem_usage = dev_map_mem_usage,
1095 	.map_btf_id = &dev_map_btf_ids[0],
1096 	.map_redirect = dev_hash_map_redirect,
1097 };
1098 
1099 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
1100 				       struct net_device *netdev)
1101 {
1102 	unsigned long flags;
1103 	u32 i;
1104 
1105 	spin_lock_irqsave(&dtab->index_lock, flags);
1106 	for (i = 0; i < dtab->n_buckets; i++) {
1107 		struct bpf_dtab_netdev *dev;
1108 		struct hlist_head *head;
1109 		struct hlist_node *next;
1110 
1111 		head = dev_map_index_hash(dtab, i);
1112 
1113 		hlist_for_each_entry_safe(dev, next, head, index_hlist) {
1114 			if (netdev != dev->dev)
1115 				continue;
1116 
1117 			dtab->items--;
1118 			hlist_del_rcu(&dev->index_hlist);
1119 			call_rcu(&dev->rcu, __dev_map_entry_free);
1120 		}
1121 	}
1122 	spin_unlock_irqrestore(&dtab->index_lock, flags);
1123 }
1124 
1125 static int dev_map_notification(struct notifier_block *notifier,
1126 				ulong event, void *ptr)
1127 {
1128 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
1129 	struct bpf_dtab *dtab;
1130 	int i, cpu;
1131 
1132 	switch (event) {
1133 	case NETDEV_REGISTER:
1134 		if (!netdev->netdev_ops->ndo_xdp_xmit || netdev->xdp_bulkq)
1135 			break;
1136 
1137 		/* will be freed in free_netdev() */
1138 		netdev->xdp_bulkq = alloc_percpu(struct xdp_dev_bulk_queue);
1139 		if (!netdev->xdp_bulkq)
1140 			return NOTIFY_BAD;
1141 
1142 		for_each_possible_cpu(cpu) {
1143 			struct xdp_dev_bulk_queue *bq;
1144 
1145 			bq = per_cpu_ptr(netdev->xdp_bulkq, cpu);
1146 			bq->dev = netdev;
1147 			local_lock_init(&bq->bq_lock);
1148 		}
1149 		break;
1150 	case NETDEV_UNREGISTER:
1151 		/* This rcu_read_lock/unlock pair is needed because
1152 		 * dev_map_list is an RCU list AND to ensure a delete
1153 		 * operation does not free a netdev_map entry while we
1154 		 * are comparing it against the netdev being unregistered.
1155 		 */
1156 		rcu_read_lock();
1157 		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
1158 			if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
1159 				dev_map_hash_remove_netdev(dtab, netdev);
1160 				continue;
1161 			}
1162 
1163 			for (i = 0; i < dtab->map.max_entries; i++) {
1164 				struct bpf_dtab_netdev *dev, *odev;
1165 
1166 				dev = rcu_dereference(dtab->netdev_map[i]);
1167 				if (!dev || netdev != dev->dev)
1168 					continue;
1169 				odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL));
1170 				if (dev == odev) {
1171 					call_rcu(&dev->rcu,
1172 						 __dev_map_entry_free);
1173 					atomic_dec((atomic_t *)&dtab->items);
1174 				}
1175 			}
1176 		}
1177 		rcu_read_unlock();
1178 		break;
1179 	default:
1180 		break;
1181 	}
1182 	return NOTIFY_OK;
1183 }
1184 
1185 static struct notifier_block dev_map_notifier = {
1186 	.notifier_call = dev_map_notification,
1187 };
1188 
1189 static int __init dev_map_init(void)
1190 {
1191 	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
1192 	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
1193 		     offsetof(struct _bpf_dtab_netdev, dev));
1194 	register_netdevice_notifier(&dev_map_notifier);
1195 
1196 	return 0;
1197 }
1198 
1199 subsys_initcall(dev_map_init);
1200