xref: /linux/kernel/bpf/devmap.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8)
/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/* A devmap's primary use is as a backend map for the XDP BPF helper call
 * bpf_redirect_map(). Because XDP is mostly concerned with performance, we
 * have spent some effort to ensure that the datapath with redirect maps does
 * not use any locking. This is a quick note on the details; a usage sketch
 * follows this comment.
 *
 * There are three possible paths into the devmap control plane: bpf
 * syscalls, bpf programs, and driver-side xmit/flush operations. A bpf
 * syscall will invoke an update, delete, or lookup operation. To ensure
 * updates and deletes appear atomic from the datapath side, xchg() is used
 * to modify the netdev_map array. Then, because the datapath does a lookup
 * into the netdev_map array (read-only) from an RCU critical section, we use
 * call_rcu() to wait for an rcu grace period before freeing the old data
 * structures. This ensures the datapath always has a valid copy. However,
 * the datapath does a "flush" operation that pushes any pending packets in
 * the driver outside the RCU critical section. Each bpf_dtab_netdev tracks
 * these pending operations using an atomic per-cpu bitmap. The
 * bpf_dtab_netdev object will not be destroyed until all bits are cleared,
 * indicating that the outstanding flush operations have completed.
 *
 * BPF syscalls may race with BPF program calls on any of the update, delete
 * or lookup operations. As noted above, the xchg() operation also keeps the
 * netdev_map consistent in this case. From the devmap side, BPF programs
 * calling into these operations are the same as multiple user space threads
 * making system calls.
 *
 * Finally, any of the above may race with a netdev_unregister notifier. The
 * unregister notifier must search the map structure for entries that hold a
 * reference to the net device being removed and remove them. This is a
 * two-step process: (a) dereference the bpf_dtab_netdev object in netdev_map
 * and (b) check whether its ifindex is the same as that of the net_device
 * being removed. When removing the dev, a cmpxchg() is used to ensure the
 * correct dev is removed; in the case of a concurrent update or delete
 * operation it is possible that the initially referenced dev is no longer in
 * the map. As the notifier hook walks the map, we know that new dev
 * references cannot be added by the user because core infrastructure ensures
 * dev_get_by_index() calls will fail at this point.
 */
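
/* Usage sketch (illustrative only, not built as part of this file): an XDP
 * program typically pairs a devmap with bpf_redirect_map() roughly as
 * follows. The map and program names are hypothetical and loosely follow
 * the samples/bpf style of this era.
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type        = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = sizeof(u32),
 *		.value_size  = sizeof(u32),	(value is an egress ifindex)
 *		.max_entries = 64,
 *	};
 *
 *	SEC("xdp_redirect_map")
 *	int xdp_redirect_map_prog(struct xdp_md *ctx)
 *	{
 *		u32 key = 0;	(slot chosen by the program)
 *
 *		return bpf_redirect_map(&tx_port, key, 0);
 *	}
 *
 * User space populates each slot with an ifindex through the bpf syscall
 * (see dev_map_update_elem() below), and drivers drain the resulting per-cpu
 * bulk queues via xdp_do_flush_map() (see __dev_map_flush() below).
 */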
#include <linux/bpf.h>
#include <net/xdp.h>
#include <linux/filter.h>
#include <trace/events/xdp.h>

#define DEV_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)

#define DEV_MAP_BULK_SIZE 16
struct xdp_bulk_queue {
	struct xdp_frame *q[DEV_MAP_BULK_SIZE];
	struct net_device *dev_rx;
	unsigned int count;
};

struct bpf_dtab_netdev {
	struct net_device *dev; /* must be first member, due to tracepoint */
	struct bpf_dtab *dtab;
	unsigned int bit;
	struct xdp_bulk_queue __percpu *bulkq;
	struct rcu_head rcu;
};

struct bpf_dtab {
	struct bpf_map map;
	struct bpf_dtab_netdev **netdev_map;
	unsigned long __percpu *flush_needed;
	struct list_head list;
};

static DEFINE_SPINLOCK(dev_map_lock);
static LIST_HEAD(dev_map_list);

static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
	return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}

static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
{
	struct bpf_dtab *dtab;
	int err = -EINVAL;
	u64 cost;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	dtab = kzalloc(sizeof(*dtab), GFP_USER);
	if (!dtab)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&dtab->map, attr);

	/* make sure page count doesn't overflow */
	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
	if (cost >= U32_MAX - PAGE_SIZE)
		goto free_dtab;

	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	/* if map size is larger than memlock limit, reject it early */
	err = bpf_map_precharge_memlock(dtab->map.pages);
	if (err)
		goto free_dtab;

	err = -ENOMEM;

	/* A per cpu bitfield with a bit per possible net device */
	dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
						__alignof__(unsigned long),
						GFP_KERNEL | __GFP_NOWARN);
	if (!dtab->flush_needed)
		goto free_dtab;

	dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
					      sizeof(struct bpf_dtab_netdev *),
					      dtab->map.numa_node);
	if (!dtab->netdev_map)
		goto free_dtab;

	spin_lock(&dev_map_lock);
	list_add_tail_rcu(&dtab->list, &dev_map_list);
	spin_unlock(&dev_map_lock);

	return &dtab->map;
free_dtab:
	free_percpu(dtab->flush_needed);
	kfree(dtab);
	return ERR_PTR(err);
}

static void dev_map_free(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	int i, cpu;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (there can be more than one that used this map)
	 * have been disconnected from events. Wait for outstanding critical
	 * sections in these programs to complete. The rcu critical section
	 * only guarantees no further reads against netdev_map. It does
	 * __not__ ensure pending flush operations (if any) are complete.
	 */

	spin_lock(&dev_map_lock);
	list_del_rcu(&dtab->list);
	spin_unlock(&dev_map_lock);

	synchronize_rcu();

	/* To ensure all pending flush operations have completed, wait for the
	 * flush bitmap to report all flush_needed bits as zero on _all_ cpus.
	 * Because the above synchronize_rcu() ensures the map is disconnected
	 * from the program we can assume no new bits will be set.
	 */
	for_each_online_cpu(cpu) {
		unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu);

		while (!bitmap_empty(bitmap, dtab->map.max_entries))
			cond_resched();
	}

	for (i = 0; i < dtab->map.max_entries; i++) {
		struct bpf_dtab_netdev *dev;

		dev = dtab->netdev_map[i];
		if (!dev)
			continue;

		dev_put(dev->dev);
		kfree(dev);
	}

	free_percpu(dtab->flush_needed);
	bpf_map_area_free(dtab->netdev_map);
	kfree(dtab);
}

static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= dtab->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == dtab->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}

void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);

	__set_bit(bit, bitmap);
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
		       struct xdp_bulk_queue *bq, u32 flags)
{
	struct net_device *dev = obj->dev;
	int sent = 0, drops = 0, err = 0;
	int i;

	if (unlikely(!bq->count))
		return 0;

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		prefetch(xdpf);
	}

	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
	drops = bq->count - sent;
out:
	bq->count = 0;

	trace_xdp_devmap_xmit(&obj->dtab->map, obj->bit,
			      sent, drops, bq->dev_rx, dev, err);
	bq->dev_rx = NULL;
	return 0;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to free them all.
	 */
	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		/* RX path under NAPI protection, can return frames faster */
		xdp_return_frame_rx_napi(xdpf);
		drops++;
	}
	goto out;
}

/* __dev_map_flush is called from xdp_do_flush_map(), which _must_ be called
 * by the driver before returning from its napi->poll() routine. The poll()
 * routine is called either from busy_poll context or net_rx_action signaled
 * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the
 * net device can be torn down. On devmap tear-down we wait for the per-cpu
 * ctx bitmap to be zeroed before completing, which guarantees that all flush
 * operations have finished.
 */
void __dev_map_flush(struct bpf_map *map)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed);
	u32 bit;

	for_each_set_bit(bit, bitmap, map->max_entries) {
		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
		struct xdp_bulk_queue *bq;

		/* This is possible if the dev entry is removed by user space
		 * between xdp redirect and flush op.
		 */
		if (unlikely(!dev))
			continue;

		__clear_bit(bit, bitmap);

		bq = this_cpu_ptr(dev->bulkq);
		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
	}
}
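
/* Driver-side sketch (illustrative and heavily simplified; the mydrv_*
 * names are hypothetical, and xdp, prog and netdev come from driver
 * context): a NAPI poll routine that supports redirect typically ends with
 * xdp_do_flush_map(), which is what drains the per-cpu bulk queues managed
 * above within the same softirq:
 *
 *	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work = 0;
 *
 *		while (work < budget && mydrv_next_rx_frame(napi, &xdp)) {
 *			u32 act = bpf_prog_run_xdp(prog, &xdp);
 *
 *			if (act == XDP_REDIRECT)
 *				xdp_do_redirect(netdev, &xdp, prog);
 *			work++;
 *		}
 *		xdp_do_flush_map();
 *		return work;
 *	}
 */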

/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete
 * and/or update happens in parallel here, a dev_put won't happen until
 * after reading the ifindex.
 */
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *obj;

	if (key >= map->max_entries)
		return NULL;

	obj = READ_ONCE(dtab->netdev_map[key]);
	return obj;
}

/* Runs under RCU-read-side, plus in softirq under NAPI protection.
 * Thus, safe percpu variable access.
 */
static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
		      struct net_device *dev_rx)
{
	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
		bq_xmit_all(obj, bq, 0);

	/* Ingress dev_rx will be the same for all xdp_frames in the
	 * bulk_queue, because the bq is stored per-CPU and must be flushed
	 * from the net_device driver's NAPI handler before it returns.
	 */
	if (!bq->dev_rx)
		bq->dev_rx = dev_rx;

	bq->q[bq->count++] = xdpf;
	return 0;
}

int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
		    struct net_device *dev_rx)
{
	struct net_device *dev = dst->dev;
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
	if (unlikely(err))
		return err;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	return bq_enqueue(dst, xdpf, dev_rx);
}

int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
			     struct bpf_prog *xdp_prog)
{
	int err;

	err = xdp_ok_fwd_dev(dst->dev, skb->len);
	if (unlikely(err))
		return err;
	skb->dev = dst->dev;
	generic_xdp_tx(skb, xdp_prog);

	return 0;
}

static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
	struct net_device *dev = obj ? obj->dev : NULL;

	return dev ? &dev->ifindex : NULL;
}

static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
{
	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
		struct xdp_bulk_queue *bq;
		unsigned long *bitmap;
		int cpu;

		for_each_online_cpu(cpu) {
			bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu);
			__clear_bit(dev->bit, bitmap);

			bq = per_cpu_ptr(dev->bulkq, cpu);
			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
		}
	}
}

static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	dev_map_flush_old(dev);
	free_percpu(dev->bulkq);
	dev_put(dev->dev);
	kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct bpf_dtab_netdev *old_dev;
	int k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	/* Use call_rcu() here to ensure any rcu critical sections have
	 * completed, but this does not guarantee a flush has happened yet,
	 * because the driver-side rcu_read_lock/unlock only protects the
	 * running XDP program. However, for pending flush operations the
	 * dev and ctx are stored in another per-cpu structure. Additionally,
	 * driver tear-down ensures all soft irqs are complete before the net
	 * device is removed once its refcount drops to zero.
	 */
	old_dev = xchg(&dtab->netdev_map[k], NULL);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);
	return 0;
}

static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
	struct net *net = current->nsproxy->net_ns;
	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
	struct bpf_dtab_netdev *dev, *old_dev;
	u32 i = *(u32 *)key;
	u32 ifindex = *(u32 *)value;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= dtab->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;

	if (!ifindex) {
		dev = NULL;
	} else {
		dev = kmalloc_node(sizeof(*dev), gfp, map->numa_node);
		if (!dev)
			return -ENOMEM;

		dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq),
						sizeof(void *), gfp);
		if (!dev->bulkq) {
			kfree(dev);
			return -ENOMEM;
		}

		dev->dev = dev_get_by_index(net, ifindex);
		if (!dev->dev) {
			free_percpu(dev->bulkq);
			kfree(dev);
			return -EINVAL;
		}

		dev->bit = i;
		dev->dtab = dtab;
	}

	/* Use call_rcu() here to ensure rcu critical sections have completed.
	 * Remember that the driver-side flush operation will happen before
	 * the net device is removed.
	 */
	old_dev = xchg(&dtab->netdev_map[i], dev);
	if (old_dev)
		call_rcu(&old_dev->rcu, __dev_map_entry_free);

	return 0;
}
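
/* User-space sketch (illustrative; libbpf-style syscall wrappers assumed):
 * a devmap entry is keyed by slot index and valued by an egress ifindex,
 * so populating slot 0 with "eth1" might look like this:
 *
 *	u32 key = 0;
 *	u32 ifindex = if_nametoindex("eth1");
 *
 *	bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY);
 *
 * Writing an ifindex of 0, or deleting the key, clears the slot and takes
 * the xchg()/call_rcu() path implemented above.
 */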

const struct bpf_map_ops dev_map_ops = {
	.map_alloc = dev_map_alloc,
	.map_free = dev_map_free,
	.map_get_next_key = dev_map_get_next_key,
	.map_lookup_elem = dev_map_lookup_elem,
	.map_update_elem = dev_map_update_elem,
	.map_delete_elem = dev_map_delete_elem,
};

static int dev_map_notification(struct notifier_block *notifier,
				ulong event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
	struct bpf_dtab *dtab;
	int i;

	switch (event) {
	case NETDEV_UNREGISTER:
		/* This rcu_read_lock/unlock pair is needed because
		 * dev_map_list is an RCU list AND to ensure a delete
		 * operation does not free a netdev_map entry while we
		 * are comparing it against the netdev being unregistered.
		 */
		rcu_read_lock();
		list_for_each_entry_rcu(dtab, &dev_map_list, list) {
			for (i = 0; i < dtab->map.max_entries; i++) {
				struct bpf_dtab_netdev *dev, *odev;

				dev = READ_ONCE(dtab->netdev_map[i]);
				if (!dev ||
				    dev->dev->ifindex != netdev->ifindex)
					continue;
				odev = cmpxchg(&dtab->netdev_map[i], dev, NULL);
				if (dev == odev)
					call_rcu(&dev->rcu,
						 __dev_map_entry_free);
			}
		}
		rcu_read_unlock();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block dev_map_notifier = {
	.notifier_call = dev_map_notification,
};

static int __init dev_map_init(void)
{
	/* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */
	BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) !=
		     offsetof(struct _bpf_dtab_netdev, dev));
	register_netdevice_notifier(&dev_map_notifier);
	return 0;
}

subsys_initcall(dev_map_init);