xref: /linux/kernel/bpf/arena.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include "linux/filter.h"
#include <linux/btf_ids.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include "range_tree.h"

/*
 * bpf_arena is a sparsely populated shared memory region between a bpf
 * program and a user space process.
 *
 * For example on x86-64 the values could be:
 * user_vm_start 7f7d26200000     // picked by mmap()
 * kern_vm_start ffffc90001e69000 // picked by get_vm_area()
 * For user space all pointers within the arena are normal 8-byte addresses.
 * In this example 7f7d26200000 is the address of the first page (pgoff=0).
 * The bpf program will access it as: kern_vm_start + lower_32bit_of_user_ptr
 * (u32)7f7d26200000 -> 26200000
 * hence
 * ffffc90001e69000 + 26200000 == ffffc90028069000 is "pgoff=0" within the 4Gb
 * kernel memory region.
 *
 * BPF JITs generate the following code to access arena:
 *   mov eax, eax  // eax has lower 32-bit of user pointer
 *   mov word ptr [rax + r12 + off], bx
 * where r12 == kern_vm_start and off is s16.
 * Hence allocate 4Gb + GUARD_SZ/2 on each side.
 *
 * Initially the kernel vm_area and user vma are not populated.
 * User space can fault-in any address which will insert the page
 * into the kernel and user vma.
 * A bpf program can allocate a page via the bpf_arena_alloc_pages() kfunc,
 * which will insert it into the kernel vm_area.
 * A later fault-in from user space will populate that page into the user vma.
 */
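
/*
 * To make the translation above concrete, here is a minimal sketch of the
 * arithmetic (the addresses are the hypothetical ones from the example
 * above, not values this file computes):
 *
 *	u64 user_vm_start = 0x7f7d26200000;	// picked by mmap()
 *	u64 kern_vm_start = 0xffffc90001e69000;	// picked by get_vm_area()
 *	u64 user_ptr      = 0x7f7d26200008;	// 8 bytes into pgoff=0
 *
 *	// what the JITed code effectively computes:
 *	u64 kern_ptr = kern_vm_start + (u32)user_ptr;
 *	// kern_ptr == 0xffffc90028069008, i.e. the same byte as user_ptr,
 *	// 8 bytes into "pgoff=0" of the kernel mapping.
 */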

/* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
#define GUARD_SZ (1ull << sizeof_field(struct bpf_insn, off) * 8)
#define KERN_VM_SZ (SZ_4G + GUARD_SZ)
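
/*
 * For reference, a worked computation of the macros above, assuming the
 * 'off' field of struct bpf_insn is 2 bytes (__s16):
 *
 *	GUARD_SZ   == 1ull << (2 * 8) == 0x10000	(64Kb)
 *	KERN_VM_SZ == SZ_4G + 0x10000			(4Gb + 64Kb)
 *
 * so GUARD_SZ/2 == 32Kb of guard area sits on each side of the 4Gb range,
 * covering the s16 'off' displacement range of LDX/STX instructions.
 */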

struct bpf_arena {
	struct bpf_map map;
	u64 user_vm_start;		/* start of the user space mapping */
	u64 user_vm_end;		/* end of the user space mapping */
	struct vm_struct *kern_vm;	/* 4Gb+guards kernel vm_area */
	struct range_tree rt;		/* free page offsets (set == free) */
	struct list_head vma_list;	/* all user vma-s mmap-ing this arena */
	struct mutex lock;		/* protects rt and vma_list */
};

u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
{
	return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0;
}

u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
{
	return arena ? arena->user_vm_start : 0;
}

static long arena_map_peek_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static long arena_map_push_elem(struct bpf_map *map, void *value, u64 flags)
{
	return -EOPNOTSUPP;
}

static long arena_map_pop_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static long arena_map_delete_elem(struct bpf_map *map, void *value)
{
	return -EOPNOTSUPP;
}

static int arena_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	return -EOPNOTSUPP;
}

static long compute_pgoff(struct bpf_arena *arena, long uaddr)
{
	/* only the lower 32 bits matter; the casts keep the math in u32 space */
	return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT;
}

static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
{
	struct vm_struct *kern_vm;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_arena *arena;
	u64 vm_range;
	int err = -ENOMEM;

	if (!bpf_jit_supports_arena())
		return ERR_PTR(-EOPNOTSUPP);

	if (attr->key_size || attr->value_size || attr->max_entries == 0 ||
	    /* BPF_F_MMAPABLE must be set */
	    !(attr->map_flags & BPF_F_MMAPABLE) ||
	    /* No unsupported flags present */
	    (attr->map_flags & ~(BPF_F_SEGV_ON_FAULT | BPF_F_MMAPABLE | BPF_F_NO_USER_CONV)))
		return ERR_PTR(-EINVAL);

	if (attr->map_extra & ~PAGE_MASK)
		/* If non-zero the map_extra is an expected user VMA start address */
		return ERR_PTR(-EINVAL);

	vm_range = (u64)attr->max_entries * PAGE_SIZE;
	if (vm_range > SZ_4G)
		return ERR_PTR(-E2BIG);

	if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))
		/* user vma must not cross 32-bit boundary */
		return ERR_PTR(-ERANGE);

	kern_vm = get_vm_area(KERN_VM_SZ, VM_SPARSE | VM_USERMAP);
	if (!kern_vm)
		return ERR_PTR(-ENOMEM);

	arena = bpf_map_area_alloc(sizeof(*arena), numa_node);
	if (!arena)
		goto err;

	arena->kern_vm = kern_vm;
	arena->user_vm_start = attr->map_extra;
	if (arena->user_vm_start)
		arena->user_vm_end = arena->user_vm_start + vm_range;

	INIT_LIST_HEAD(&arena->vma_list);
	bpf_map_init_from_attr(&arena->map, attr);
	range_tree_init(&arena->rt);
	err = range_tree_set(&arena->rt, 0, attr->max_entries);
	if (err) {
		bpf_map_area_free(arena);
		goto err;
	}
	mutex_init(&arena->lock);

	return &arena->map;
err:
	free_vm_area(kern_vm);
	return ERR_PTR(err);
}
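
/*
 * A minimal user space sketch of creating such a map, assuming libbpf's
 * bpf_map_create() API (not part of this file; error handling elided):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts,
 *		    .map_flags = BPF_F_MMAPABLE,
 *		    .map_extra = 0);	// or a page-aligned hint address
 *	// key_size == value_size == 0, max_entries == number of pages
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_ARENA, "arena", 0, 0,
 *				    100, &opts);
 */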

static int existing_page_cb(pte_t *ptep, unsigned long addr, void *data)
{
	struct page *page;
	pte_t pte;

	pte = ptep_get(ptep);
	if (!pte_present(pte)) /* sanity check */
		return 0;
	page = pte_page(pte);
	/*
	 * We do not update pte here:
	 * 1. Nobody should be accessing bpf_arena's range at this point,
	 * barring a kernel bug.
	 * 2. TLB flushing is batched or deferred. Even if we clear pte,
	 * the TLB entries can stick around and continue to permit access to
	 * the freed page. So it all relies on 1.
	 */
	__free_page(page);
	return 0;
}

static void arena_map_free(struct bpf_map *map)
{
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);

	/*
	 * Check that user vma-s are not around when bpf map is freed.
	 * mmap() holds vm_file which holds bpf_map refcnt.
	 * munmap() must have happened on vma followed by arena_vm_close()
	 * which would clear arena->vma_list.
	 */
	if (WARN_ON_ONCE(!list_empty(&arena->vma_list)))
		return;

	/*
	 * free_vm_area() calls remove_vm_area() that calls free_unmap_vmap_area().
	 * It unmaps everything from vmalloc area and clears pgtables.
	 * Call apply_to_existing_page_range() first to find populated ptes and
	 * free those pages.
	 */
	apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena),
				     KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL);
	free_vm_area(arena->kern_vm);
	range_tree_destroy(&arena->rt);
	bpf_map_area_free(arena);
}

static void *arena_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EINVAL);
}

static long arena_map_update_elem(struct bpf_map *map, void *key,
				  void *value, u64 flags)
{
	return -EOPNOTSUPP;
}

static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf,
			       const struct btf_type *key_type, const struct btf_type *value_type)
{
	return 0;
}

static u64 arena_map_mem_usage(const struct bpf_map *map)
{
	return 0;
}

struct vma_list {
	struct vm_area_struct *vma;
	struct list_head head;
	refcount_t mmap_count;
};

static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
{
	struct vma_list *vml;

	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
	if (!vml)
		return -ENOMEM;
	refcount_set(&vml->mmap_count, 1);
	vma->vm_private_data = vml;
	vml->vma = vma;
	list_add(&vml->head, &arena->vma_list);
	return 0;
}

static void arena_vm_open(struct vm_area_struct *vma)
{
	struct vma_list *vml = vma->vm_private_data;

	refcount_inc(&vml->mmap_count);
}

static void arena_vm_close(struct vm_area_struct *vma)
{
	struct bpf_map *map = vma->vm_file->private_data;
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
	struct vma_list *vml = vma->vm_private_data;

	if (!refcount_dec_and_test(&vml->mmap_count))
		return;
	guard(mutex)(&arena->lock);
	/* update the linked list under the lock */
	list_del(&vml->head);
	vma->vm_private_data = NULL;
	kfree(vml);
}

static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
{
	struct bpf_map *map = vmf->vma->vm_file->private_data;
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
	struct page *page;
	long kbase, kaddr;
	int ret;

	kbase = bpf_arena_get_kern_vm_start(arena);
	kaddr = kbase + (u32)(vmf->address);

	guard(mutex)(&arena->lock);
	page = vmalloc_to_page((void *)kaddr);
	if (page)
		/* already have a page vmap-ed */
		goto out;

	if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
		/* User space requested to segfault when page is not allocated by bpf prog */
		return VM_FAULT_SIGSEGV;

	ret = range_tree_clear(&arena->rt, vmf->pgoff, 1);
	if (ret)
		return VM_FAULT_SIGSEGV;

	/* Account into memcg of the process that created bpf_arena */
	ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
	if (ret) {
		range_tree_set(&arena->rt, vmf->pgoff, 1);
		return VM_FAULT_SIGSEGV;
	}

	ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page);
	if (ret) {
		range_tree_set(&arena->rt, vmf->pgoff, 1);
		__free_page(page);
		return VM_FAULT_SIGSEGV;
	}
out:
	page_ref_add(page, 1);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct arena_vm_ops = {
	.open		= arena_vm_open,
	.close		= arena_vm_close,
	.fault          = arena_vm_fault,
};
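
/*
 * A user space sketch of the fault-in path above (illustrative only;
 * 'map_fd' is an arena map fd as created earlier, error handling elided):
 *
 *	size_t len = 100 * 4096;	// must match max_entries pages
 *	char *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, map_fd, 0);
 *	base[0] = 1;	// faults in pgoff=0 via arena_vm_fault()
 *
 * Unless BPF_F_SEGV_ON_FAULT was set, the write above allocates a fresh
 * zeroed page and maps it into both the user vma and the kernel vm_area.
 */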

static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct bpf_map *map = filp->private_data;
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
	long ret;

	if (pgoff)
		return -EINVAL;
	if (len > SZ_4G)
		return -E2BIG;

	/* if user_vm_start was specified at arena creation time */
	if (arena->user_vm_start) {
		if (len > arena->user_vm_end - arena->user_vm_start)
			return -E2BIG;
		if (len != arena->user_vm_end - arena->user_vm_start)
			return -EINVAL;
		if (addr != arena->user_vm_start)
			return -EINVAL;
	}

	/*
	 * Ask for twice the size. If the returned range crosses a 32-bit
	 * boundary, the next 4Gb-aligned address is at most 'len' bytes
	 * past 'ret', so rounding up still leaves 'len' usable bytes
	 * inside the area that was found.
	 */
	ret = mm_get_unmapped_area(current->mm, filp, addr, len * 2, 0, flags);
	if (IS_ERR_VALUE(ret))
		return ret;
	if ((ret >> 32) == ((ret + len - 1) >> 32))
		return ret;
	if (WARN_ON_ONCE(arena->user_vm_start))
		/* checks at map creation time should prevent this */
		return -EFAULT;
	return round_up(ret, SZ_4G);
}

static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);

	guard(mutex)(&arena->lock);
	if (arena->user_vm_start && arena->user_vm_start != vma->vm_start)
		/*
		 * If map_extra was not specified at arena creation time then
		 * the 1st user process can do mmap(NULL, ...) to pick user_vm_start
		 * and the 2nd user process must pass the same addr to mmap(addr, MAP_FIXED..);
		 *   or
		 * specify addr in map_extra and
		 * use the same addr later with mmap(addr, MAP_FIXED..);
		 */
		return -EBUSY;

	if (arena->user_vm_end && arena->user_vm_end != vma->vm_end)
		/* all user processes must mmap a region of the same size */
		return -EBUSY;

	/* Earlier checks should prevent this */
	if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff))
		return -EFAULT;

	if (remember_vma(arena, vma))
		return -ENOMEM;

	arena->user_vm_start = vma->vm_start;
	arena->user_vm_end = vma->vm_end;
	/*
	 * bpf_map_mmap() checks that it's being mmaped as VM_SHARED and
	 * clears VM_MAYEXEC. Set VM_DONTEXPAND as well to avoid
	 * potential change of user_vm_start.
	 */
	vm_flags_set(vma, VM_DONTEXPAND);
	vma->vm_ops = &arena_vm_ops;
	return 0;
}
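
/*
 * Illustrative sketch of the two-process protocol described above
 * (assumes the arena was created with map_extra == 0 and the fd was
 * shared, e.g. via pinning or SCM_RIGHTS; error handling elided):
 *
 *	// process 1: let the kernel pick user_vm_start
 *	char *base1 = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, map_fd, 0);
 *	// ... communicate base1 to process 2 ...
 *	// process 2: must map the arena at the very same address
 *	char *base2 = mmap(base1, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED | MAP_FIXED, map_fd, 0);
 */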

static int arena_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
{
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);

	if ((u64)off > arena->user_vm_end - arena->user_vm_start)
		return -ERANGE;
	*imm = (unsigned long)arena->user_vm_start;
	return 0;
}

BTF_ID_LIST_SINGLE(bpf_arena_map_btf_ids, struct, bpf_arena)
const struct bpf_map_ops arena_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = arena_map_alloc,
	.map_free = arena_map_free,
	.map_direct_value_addr = arena_map_direct_value_addr,
	.map_mmap = arena_map_mmap,
	.map_get_unmapped_area = arena_get_unmapped_area,
	.map_get_next_key = arena_map_get_next_key,
	.map_push_elem = arena_map_push_elem,
	.map_peek_elem = arena_map_peek_elem,
	.map_pop_elem = arena_map_pop_elem,
	.map_lookup_elem = arena_map_lookup_elem,
	.map_update_elem = arena_map_update_elem,
	.map_delete_elem = arena_map_delete_elem,
	.map_check_btf = arena_map_check_btf,
	.map_mem_usage = arena_map_mem_usage,
	.map_btf_id = &bpf_arena_map_btf_ids[0],
};

/* mask off the lower 32 bits, e.g. clear_lo32(0x7f7d26200000) == 0x7f7d00000000 */
static u64 clear_lo32(u64 val)
{
	return val & ~(u64)~0U;
}

/*
 * Allocate pages and vmap them into the kernel vmalloc area.
 * Later the pages will be mmaped into the user space vma.
 */
static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt, int node_id)
{
	/* user_vm_end/start are fixed before bpf prog runs */
	long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
	u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena);
	struct page **pages;
	long pgoff = 0;
	u32 uaddr32;
	int ret, i;

	if (page_cnt > page_cnt_max)
		return 0;

	if (uaddr) {
		if (uaddr & ~PAGE_MASK)
			return 0;
		pgoff = compute_pgoff(arena, uaddr);
		if (pgoff > page_cnt_max - page_cnt)
			/* requested address will be outside of user VMA */
			return 0;
	}

	/* zeroing is needed, since alloc_pages_bulk_array() only fills in NULL entries */
	pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return 0;

	guard(mutex)(&arena->lock);

	if (uaddr) {
		ret = is_range_tree_set(&arena->rt, pgoff, page_cnt);
		if (ret)
			goto out_free_pages;
		ret = range_tree_clear(&arena->rt, pgoff, page_cnt);
	} else {
		ret = pgoff = range_tree_find(&arena->rt, page_cnt);
		if (pgoff >= 0)
			ret = range_tree_clear(&arena->rt, pgoff, page_cnt);
	}
	if (ret)
		goto out_free_pages;

	ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
				  node_id, page_cnt, pages);
	if (ret)
		goto out;

	uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
	/* Earlier checks made sure that uaddr32 + page_cnt * PAGE_SIZE - 1
	 * will not overflow 32-bit. The lower 32-bit needs to represent
	 * a contiguous user address range.
	 * Map these pages at kern_vm_start base.
	 * kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE - 1 can overflow
	 * lower 32-bit and it's ok.
	 */
	ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,
				kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);
	if (ret) {
		for (i = 0; i < page_cnt; i++)
			__free_page(pages[i]);
		goto out;
	}
	kvfree(pages);
	return clear_lo32(arena->user_vm_start) + uaddr32;
out:
	range_tree_set(&arena->rt, pgoff, page_cnt);
out_free_pages:
	kvfree(pages);
	return 0;
}
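
/*
 * Worked example of the return value composition above, reusing the
 * hypothetical addresses from the top-of-file comment:
 *
 *	user_vm_start = 0x7f7d26200000, pgoff = 1
 *	uaddr32       = (u32)(0x7f7d26200000 + 0x1000) == 0x26201000
 *	return value  = 0x7f7d00000000 + 0x26201000   == 0x7f7d26201000
 *
 * i.e. the full user space address of the page that was just allocated.
 */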

/*
 * If page is present in vmalloc area, unmap it from vmalloc area,
 * unmap it from all user space vma-s,
 * and free it.
 */
static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
{
	struct vma_list *vml;

	list_for_each_entry(vml, &arena->vma_list, head)
		zap_page_range_single(vml->vma, uaddr,
				      PAGE_SIZE * page_cnt, NULL);
}

static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
{
	u64 full_uaddr, uaddr_end;
	long kaddr, pgoff, i;
	struct page *page;

	/* only aligned lower 32-bit are relevant */
	uaddr = (u32)uaddr;
	uaddr &= PAGE_MASK;
	full_uaddr = clear_lo32(arena->user_vm_start) + uaddr;
	uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT));
	if (full_uaddr >= uaddr_end)
		return;

	page_cnt = (uaddr_end - full_uaddr) >> PAGE_SHIFT;

	guard(mutex)(&arena->lock);

	pgoff = compute_pgoff(arena, uaddr);
	/* mark the range as free again */
	range_tree_set(&arena->rt, pgoff, page_cnt);

	if (page_cnt > 1)
		/* bulk zap if multiple pages being freed */
		zap_pages(arena, full_uaddr, page_cnt);

	kaddr = bpf_arena_get_kern_vm_start(arena) + uaddr;
	for (i = 0; i < page_cnt; i++, kaddr += PAGE_SIZE, full_uaddr += PAGE_SIZE) {
		page = vmalloc_to_page((void *)kaddr);
		if (!page)
			continue;
		if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */
			/* Optimization for the common case of page_cnt==1:
			 * If the page wasn't mapped into some user vma there
			 * is no need to call zap_pages which is slow. When
			 * page_cnt is big it's faster to do the batched zap.
			 */
			zap_pages(arena, full_uaddr, 1);
		vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);
		__free_page(page);
	}
}

__bpf_kfunc_start_defs();

__bpf_kfunc void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt,
					int node_id, u64 flags)
{
	struct bpf_map *map = p__map;
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);

	if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt)
		return NULL;

	return (void *)arena_alloc_pages(arena, (long)addr__ign, page_cnt, node_id);
}

__bpf_kfunc void bpf_arena_free_pages(void *p__map, void *ptr__ign, u32 page_cnt)
{
	struct bpf_map *map = p__map;
	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);

	if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign)
		return;
	arena_free_pages(arena, (long)ptr__ign, page_cnt);
}
__bpf_kfunc_end_defs();
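
/*
 * A sketch of how a bpf program might call these kfuncs. This lives in a
 * bpf-side .c file compiled with clang -target bpf, not here; the names
 * below follow the selftests' conventions and error handling is elided:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARENA);
 *		__uint(map_flags, BPF_F_MMAPABLE);
 *		__uint(max_entries, 100);	// number of pages
 *	} arena SEC(".maps");
 *
 *	void *bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
 *				    int node_id, __u64 flags) __ksym;
 *	void bpf_arena_free_pages(void *map, void *ptr, __u32 page_cnt) __ksym;
 *
 *	SEC("syscall")
 *	int alloc_one_page(void *ctx)
 *	{
 *		void *page = bpf_arena_alloc_pages(&arena, NULL, 1,
 *						   NUMA_NO_NODE, 0);
 *		if (!page)
 *			return 1;
 *		bpf_arena_free_pages(&arena, page, 1);
 *		return 0;
 *	}
 */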

BTF_KFUNCS_START(arena_kfuncs)
BTF_ID_FLAGS(func, bpf_arena_alloc_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_ID_FLAGS(func, bpf_arena_free_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE)
BTF_KFUNCS_END(arena_kfuncs)

static const struct btf_kfunc_id_set common_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &arena_kfuncs,
};

static int __init kfunc_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
}
late_initcall(kfunc_init);