xref: /linux/kernel/bpf/arena.c (revision da51bbcdbace8f43adf6066934c3926b656376e5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
3 #include <linux/bpf.h>
4 #include <linux/btf.h>
5 #include <linux/err.h>
6 #include <linux/btf_ids.h>
7 #include <linux/vmalloc.h>
8 #include <linux/pagemap.h>
9 
10 /*
11  * bpf_arena is a sparsely populated shared memory region between a bpf
12  * program and a user space process.
13  *
14  * For example on x86-64 the values could be:
15  * user_vm_start 7f7d26200000     // picked by mmap()
16  * kern_vm_start ffffc90001e69000 // picked by get_vm_area()
17  * For user space all pointers within the arena are normal 8-byte addresses.
18  * In this example 7f7d26200000 is the address of the first page (pgoff=0).
19  * The bpf program will access it as: kern_vm_start + lower_32bit_of_user_ptr
20  * (u32)7f7d26200000 -> 26200000
21  * hence
22  * ffffc90001e69000 + 26200000 == ffffc90028069000 is "pgoff=0" within 4Gb
23  * kernel memory region.
24  *
25  * BPF JITs generate the following code to access arena:
26  *   mov eax, eax  // eax has lower 32-bit of user pointer
27  *   mov word ptr [rax + r12 + off], bx
28  * where r12 == kern_vm_start and off is s16.
29  * Hence the kernel reserves 4Gb plus a GUARD_SZ/2 guard on each side.
30  *
31  * Initially the kernel vm_area and the user vma are not populated.
32  * User space can fault in any address, which inserts a fresh page into
33  * both the kernel vm_area and the user vma.
34  * A bpf program can allocate a page via the bpf_arena_alloc_pages() kfunc,
35  * which inserts it into the kernel vm_area only.
36  * A later fault-in from user space then maps that page into the user vma.
37  */
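/*
 * For illustration only: a minimal user space sketch that creates and maps an
 * arena with libbpf's bpf_map_create(). The map name, page count and lack of
 * error handling are illustrative, not part of this file:
 *
 *   LIBBPF_OPTS(bpf_map_create_opts, opts,
 *               .map_flags = BPF_F_MMAPABLE,
 *               .map_extra = 0);                     // let mmap() pick user_vm_start
 *   int arena_fd = bpf_map_create(BPF_MAP_TYPE_ARENA, "arena",
 *                                 0, 0, 100, &opts); // key/value size 0, 100 pages
 *   void *base = mmap(NULL, 100 * getpagesize(), PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, arena_fd, 0);
 *
 * Touching memory in that mapping faults pages in via arena_vm_fault() below,
 * and a bpf program sees the same pages at kern_vm_start plus the lower
 * 32 bits of the user pointer.
 */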
38 
39 /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
40 #define GUARD_SZ (1ull << sizeof(((struct bpf_insn *)0)->off) * 8)
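/* GUARD_SZ is 64KB since 'off' is s16; reserve 4Gb plus a GUARD_SZ/2 guard below and above it */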
41 #define KERN_VM_SZ ((1ull << 32) + GUARD_SZ)
42 
43 struct bpf_arena {
44 	struct bpf_map map;
45 	u64 user_vm_start;
46 	u64 user_vm_end;
47 	struct vm_struct *kern_vm;
48 	struct maple_tree mt;
49 	struct list_head vma_list;
50 	struct mutex lock;
51 };
52 
53 u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
54 {
55 	return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0;
56 }
57 
58 u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
59 {
60 	return arena ? arena->user_vm_start : 0;
61 }
62 
63 static long arena_map_peek_elem(struct bpf_map *map, void *value)
64 {
65 	return -EOPNOTSUPP;
66 }
67 
68 static long arena_map_push_elem(struct bpf_map *map, void *value, u64 flags)
69 {
70 	return -EOPNOTSUPP;
71 }
72 
73 static long arena_map_pop_elem(struct bpf_map *map, void *value)
74 {
75 	return -EOPNOTSUPP;
76 }
77 
78 static long arena_map_delete_elem(struct bpf_map *map, void *value)
79 {
80 	return -EOPNOTSUPP;
81 }
82 
83 static int arena_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
84 {
85 	return -EOPNOTSUPP;
86 }
87 
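/* Page offset of uaddr within the arena; only the lower 32 bits of the addresses matter */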
88 static long compute_pgoff(struct bpf_arena *arena, long uaddr)
89 {
90 	return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT;
91 }
92 
93 static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
94 {
95 	struct vm_struct *kern_vm;
96 	int numa_node = bpf_map_attr_numa_node(attr);
97 	struct bpf_arena *arena;
98 	u64 vm_range;
99 	int err = -ENOMEM;
100 
101 	if (attr->key_size || attr->value_size || attr->max_entries == 0 ||
102 	    /* BPF_F_MMAPABLE must be set */
103 	    !(attr->map_flags & BPF_F_MMAPABLE) ||
104 	    /* No unsupported flags present */
105 	    (attr->map_flags & ~(BPF_F_SEGV_ON_FAULT | BPF_F_MMAPABLE | BPF_F_NO_USER_CONV)))
106 		return ERR_PTR(-EINVAL);
107 
108 	if (attr->map_extra & ~PAGE_MASK)
109 		/* If non-zero, map_extra is the expected user VMA start address */
110 		return ERR_PTR(-EINVAL);
111 
112 	vm_range = (u64)attr->max_entries * PAGE_SIZE;
113 	if (vm_range > (1ull << 32))
114 		return ERR_PTR(-E2BIG);
115 
116 	if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))
117 		/* user vma must not cross 32-bit boundary */
118 		return ERR_PTR(-ERANGE);
119 
120 	kern_vm = get_vm_area(KERN_VM_SZ, VM_SPARSE | VM_USERMAP);
121 	if (!kern_vm)
122 		return ERR_PTR(-ENOMEM);
123 
124 	arena = bpf_map_area_alloc(sizeof(*arena), numa_node);
125 	if (!arena)
126 		goto err;
127 
128 	arena->kern_vm = kern_vm;
129 	arena->user_vm_start = attr->map_extra;
130 	if (arena->user_vm_start)
131 		arena->user_vm_end = arena->user_vm_start + vm_range;
132 
133 	INIT_LIST_HEAD(&arena->vma_list);
134 	bpf_map_init_from_attr(&arena->map, attr);
135 	mt_init_flags(&arena->mt, MT_FLAGS_ALLOC_RANGE);
136 	mutex_init(&arena->lock);
137 
138 	return &arena->map;
139 err:
140 	free_vm_area(kern_vm);
141 	return ERR_PTR(err);
142 }
143 
144 static int existing_page_cb(pte_t *ptep, unsigned long addr, void *data)
145 {
146 	struct page *page;
147 	pte_t pte;
148 
149 	pte = ptep_get(ptep);
150 	if (!pte_present(pte)) /* sanity check */
151 		return 0;
152 	page = pte_page(pte);
153 	/*
154 	 * We do not update pte here:
155 	 * 1. Nothing should be accessing bpf_arena's range here; only a kernel bug could
156 	 * 2. TLB flushing is batched or deferred. Even if we clear pte,
157 	 * the TLB entries can stick around and continue to permit access to
158 	 * the freed page. So it all relies on 1.
159 	 */
160 	__free_page(page);
161 	return 0;
162 }
163 
164 static void arena_map_free(struct bpf_map *map)
165 {
166 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
167 
168 	/*
169 	 * Check that user vma-s are not around when bpf map is freed.
170 	 * mmap() holds vm_file which holds bpf_map refcnt.
171 	 * munmap() must have happened on vma followed by arena_vm_close()
172 	 * which would clear arena->vma_list.
173 	 */
174 	if (WARN_ON_ONCE(!list_empty(&arena->vma_list)))
175 		return;
176 
177 	/*
178 	 * free_vm_area() calls remove_vm_area() that calls free_unmap_vmap_area().
179 	 * It unmaps everything from vmalloc area and clears pgtables.
180 	 * Call apply_to_existing_page_range() first to find populated ptes and
181 	 * free those pages.
182 	 */
183 	apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena),
184 				     KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL);
185 	free_vm_area(arena->kern_vm);
186 	mtree_destroy(&arena->mt);
187 	bpf_map_area_free(arena);
188 }
189 
190 static void *arena_map_lookup_elem(struct bpf_map *map, void *key)
191 {
192 	return ERR_PTR(-EINVAL);
193 }
194 
195 static long arena_map_update_elem(struct bpf_map *map, void *key,
196 				  void *value, u64 flags)
197 {
198 	return -EOPNOTSUPP;
199 }
200 
201 static int arena_map_check_btf(const struct bpf_map *map, const struct btf *btf,
202 			       const struct btf_type *key_type, const struct btf_type *value_type)
203 {
204 	return 0;
205 }
206 
207 static u64 arena_map_mem_usage(const struct bpf_map *map)
208 {
209 	return 0;
210 }
211 
212 struct vma_list {
213 	struct vm_area_struct *vma;
214 	struct list_head head;
215 };
216 
217 static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
218 {
219 	struct vma_list *vml;
220 
221 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
222 	if (!vml)
223 		return -ENOMEM;
224 	vma->vm_private_data = vml;
225 	vml->vma = vma;
226 	list_add(&vml->head, &arena->vma_list);
227 	return 0;
228 }
229 
230 static void arena_vm_close(struct vm_area_struct *vma)
231 {
232 	struct bpf_map *map = vma->vm_file->private_data;
233 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
234 	struct vma_list *vml;
235 
236 	guard(mutex)(&arena->lock);
237 	vml = vma->vm_private_data;
238 	list_del(&vml->head);
239 	vma->vm_private_data = NULL;
240 	kfree(vml);
241 }
242 
243 #define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
244 
245 static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
246 {
247 	struct bpf_map *map = vmf->vma->vm_file->private_data;
248 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
249 	struct page *page;
250 	long kbase, kaddr;
251 	int ret;
252 
253 	kbase = bpf_arena_get_kern_vm_start(arena);
254 	kaddr = kbase + (u32)(vmf->address & PAGE_MASK);
255 
256 	guard(mutex)(&arena->lock);
257 	page = vmalloc_to_page((void *)kaddr);
258 	if (page)
259 		/* already have a page vmap-ed */
260 		goto out;
261 
262 	if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
263 		/* User space requested to segfault when page is not allocated by bpf prog */
264 		return VM_FAULT_SIGSEGV;
265 
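	/*
	 * Reserve this page offset in the maple tree, so that a concurrent
	 * bpf_arena_alloc_pages() cannot hand out the same slot.
	 */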
266 	ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL);
267 	if (ret)
268 		return VM_FAULT_SIGSEGV;
269 
270 	/* Account into memcg of the process that created bpf_arena */
271 	ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);
272 	if (ret) {
273 		mtree_erase(&arena->mt, vmf->pgoff);
274 		return VM_FAULT_SIGSEGV;
275 	}
276 
277 	ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page);
278 	if (ret) {
279 		mtree_erase(&arena->mt, vmf->pgoff);
280 		__free_page(page);
281 		return VM_FAULT_SIGSEGV;
282 	}
283 out:
284 	page_ref_add(page, 1);
285 	vmf->page = page;
286 	return 0;
287 }
288 
289 static const struct vm_operations_struct arena_vm_ops = {
290 	.close		= arena_vm_close,
291 	.fault          = arena_vm_fault,
292 };
293 
294 static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long addr,
295 					     unsigned long len, unsigned long pgoff,
296 					     unsigned long flags)
297 {
298 	struct bpf_map *map = filp->private_data;
299 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
300 	long ret;
301 
302 	if (pgoff)
303 		return -EINVAL;
304 	if (len > (1ull << 32))
305 		return -E2BIG;
306 
307 	/* if user_vm_start was specified at arena creation time */
308 	if (arena->user_vm_start) {
309 		if (len > arena->user_vm_end - arena->user_vm_start)
310 			return -E2BIG;
311 		if (len != arena->user_vm_end - arena->user_vm_start)
312 			return -EINVAL;
313 		if (addr != arena->user_vm_start)
314 			return -EINVAL;
315 	}
316 
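	/*
	 * Ask for twice the requested length: if the found range crosses a
	 * 32-bit boundary it is rounded up to the next 4Gb boundary, and the
	 * extra slack guarantees that 'len' bytes still fit in the found hole.
	 */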
317 	ret = current->mm->get_unmapped_area(filp, addr, len * 2, 0, flags);
318 	if (IS_ERR_VALUE(ret))
319 		return ret;
320 	if ((ret >> 32) == ((ret + len - 1) >> 32))
321 		return ret;
322 	if (WARN_ON_ONCE(arena->user_vm_start))
323 		/* checks at map creation time should prevent this */
324 		return -EFAULT;
325 	return round_up(ret, 1ull << 32);
326 }
327 
328 static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
329 {
330 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
331 
332 	guard(mutex)(&arena->lock);
333 	if (arena->user_vm_start && arena->user_vm_start != vma->vm_start)
334 		/*
335 		 * If map_extra was not specified at arena creation time then
336 		 * 1st user process can do mmap(NULL, ...) to pick user_vm_start
337 		 * 2nd user process must pass the same addr to mmap(addr, MAP_FIXED..);
338 		 *   or
339 		 * specify addr in map_extra and
340 		 * use the same addr later with mmap(addr, MAP_FIXED..);
341 		 */
342 		return -EBUSY;
343 
344 	if (arena->user_vm_end && arena->user_vm_end != vma->vm_end)
345 		/* all user processes must have the same size of mmap-ed region */
346 		return -EBUSY;
347 
348 	/* Earlier checks should prevent this */
349 	if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > (1ull << 32) || vma->vm_pgoff))
350 		return -EFAULT;
351 
352 	if (remember_vma(arena, vma))
353 		return -ENOMEM;
354 
355 	arena->user_vm_start = vma->vm_start;
356 	arena->user_vm_end = vma->vm_end;
357 	/*
358 	 * bpf_map_mmap() checks that it's being mmaped as VM_SHARED and
359 	 * clears VM_MAYEXEC. Set VM_DONTEXPAND as well to avoid
360 	 * potential change of user_vm_start.
361 	 */
362 	vm_flags_set(vma, VM_DONTEXPAND);
363 	vma->vm_ops = &arena_vm_ops;
364 	return 0;
365 }
366 
367 static int arena_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
368 {
369 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
370 
371 	if ((u64)off > arena->user_vm_end - arena->user_vm_start)
372 		return -ERANGE;
373 	*imm = (unsigned long)arena->user_vm_start;
374 	return 0;
375 }
376 
377 BTF_ID_LIST_SINGLE(bpf_arena_map_btf_ids, struct, bpf_arena)
378 const struct bpf_map_ops arena_map_ops = {
379 	.map_meta_equal = bpf_map_meta_equal,
380 	.map_alloc = arena_map_alloc,
381 	.map_free = arena_map_free,
382 	.map_direct_value_addr = arena_map_direct_value_addr,
383 	.map_mmap = arena_map_mmap,
384 	.map_get_unmapped_area = arena_get_unmapped_area,
385 	.map_get_next_key = arena_map_get_next_key,
386 	.map_push_elem = arena_map_push_elem,
387 	.map_peek_elem = arena_map_peek_elem,
388 	.map_pop_elem = arena_map_pop_elem,
389 	.map_lookup_elem = arena_map_lookup_elem,
390 	.map_update_elem = arena_map_update_elem,
391 	.map_delete_elem = arena_map_delete_elem,
392 	.map_check_btf = arena_map_check_btf,
393 	.map_mem_usage = arena_map_mem_usage,
394 	.map_btf_id = &bpf_arena_map_btf_ids[0],
395 };
396 
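/* Keep only the upper 32 bits of val (the part shared by all addresses within the arena) */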
397 static u64 clear_lo32(u64 val)
398 {
399 	return val & ~(u64)~0U;
400 }
401 
402 /*
403  * Allocate pages and vmap them into kernel vmalloc area.
404  * Later the pages will be mmaped into user space vma.
405  */
406 static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt, int node_id)
407 {
408 	/* user_vm_end/start are fixed before bpf prog runs */
409 	long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
410 	u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena);
411 	struct page **pages;
412 	long pgoff = 0;
413 	u32 uaddr32;
414 	int ret, i;
415 
416 	if (page_cnt > page_cnt_max)
417 		return 0;
418 
419 	if (uaddr) {
420 		if (uaddr & ~PAGE_MASK)
421 			return 0;
422 		pgoff = compute_pgoff(arena, uaddr);
423 		if (pgoff + page_cnt > page_cnt_max)
424 			/* requested address will be outside of user VMA */
425 			return 0;
426 	}
427 
428 	/* zeroing is needed, since alloc_pages_bulk_array() only fills in NULL entries */
429 	pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL);
430 	if (!pages)
431 		return 0;
432 
433 	guard(mutex)(&arena->lock);
434 
435 	if (uaddr)
436 		ret = mtree_insert_range(&arena->mt, pgoff, pgoff + page_cnt - 1,
437 					 MT_ENTRY, GFP_KERNEL);
438 	else
439 		ret = mtree_alloc_range(&arena->mt, &pgoff, MT_ENTRY,
440 					page_cnt, 0, page_cnt_max - 1, GFP_KERNEL);
441 	if (ret)
442 		goto out_free_pages;
443 
444 	ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
445 				  node_id, page_cnt, pages);
446 	if (ret)
447 		goto out;
448 
449 	uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
450 	/* Earlier checks make sure that uaddr32 + page_cnt * PAGE_SIZE will not overflow 32-bit */
451 	ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,
452 				kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);
453 	if (ret) {
454 		for (i = 0; i < page_cnt; i++)
455 			__free_page(pages[i]);
456 		goto out;
457 	}
458 	kvfree(pages);
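	/* Compose the full 64-bit user address: the arena's upper 32 bits plus the 32-bit offset */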
459 	return clear_lo32(arena->user_vm_start) + uaddr32;
460 out:
461 	mtree_erase(&arena->mt, pgoff);
462 out_free_pages:
463 	kvfree(pages);
464 	return 0;
465 }
466 
467 /*
468  * If page is present in vmalloc area, unmap it from vmalloc area,
469  * unmap it from all user space vma-s,
470  * and free it.
471  */
472 static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
473 {
474 	struct vma_list *vml;
475 
476 	list_for_each_entry(vml, &arena->vma_list, head)
477 		zap_page_range_single(vml->vma, uaddr,
478 				      PAGE_SIZE * page_cnt, NULL);
479 }
480 
481 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
482 {
483 	u64 full_uaddr, uaddr_end;
484 	long kaddr, pgoff, i;
485 	struct page *page;
486 
487 	/* only the page-aligned lower 32 bits are relevant */
488 	uaddr = (u32)uaddr;
489 	uaddr &= PAGE_MASK;
490 	full_uaddr = clear_lo32(arena->user_vm_start) + uaddr;
491 	uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT));
492 	if (full_uaddr >= uaddr_end)
493 		return;
494 
495 	page_cnt = (uaddr_end - full_uaddr) >> PAGE_SHIFT;
496 
497 	guard(mutex)(&arena->lock);
498 
499 	pgoff = compute_pgoff(arena, uaddr);
500 	/* clear range */
501 	mtree_store_range(&arena->mt, pgoff, pgoff + page_cnt - 1, NULL, GFP_KERNEL);
502 
503 	if (page_cnt > 1)
504 		/* bulk zap if multiple pages are being freed */
505 		zap_pages(arena, full_uaddr, page_cnt);
506 
507 	kaddr = bpf_arena_get_kern_vm_start(arena) + uaddr;
508 	for (i = 0; i < page_cnt; i++, kaddr += PAGE_SIZE, full_uaddr += PAGE_SIZE) {
509 		page = vmalloc_to_page((void *)kaddr);
510 		if (!page)
511 			continue;
512 		if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */
513 			zap_pages(arena, full_uaddr, 1);
514 		vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);
515 		__free_page(page);
516 	}
517 }
518 
519 __bpf_kfunc_start_defs();
520 
521 __bpf_kfunc void *bpf_arena_alloc_pages(void *p__map, void *addr__ign, u32 page_cnt,
522 					int node_id, u64 flags)
523 {
524 	struct bpf_map *map = p__map;
525 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
526 
527 	if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt)
528 		return NULL;
529 
530 	return (void *)arena_alloc_pages(arena, (long)addr__ign, page_cnt, node_id);
531 }
532 
533 __bpf_kfunc void bpf_arena_free_pages(void *p__map, void *ptr__ign, u32 page_cnt)
534 {
535 	struct bpf_map *map = p__map;
536 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
537 
538 	if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign)
539 		return;
540 	arena_free_pages(arena, (long)ptr__ign, page_cnt);
541 }
542 __bpf_kfunc_end_defs();
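/*
 * For illustration only: a sketch of how a bpf program might use these kfuncs.
 * The map name, section annotation and page count are illustrative:
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_ARENA);
 *           __uint(map_flags, BPF_F_MMAPABLE);
 *           __uint(max_entries, 100);          // arena size in pages
 *   } arena SEC(".maps");
 *
 *   void *page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
 *   if (page)
 *           bpf_arena_free_pages(&arena, page, 1);
 */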
543 
544 BTF_KFUNCS_START(arena_kfuncs)
545 BTF_ID_FLAGS(func, bpf_arena_alloc_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE)
546 BTF_ID_FLAGS(func, bpf_arena_free_pages, KF_TRUSTED_ARGS | KF_SLEEPABLE)
547 BTF_KFUNCS_END(arena_kfuncs)
548 
549 static const struct btf_kfunc_id_set common_kfunc_set = {
550 	.owner = THIS_MODULE,
551 	.set   = &arena_kfuncs,
552 };
553 
554 static int __init kfunc_init(void)
555 {
556 	return register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set);
557 }
558 late_initcall(kfunc_init);
559