Lines Matching defs:arena

17 * For user space all pointers within the arena are normal 8-byte addresses.
25 * BPF JITs generate the following code to access arena:
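
A minimal C sketch of the translation those JIT instructions implement, not the emitted code itself; the helper name is illustrative. The lower 32 bits of the user pointer are zero-extended and added to kern_vm_start, so every arena pointer resolves inside the 4GB-plus-guard kernel vm area.

/* Illustrative only: mirrors the JIT's "zero-extend the low 32 bits, then
 * add the base register holding kern_vm_start" access pattern.
 */
static inline void *arena_user_to_kern(u64 kern_vm_start, u64 user_ptr)
{
	return (void *)(kern_vm_start + (u32)user_ptr);	/* the (u32) cast zero-extends */
}
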
53 u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)
55 return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0;
58 u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)
60 return arena ? arena->user_vm_start : 0;
88 static long compute_pgoff(struct bpf_arena *arena, long uaddr)
90 return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT;
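
A worked round trip through the helpers above, assuming 4KB pages (PAGE_SHIFT == 12); the local variable names are illustrative:

/* byte 0x100 of the arena's fourth page (pgoff 3) */
long uaddr   = arena->user_vm_start + 3 * PAGE_SIZE + 0x100;
long pgoff   = compute_pgoff(arena, uaddr);			/* == 3 */
u32  uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);	/* inverse step, as used at line 461 below */
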
97 struct bpf_arena *arena;
124 arena = bpf_map_area_alloc(sizeof(*arena), numa_node);
125 if (!arena)
128 arena->kern_vm = kern_vm;
129 arena->user_vm_start = attr->map_extra;
130 if (arena->user_vm_start)
131 arena->user_vm_end = arena->user_vm_start + vm_range;
133 INIT_LIST_HEAD(&arena->vma_list);
134 bpf_map_init_from_attr(&arena->map, attr);
135 mt_init_flags(&arena->mt, MT_FLAGS_ALLOC_RANGE);
136 mutex_init(&arena->lock);
138 return &arena->map;
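
For context, a minimal user-space sketch of creating such a map with libbpf; the map name, page count, and function name are illustrative, and it assumes recent kernel headers plus the arena conventions that key/value sizes are zero, max_entries counts pages, and BPF_F_MMAPABLE is mandatory. A non-zero map_extra becomes user_vm_start, as line 129 above shows.

#include <bpf/bpf.h>
#include <linux/bpf.h>

static int create_arena(__u32 nr_pages)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts,
		.map_flags = BPF_F_MMAPABLE,	/* required for arena maps */
		.map_extra = 0,			/* optional fixed user_vm_start */
	);

	/* key_size == value_size == 0; max_entries is the page count */
	return bpf_map_create(BPF_MAP_TYPE_ARENA, "arena", 0, 0, nr_pages, &opts);
}
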
166 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
172 * which would clear arena->vma_list.
174 if (WARN_ON_ONCE(!list_empty(&arena->vma_list)))
183 apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena),
185 free_vm_area(arena->kern_vm);
186 mtree_destroy(&arena->mt);
187 bpf_map_area_free(arena);
218 static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
228 list_add(&vml->head, &arena->vma_list);
242 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
247 guard(mutex)(&arena->lock);
259 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
264 kbase = bpf_arena_get_kern_vm_start(arena);
267 guard(mutex)(&arena->lock);
273 if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
277 ret = mtree_insert(&arena->mt, vmf->pgoff, MT_ENTRY, GFP_KERNEL);
284 mtree_erase(&arena->mt, vmf->pgoff);
288 ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page);
290 mtree_erase(&arena->mt, vmf->pgoff);
311 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
319 /* if user_vm_start was specified at arena creation time */
320 if (arena->user_vm_start) {
321 if (len > arena->user_vm_end - arena->user_vm_start)
323 if (len != arena->user_vm_end - arena->user_vm_start)
325 if (addr != arena->user_vm_start)
334 if (WARN_ON_ONCE(arena->user_vm_start))
342 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
344 guard(mutex)(&arena->lock);
345 if (arena->user_vm_start && arena->user_vm_start != vma->vm_start)
347 * If map_extra was not specified at arena creation time then
356 if (arena->user_vm_end && arena->user_vm_end != vma->vm_end)
364 if (remember_vma(arena, vma))
367 arena->user_vm_start = vma->vm_start;
368 arena->user_vm_end = vma->vm_end;
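
A user-space sketch of mmap-ing the arena and faulting a page in; the fd comes from the creation sketch above, and it assumes map_extra was left 0 so the kernel picks user_vm_start. When map_extra is set, the checks above require mmap to use exactly that address and length, and with BPF_F_SEGV_ON_FAULT a touch of a page the BPF program has not allocated raises SIGSEGV instead of populating it on demand.

#include <sys/mman.h>
#include <unistd.h>

static void *map_arena(int map_fd, size_t nr_pages)
{
	size_t len = nr_pages * sysconf(_SC_PAGESIZE);
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);

	if (base == MAP_FAILED)
		return NULL;
	/* First touch goes through arena_vm_fault() above: the page is
	 * allocated on demand and also mapped into kern_vm, so BPF programs
	 * see the same memory at kern_vm_start + lower 32 bits of the pointer.
	 */
	((char *)base)[0] = 1;
	return base;
}
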
381 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
383 if ((u64)off > arena->user_vm_end - arena->user_vm_start)
385 *imm = (unsigned long)arena->user_vm_start;
418 static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt, int node_id)
421 long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT;
422 u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena);
434 pgoff = compute_pgoff(arena, uaddr);
445 guard(mutex)(&arena->lock);
448 ret = mtree_insert_range(&arena->mt, pgoff, pgoff + page_cnt - 1,
451 ret = mtree_alloc_range(&arena->mt, &pgoff, MT_ENTRY,
456 ret = bpf_map_alloc_pages(&arena->map, GFP_KERNEL | __GFP_ZERO,
461 uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);
469 ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,
477 return clear_lo32(arena->user_vm_start) + uaddr32;
479 mtree_erase(&arena->mt, pgoff);
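
The return value at line 477 deserves a note: the BPF program receives a full user-space pointer whose upper 32 bits come from user_vm_start and whose lower 32 bits (uaddr32) select the page within the 4GB range. A hedged restatement, assuming clear_lo32() masks off the low 32 bits:

u64 arena_ptr = (arena->user_vm_start & ~0xffffffffULL) + uaddr32;
/* dereferenced via the scheme above: kern_vm_start + (u32)arena_ptr */
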
490 static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
494 list_for_each_entry(vml, &arena->vma_list, head)
499 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
508 full_uaddr = clear_lo32(arena->user_vm_start) + uaddr;
509 uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT));
515 guard(mutex)(&arena->lock);
517 pgoff = compute_pgoff(arena, uaddr);
519 mtree_store_range(&arena->mt, pgoff, pgoff + page_cnt - 1, NULL, GFP_KERNEL);
523 zap_pages(arena, full_uaddr, page_cnt);
525 kaddr = bpf_arena_get_kern_vm_start(arena) + uaddr;
536 zap_pages(arena, full_uaddr, 1);
537 vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);
548 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
553 return (void *)arena_alloc_pages(arena, (long)addr__ign, page_cnt, node_id);
559 struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
563 arena_free_pages(arena, (long)ptr__ign, page_cnt);
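
For completeness, a sketch of the BPF-program side that exercises these two kfuncs. The map declaration, section name, and page count are illustrative; the kfunc prototypes are abbreviated (the kernel selftests declare them with an __arena address-space attribute), and vmlinux.h is assumed to be generated with bpftool against a kernel that has arena support.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

#ifndef NUMA_NO_NODE
#define NUMA_NO_NODE (-1)
#endif

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 256);	/* pages */
} arena SEC(".maps");

void *bpf_arena_alloc_pages(void *map, void *addr, __u32 page_cnt,
			    int node_id, __u64 flags) __ksym;
void bpf_arena_free_pages(void *map, void *ptr, __u32 page_cnt) __ksym;

SEC("syscall")
int alloc_and_free(void *ctx)
{
	/* NULL addr lets mtree_alloc_range() (line 451 above) pick the page */
	void *page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);

	if (!page)
		return 1;
	bpf_arena_free_pages(&arena, page, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";
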