Lines matching refs:arena in kernel/bpf/arena.c (the BPF arena map implementation). Each hit lists the source line number, the line itself, and the enclosing function; the trailing "argument"/"local" notes whether arena is a parameter or a local variable at that point.
55 u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena) in bpf_arena_get_kern_vm_start() argument
57 return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0; in bpf_arena_get_kern_vm_start()
60 u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena) in bpf_arena_get_user_vm_start() argument
62 return arena ? arena->user_vm_start : 0; in bpf_arena_get_user_vm_start()
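The two getters above expose the arena's twin bases: a kernel-side vmalloc range (offset past half of the guard area, per GUARD_SZ / 2 at line 57) and the user-side mmap address. The scheme keeps the low 32 bits of both addresses identical, so translating a user pointer to its kernel alias is pure arithmetic. A minimal sketch assuming that lower-32-bit sharing; the helper name is illustrative:

	/* Illustrative helper: map an arena user address to its kernel alias.
	 * Only the low 32 bits of uaddr select the page; the kernel base
	 * supplies the high bits. */
	static inline u64 arena_uaddr_to_kaddr(struct bpf_arena *arena, u64 uaddr)
	{
		return bpf_arena_get_kern_vm_start(arena) + (u32)uaddr;
	}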
90 static long compute_pgoff(struct bpf_arena *arena, long uaddr) in compute_pgoff() argument
92 return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT; in compute_pgoff()
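compute_pgoff() deliberately works in 32-bit space, so it accepts either a full 64-bit arena pointer or just its low half. A worked example with illustrative numbers:

	user_vm_start = 0x7f1200000000      /* low 32 bits happen to be 0 */
	uaddr         = 0x7f1200003000      /* or just 0x3000: same result */
	(u32)(uaddr - (u32)user_vm_start) = 0x3000
	0x3000 >> PAGE_SHIFT (12)         = pgoff 3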
99 struct bpf_arena *arena; in arena_map_alloc() local
129 arena = bpf_map_area_alloc(sizeof(*arena), numa_node); in arena_map_alloc()
130 if (!arena) in arena_map_alloc()
133 arena->kern_vm = kern_vm; in arena_map_alloc()
134 arena->user_vm_start = attr->map_extra; in arena_map_alloc()
135 if (arena->user_vm_start) in arena_map_alloc()
136 arena->user_vm_end = arena->user_vm_start + vm_range; in arena_map_alloc()
138 INIT_LIST_HEAD(&arena->vma_list); in arena_map_alloc()
139 bpf_map_init_from_attr(&arena->map, attr); in arena_map_alloc()
140 range_tree_init(&arena->rt); in arena_map_alloc()
141 err = range_tree_set(&arena->rt, 0, attr->max_entries); in arena_map_alloc()
143 bpf_map_area_free(arena); in arena_map_alloc()
146 mutex_init(&arena->lock); in arena_map_alloc()
148 return &arena->map; in arena_map_alloc()
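arena_map_alloc() consumes the bpf(2) attributes seen above: max_entries gives the arena size in pages, BPF_F_MMAPABLE is mandatory, and a nonzero map_extra pins user_vm_start up front (lines 134-136). A hedged userspace sketch of the raw syscall; key/value sizes stay zero since the alloc path rejects anything else, and the values are illustrative:

	#include <linux/bpf.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hedged sketch: create an arena backed by up to nr_pages pages. */
	static int create_arena(unsigned int nr_pages)
	{
		union bpf_attr attr = {
			.map_type    = BPF_MAP_TYPE_ARENA,
			.max_entries = nr_pages,       /* arena size in pages */
			.map_flags   = BPF_F_MMAPABLE, /* mandatory for arenas */
			.map_extra   = 0,              /* 0: kernel picks user_vm_start */
		};

		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	}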
176 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in arena_map_free() local
184 if (WARN_ON_ONCE(!list_empty(&arena->vma_list))) in arena_map_free()
193 apply_to_existing_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena), in arena_map_free()
195 free_vm_area(arena->kern_vm); in arena_map_free()
196 range_tree_destroy(&arena->rt); in arena_map_free()
197 bpf_map_area_free(arena); in arena_map_free()
228 static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma) in remember_vma() argument
238 list_add(&vml->head, &arena->vma_list); in remember_vma()
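remember_vma() records every user VMA that maps the arena, so a later free can zap PTEs in each of them (see zap_pages() below). A sketch of the bookkeeping node the list_add() above implies; the real struct lives in kernel/bpf/arena.c and may carry extra fields:

	struct vma_list {
		struct vm_area_struct *vma;   /* the user mapping itself */
		struct list_head head;        /* links into arena->vma_list */
	};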
252 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in arena_vm_close() local
257 guard(mutex)(&arena->lock); in arena_vm_close()
267 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in arena_vm_fault() local
272 kbase = bpf_arena_get_kern_vm_start(arena); in arena_vm_fault()
275 guard(mutex)(&arena->lock); in arena_vm_fault()
281 if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT) in arena_vm_fault()
285 ret = range_tree_clear(&arena->rt, vmf->pgoff, 1); in arena_vm_fault()
292 range_tree_set(&arena->rt, vmf->pgoff, 1); in arena_vm_fault()
296 ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page); in arena_vm_fault()
298 range_tree_set(&arena->rt, vmf->pgoff, 1); in arena_vm_fault()
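Taken together, lines 281-298 trace the on-demand population policy: a fault on a page with no backing either raises SIGSEGV (strict mode) or allocates and maps a fresh page, rolling the range tree back on failure. A condensed sketch, not verbatim kernel code:

	/* user space touched an arena page that has no kernel page yet */
	if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT)
		return VM_FAULT_SIGSEGV;          /* strict: no implicit alloc */
	if (range_tree_clear(&arena->rt, vmf->pgoff, 1))
		return VM_FAULT_SIGSEGV;          /* slot unavailable */
	/* allocate one page, then mirror it into the kernel VM area */
	if (vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page)) {
		range_tree_set(&arena->rt, vmf->pgoff, 1);  /* undo the claim */
		return VM_FAULT_SIGSEGV;
	}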
319 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in arena_get_unmapped_area() local
328 if (arena->user_vm_start) { in arena_get_unmapped_area()
329 if (len > arena->user_vm_end - arena->user_vm_start) in arena_get_unmapped_area()
331 if (len != arena->user_vm_end - arena->user_vm_start) in arena_get_unmapped_area()
333 if (addr != arena->user_vm_start) in arena_get_unmapped_area()
342 if (WARN_ON_ONCE(arena->user_vm_start)) in arena_get_unmapped_area()
350 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in arena_map_mmap() local
352 guard(mutex)(&arena->lock); in arena_map_mmap()
353 if (arena->user_vm_start && arena->user_vm_start != vma->vm_start) in arena_map_mmap()
364 if (arena->user_vm_end && arena->user_vm_end != vma->vm_end) in arena_map_mmap()
372 if (remember_vma(arena, vma)) in arena_map_mmap()
375 arena->user_vm_start = vma->vm_start; in arena_map_mmap()
376 arena->user_vm_end = vma->vm_end; in arena_map_mmap()
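The first successful mmap() freezes the arena's user window: lines 375-376 latch vm_start/vm_end, and later mappings must reproduce them exactly (lines 353 and 364). A hedged userspace sketch, continuing from a map_fd created as above; note that libbpf normally performs this mmap itself when it loads an arena map:

	#include <sys/mman.h>
	#include <unistd.h>

	/* map the whole arena: length = max_entries pages, offset 0 */
	void *base = mmap(NULL, 100 * sysconf(_SC_PAGESIZE),
			  PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);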
389 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in arena_map_direct_value_addr() local
391 if ((u64)off > arena->user_vm_end - arena->user_vm_start) in arena_map_direct_value_addr()
393 *imm = (unsigned long)arena->user_vm_start; in arena_map_direct_value_addr()
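arena_map_direct_value_addr() is what lets a program's load of the map resolve to the arena's user base address; the check at line 391 only rejects offsets beyond the arena window. A worked example with illustrative numbers:

	user_vm_end - user_vm_start = 4 pages = 0x4000
	off = 0x3000  ->  accepted, *imm = user_vm_start
	off = 0x5000  ->  rejected, past the arena window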
426 static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt, int node_id) in arena_alloc_pages() argument
429 long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT; in arena_alloc_pages()
430 u64 kern_vm_start = bpf_arena_get_kern_vm_start(arena); in arena_alloc_pages()
442 pgoff = compute_pgoff(arena, uaddr); in arena_alloc_pages()
453 guard(mutex)(&arena->lock); in arena_alloc_pages()
456 ret = is_range_tree_set(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
459 ret = range_tree_clear(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
461 ret = pgoff = range_tree_find(&arena->rt, page_cnt); in arena_alloc_pages()
463 ret = range_tree_clear(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
468 ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages); in arena_alloc_pages()
472 uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE); in arena_alloc_pages()
480 ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32, in arena_alloc_pages()
488 return clear_lo32(arena->user_vm_start) + uaddr32; in arena_alloc_pages()
490 range_tree_set(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
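Lines 472 and 488 split and rejoin the address: pgoff selects a page, uaddr32 keeps only the low 32 bits, and the return value glues those onto the high half of the user base. A worked example with illustrative numbers:

	user_vm_start = 0x7f1200000000, pgoff = 3
	uaddr32 = (u32)(0x7f1200000000 + 3 * 0x1000)  = 0x3000
	return  clear_lo32(0x7f1200000000) + 0x3000   = 0x7f1200003000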
501 static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt) in zap_pages() argument
505 list_for_each_entry(vml, &arena->vma_list, head) in zap_pages()
510 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt) in arena_free_pages() argument
519 full_uaddr = clear_lo32(arena->user_vm_start) + uaddr; in arena_free_pages()
520 uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT)); in arena_free_pages()
526 guard(mutex)(&arena->lock); in arena_free_pages()
528 pgoff = compute_pgoff(arena, uaddr); in arena_free_pages()
530 range_tree_set(&arena->rt, pgoff, page_cnt); in arena_free_pages()
534 zap_pages(arena, full_uaddr, page_cnt); in arena_free_pages()
536 kaddr = bpf_arena_get_kern_vm_start(arena) + uaddr; in arena_free_pages()
547 zap_pages(arena, full_uaddr, 1); in arena_free_pages()
548 vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE); in arena_free_pages()
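On the free side, line 519 rebuilds the full user address from the low 32 bits of uaddr, and line 520 clamps the request to the arena's end before any page is touched. A worked example with illustrative numbers:

	user_vm_start = 0x7f1200000000, user_vm_end = 0x7f1200004000 (4 pages)
	free request: uaddr = 0x2000, page_cnt = 10
	full_uaddr = clear_lo32(0x7f1200000000) + 0x2000 = 0x7f1200002000
	uaddr_end  = min(0x7f1200004000, 0x7f120000c000) = 0x7f1200004000
	page_cnt is clamped to (uaddr_end - full_uaddr) >> 12 = 2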
557 static int arena_reserve_pages(struct bpf_arena *arena, long uaddr, u32 page_cnt) in arena_reserve_pages() argument
559 long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT; in arena_reserve_pages()
566 pgoff = compute_pgoff(arena, uaddr); in arena_reserve_pages()
570 guard(mutex)(&arena->lock); in arena_reserve_pages()
573 ret = is_range_tree_set(&arena->rt, pgoff, page_cnt); in arena_reserve_pages()
578 return range_tree_clear(&arena->rt, pgoff, page_cnt); in arena_reserve_pages()
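arena_reserve_pages() only flips range-tree state: the span is marked in use without allocating backing pages, so future allocations skip it. A hedged BPF-side usage sketch; the map-pointer calling convention follows the kernel's arena kfuncs, and ptr must already lie inside the arena:

	/* carve one page out of the allocator; errors (e.g. -EBUSY) if any
	 * page in the span is already allocated */
	int err = bpf_arena_reserve_pages(&arena, ptr, 1);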
587 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in bpf_arena_alloc_pages() local
592 return (void *)arena_alloc_pages(arena, (long)addr__ign, page_cnt, node_id); in bpf_arena_alloc_pages()
598 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in bpf_arena_free_pages() local
602 arena_free_pages(arena, (long)ptr__ign, page_cnt); in bpf_arena_free_pages()
608 struct bpf_arena *arena = container_of(map, struct bpf_arena, map); in bpf_arena_reserve_pages() local
616 return arena_reserve_pages(arena, (long)ptr__ign, page_cnt); in bpf_arena_reserve_pages()
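A hedged end-to-end sketch from the BPF side, exercising the alloc/free pair behind the kfunc wrappers above. It follows the conventions of the kernel selftests (bpf_arena_common.h supplies the __arena address-space macro, the kfunc prototypes, and NUMA_NO_NODE); program and map names are illustrative:

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include "bpf_arena_common.h"

	struct {
		__uint(type, BPF_MAP_TYPE_ARENA);
		__uint(map_flags, BPF_F_MMAPABLE);
		__uint(max_entries, 10);        /* ten pages of arena */
	} arena SEC(".maps");

	SEC("syscall")
	int arena_smoke(void *ctx)
	{
		/* one page, any position in the arena, any NUMA node, no flags */
		void __arena *p = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);

		if (!p)
			return 1;
		*(u64 __arena *)p = 42;         /* user space sees this via mmap */
		bpf_arena_free_pages(&arena, p, 1);
		return 0;
	}

	char _license[] SEC("license") = "GPL";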