| /linux/kernel/bpf/ |
| H A D | arena.c |
      55  u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)  in bpf_arena_get_kern_vm_start() argument
      57      return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0;  in bpf_arena_get_kern_vm_start()
      60  u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)  in bpf_arena_get_user_vm_start() argument
      62      return arena ? arena->user_vm_start : 0;  in bpf_arena_get_user_vm_start()
      90  static long compute_pgoff(struct bpf_arena *arena, long uaddr)  in compute_pgoff() argument
      92      return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT;  in compute_pgoff()
      99  struct bpf_arena *arena;  in arena_map_alloc() local
     129  arena = bpf_map_area_alloc(sizeof(*arena), numa_node);  in arena_map_alloc()
     130  if (!arena)  in arena_map_alloc()
     133  arena->kern_vm = kern_vm;  in arena_map_alloc()
     [all …]
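The pairing of compute_pgoff() with the two getters above captures the arena's address scheme: an arena spans at most 4 GB, and its user and kernel mappings are page-for-page images of each other, so the page index derived from the low 32 bits of a user pointer locates the same page in the kernel vmalloc range. Below is a minimal standalone sketch of that arithmetic, assuming such a dual mapping; arena_user_to_kern() and its parameters are illustrative stand-ins, not a kernel API.

    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Sketch only: mirrors compute_pgoff() above. 'user_vm_start' and
     * 'kern_vm_start' stand in for arena->user_vm_start and
     * bpf_arena_get_kern_vm_start(arena). */
    static uint64_t arena_user_to_kern(uint64_t kern_vm_start,
                                       uint64_t user_vm_start,
                                       uint64_t uaddr)
    {
            /* Page index inside the arena; only the low 32 bits of the
             * delta matter because an arena is at most 4 GB. */
            uint64_t pgoff = (uint32_t)(uaddr - (uint32_t)user_vm_start) >> PAGE_SHIFT;

            /* Same page in the kernel-side mapping, same offset within it. */
            return kern_vm_start + (pgoff << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
    }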
|
| H A D | Makefile | 19 obj-$(CONFIG_BPF_SYSCALL) += arena.o range_tree.o
|
| H A D | core.c |
    3246  __weak u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena)  in bpf_arena_get_user_vm_start() argument
    3250  __weak u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena)  in bpf_arena_get_kern_vm_start() argument
|
| /linux/arch/alpha/kernel/ |
| H A D | pci_iommu.c |
      64  struct pci_iommu_arena *arena;  in iommu_arena_new_node() local
      75  arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES);  in iommu_arena_new_node()
      76  arena->ptes = memblock_alloc_or_panic(mem_size, align);  in iommu_arena_new_node()
      78  spin_lock_init(&arena->lock);  in iommu_arena_new_node()
      79  arena->hose = hose;  in iommu_arena_new_node()
      80  arena->dma_base = base;  in iommu_arena_new_node()
      81  arena->size = window_size;  in iommu_arena_new_node()
      82  arena->next_entry = 0;  in iommu_arena_new_node()
      86  arena->align_entry = 1;  in iommu_arena_new_node()
      88  return arena;  in iommu_arena_new_node()
      [all …]
|
| H A D | core_titan.c |
     584  struct pci_iommu_arena *arena;  member
     601  aper->arena = agp->hose->sg_pci;  in titan_agp_setup()
     603  aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,  in titan_agp_setup()
     612  aper->arena->dma_base + aper->pg_start * PAGE_SIZE;  in titan_agp_setup()
     625  status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);  in titan_agp_cleanup()
     629  iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);  in titan_agp_cleanup()
     630  status = iommu_release(aper->arena, aper->pg_start,  in titan_agp_cleanup()
     686  return iommu_bind(aper->arena, aper->pg_start + pg_start,  in titan_agp_bind_memory()
     694  return iommu_unbind(aper->arena, aper->pg_start + pg_start,  in titan_agp_unbind_memory()
     702  unsigned long baddr = addr - aper->arena->dma_base;  in titan_agp_translate()
     [all …]
|
| H A D | core_marvel.c |
     851  struct pci_iommu_arena *arena;  member
     867  aper->arena = agp->hose->sg_pci;  in marvel_agp_setup()
     869  aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,  in marvel_agp_setup()
     879  aper->arena->dma_base + aper->pg_start * PAGE_SIZE;  in marvel_agp_setup()
     892  status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);  in marvel_agp_cleanup()
     896  iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);  in marvel_agp_cleanup()
     897  status = iommu_release(aper->arena, aper->pg_start,  in marvel_agp_cleanup()
     975  return iommu_bind(aper->arena, aper->pg_start + pg_start,  in marvel_agp_bind_memory()
     983  return iommu_unbind(aper->arena, aper->pg_start + pg_start,  in marvel_agp_unbind_memory()
     991  unsigned long baddr = addr - aper->arena->dma_base;  in marvel_agp_translate()
     [all …]
|
| /linux/tools/testing/selftests/bpf/prog_tests/ |
| H A D | arena_atomics.c |
      19  ASSERT_EQ(skel->arena->add64_value, 3, "add64_value");  in test_add()
      20  ASSERT_EQ(skel->arena->add64_result, 1, "add64_result");  in test_add()
      22  ASSERT_EQ(skel->arena->add32_value, 3, "add32_value");  in test_add()
      23  ASSERT_EQ(skel->arena->add32_result, 1, "add32_result");  in test_add()
      25  ASSERT_EQ(skel->arena->add_stack_value_copy, 3, "add_stack_value");  in test_add()
      26  ASSERT_EQ(skel->arena->add_stack_result, 1, "add_stack_result");  in test_add()
      28  ASSERT_EQ(skel->arena->add_noreturn_value, 3, "add_noreturn_value");  in test_add()
      44  ASSERT_EQ(skel->arena->sub64_value, -1, "sub64_value");  in test_sub()
      45  ASSERT_EQ(skel->arena->sub64_result, 1, "sub64_result");  in test_sub()
      47  ASSERT_EQ(skel->arena  in test_sub()
      [all …]
| H A D | arena_list.c |
      52  ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");  in test_arena_list_add_del()
      53  ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems");  in test_arena_list_add_del()
      60  ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");  in test_arena_list_add_del()
|
| H A D | arena_htab.c | 46 area = bpf_map__initial_value(skel->maps.arena, &arena_sz); in test_arena_htab_llvm()
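These prog_tests read BPF-side results from plain userspace memory: libbpf mmap()s the arena map at load time, the generated skeleton exposes that mapping as skel->arena, and every __arena global becomes an ordinary field of it. A hedged sketch of the pattern follows; the arena_counters skeleton name is hypothetical, while bpf_map__initial_value() is the same libbpf call arena_htab.c uses above.

    #include <stdio.h>
    #include <bpf/libbpf.h>
    #include "arena_counters.skel.h"        /* hypothetical generated skeleton */

    int main(void)
    {
            struct arena_counters *skel;
            size_t arena_sz = 0;
            void *area;

            skel = arena_counters__open_and_load();
            if (!skel)
                    return 1;

            /* Base and byte size of the mmap()ed arena, as in arena_htab.c. */
            area = bpf_map__initial_value(skel->maps.arena, &arena_sz);
            printf("arena mapped at %p, %zu bytes\n", area, arena_sz);

            /* After running the program, __arena globals are read directly,
             * e.g. the ASSERT_EQ(skel->arena->add64_value, 3, ...) checks above. */
            arena_counters__destroy(skel);
            return 0;
    }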
|
| /linux/tools/testing/selftests/bpf/progs/ |
| H A D | verifier_arena.c |
      22  } arena SEC(".maps");
      31  page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
      35  page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
      39  no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
      46  bpf_arena_free_pages(&arena, (void __arena *)page2, 1);  in basic_alloc1()
      51  page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_alloc1()
      70  page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);  in basic_alloc2()
      88  bpf_arena_free_pages(&arena, (void __arena *)page1, 2);  in basic_alloc2()
     109  struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;  in basic_alloc3()
     126  page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in basic_reserve1()
     [all …]
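Pieced together from the fragments above, a minimal arena-using BPF program looks roughly like this. The map attributes (BPF_F_MMAPABLE, a two-page max_entries) and the SEC("syscall") entry point follow the selftests' conventions but are assumptions of this sketch, not requirements of the API.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_arena_common.h"   /* __arena and the arena kfunc declarations */

    #ifndef NUMA_NO_NODE
    #define NUMA_NO_NODE (-1)
    #endif

    struct {
            __uint(type, BPF_MAP_TYPE_ARENA);
            __uint(map_flags, BPF_F_MMAPABLE);
            __uint(max_entries, 2);          /* a two-page arena */
    } arena SEC(".maps");

    SEC("syscall")
    int alloc_one_page(void *ctx)
    {
            void __arena *page;

            /* Let the kernel pick any free page inside the arena. */
            page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
            if (!page)
                    return 1;

            /* Arena pointers are dereferenced like normal memory. */
            *(__u64 __arena *)page = 42;

            bpf_arena_free_pages(&arena, page, 1);
            return 0;
    }

    char _license[] SEC("license") = "GPL";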
|
| H A D | verifier_arena_large.c |
      18  } arena SEC(".maps");
      28  page1 = base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in big_alloc1()
      32  page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,  in big_alloc1()
      37  no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,  in big_alloc1()
      45  bpf_arena_free_pages(&arena, (void __arena *)page1, 1);  in big_alloc1()
      50  page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in big_alloc1()
      81  /* Get a separate region of the arena. */  in access_reserved()
      82  page = base = arena_base(&arena) + 16384 * PAGE_SIZE;  in access_reserved()
      84  ret = bpf_arena_reserve_pages(&arena, base, len);  in access_reserved()
     120  page = base = arena_base(&arena)  in request_partially_reserved()
     [all …]
| H A D | arena_atomics.c |
      21  } arena SEC(".maps");
     228  page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in uaf()
     229  bpf_arena_free_pages(&arena, page, 1);  in uaf()
|
| H A D | compute_live_registers.c |
      20  } arena SEC(".maps");
     395  __imm_addr(arena)  in addr_space_cast()
|
| H A D | arena_htab.c | 14 } arena SEC(".maps");
|
| H A D | arena_list.c | 19 } arena SEC(".maps");
|
| /linux/arch/powerpc/platforms/pseries/ |
| H A D | rtas-work-area.c |
      42  char *arena;  member
     137  const phys_addr_t pa_start = __pa(rwa_state.arena);  in rtas_work_area_allocator_init()
     144  if (!rwa_state.arena)  in rtas_work_area_allocator_init()
     157  err = gen_pool_add(pool, (unsigned long)rwa_state.arena,  in rtas_work_area_allocator_init()
     209  rwa_state.arena = memblock_alloc_try_nid(size, align, min, limit, nid);  in rtas_work_area_reserve_arena()
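Here "arena" is used in a third sense: a memblock-reserved buffer that backs a genalloc pool, from which fixed-size RTAS work areas are then carved. Below is a condensed sketch of that pattern with invented demo_* names; the real driver additionally constrains the allocation's physical placement so firmware can address it.

    #include <linux/genalloc.h>
    #include <linux/memblock.h>
    #include <linux/sizes.h>

    #define DEMO_ARENA_SZ   SZ_64K

    static struct gen_pool *demo_pool;
    static char *demo_arena;

    static int __init demo_arena_init(void)
    {
            /* Reserve one contiguous arena early in boot ... */
            demo_arena = memblock_alloc(DEMO_ARENA_SZ, PAGE_SIZE);
            if (!demo_arena)
                    return -ENOMEM;

            /* ... then let a genpool hand out page-granular chunks of it. */
            demo_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
            if (!demo_pool)
                    return -ENOMEM;

            return gen_pool_add(demo_pool, (unsigned long)demo_arena,
                                DEMO_ARENA_SZ, NUMA_NO_NODE);
    }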
|
| /linux/Documentation/driver-api/nvdimm/ |
| H A D | btt.rst |
      35  Each arena follows the same layout for its metadata, and all references in an
      36  arena are internal to it (with the exception of one field that points to the
      37  next arena). The following depicts the "On-disk" metadata layout::
     105  ABA         Arena Block Address - Block offset/number within an arena
     106  Premap ABA  The block offset into an arena, which was decided upon by range
     112  arena.
     117  the external LBA at 768G. This falls into the second arena, and of the 512G
     118  worth of blocks that this arena contributes, this block is at 256G. Thus, the
     157  While 'nfree' describes the number of concurrent IOs an arena can process
     183  The RTT is a simple, per arena table with 'nfree' entries. Every reader inserts
     [all …]
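The worked example in the excerpt (an external LBA at 768G landing 256G into the second arena) is plain division: with arenas capped at 512G, the arena index is the quotient and the in-arena offset is the remainder. A sketch of that split, ignoring the per-arena metadata overhead that the real BTT layout accounts for:

    #include <stdint.h>

    #define ARENA_SIZE  (512ULL << 30)      /* 512G per arena, as in the example */

    /* 768G -> arena_idx 1 (the second arena), arena_off 256G. */
    static void split_external_offset(uint64_t ext_off,
                                      uint64_t *arena_idx, uint64_t *arena_off)
    {
            *arena_idx = ext_off / ARENA_SIZE;
            *arena_off = ext_off % ARENA_SIZE;
    }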
|
| /linux/tools/testing/selftests/bpf/ |
| H A D | bpf_arena_alloc.h |
      33  page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);  in bpf_alloc()
      62  bpf_arena_free_pages(&arena, addr, 1);  in bpf_free()
|
| H A D | bpf_arena_common.h | 60 __weak char arena[1]; variable
|
| H A D | bpf_arena_htab.h | 95 void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); in htab_init()
|
| /linux/tools/sched_ext/include/scx/ |
| H A D | bpf_arena_common.h | 20 char __attribute__((weak)) arena[1]; variable
|
| /linux/arch/arm64/net/ |
| H A D | bpf_jit_comp.c |
     709  const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;  in emit_atomic_ld_st() local
     732  if (arena) {  in emit_atomic_ld_st()
     789  const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;  in emit_lse_atomic() local
     797  if (arena) {  in emit_lse_atomic()
    2094  ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);  in bpf_int_jit_compile()
    2095  ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);  in bpf_int_jit_compile()
|
| /linux/tools/bpf/bpftool/Documentation/ |
| H A D | bpftool-map.rst | 58 | | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** | **arena**
|
| /linux/arch/x86/net/ |
| H A D | bpf_jit_comp.c |
     792  if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))  in emit_bpf_tail_call_indirect()
     859  if (bpf_arena_get_kern_vm_start(bpf_prog->aux->arena))  in emit_bpf_tail_call_direct()
    1673  arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena);  in do_jit()
    1674  user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena);  in do_jit()
|
| /linux/include/linux/ |
| H A D | bpf.h |
     617  u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena);
     618  u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena);
    1677  struct bpf_arena *arena;  member
|