
Searched refs:arena (Results 1 – 25 of 33) sorted by relevance

/linux/drivers/nvdimm/
btt.c
29 static struct device *to_dev(struct arena_info *arena) in to_dev() argument
31 return &arena->nd_btt->dev; in to_dev()
39 static int arena_read_bytes(struct arena_info *arena, resource_size_t offset, in arena_read_bytes() argument
42 struct nd_btt *nd_btt = arena->nd_btt; in arena_read_bytes()
50 static int arena_write_bytes(struct arena_info *arena, resource_size_t offset, in arena_write_bytes() argument
53 struct nd_btt *nd_btt = arena->nd_btt; in arena_write_bytes()
61 static int btt_info_write(struct arena_info *arena, struct btt_sb *super) in btt_info_write() argument
70 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512), in btt_info_write()
71 "arena->infooff: %#llx is unaligned\n", arena->infooff); in btt_info_write()
72 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512), in btt_info_write()
[all …]
/linux/kernel/bpf/
arena.c
49 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable);
76 u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena) in bpf_arena_get_kern_vm_start() argument
78 return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0; in bpf_arena_get_kern_vm_start()
81 u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena) in bpf_arena_get_user_vm_start() argument
83 return arena ? arena->user_vm_start : 0; in bpf_arena_get_user_vm_start()
111 static long compute_pgoff(struct bpf_arena *arena, long uaddr) in compute_pgoff() argument
113 return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT; in compute_pgoff()
170 static int populate_pgtable_except_pte(struct bpf_arena *arena) in populate_pgtable_except_pte() argument
172 return apply_to_page_range(&init_mm, bpf_arena_get_kern_vm_start(arena), in populate_pgtable_except_pte()
180 struct bpf_arena *arena; in arena_map_alloc() local
[all …]
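The compute_pgoff() hit above is the arena's core address arithmetic: the subtraction is deliberately truncated to 32 bits, since a BPF arena spans at most 4GB, so the same low bits locate a page regardless of which mapping the pointer came from. A minimal standalone sketch of that arithmetic; the function and constant names are mine, and 4K pages are assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumption: 4K pages */

    /* Mirrors compute_pgoff() above: truncating both sides to 32 bits
     * yields the page offset within the (at most 4GB) arena even when
     * the kernel and user mappings differ in their upper address bits. */
    static long sketch_compute_pgoff(uint64_t user_vm_start, uint64_t uaddr)
    {
        return (uint32_t)(uaddr - (uint32_t)user_vm_start) >> PAGE_SHIFT;
    }

    int main(void)
    {
        uint64_t base = 0x7f2a00000000ULL;

        printf("%ld\n", sketch_compute_pgoff(base, base + 3 * 4096)); /* 3 */
        return 0;
    }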
Makefile
19 obj-$(CONFIG_BPF_SYSCALL) += arena.o range_tree.o
/linux/arch/alpha/kernel/
pci_iommu.c
64 struct pci_iommu_arena *arena; in iommu_arena_new_node() local
75 arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES); in iommu_arena_new_node()
76 arena->ptes = memblock_alloc_or_panic(mem_size, align); in iommu_arena_new_node()
78 spin_lock_init(&arena->lock); in iommu_arena_new_node()
79 arena->hose = hose; in iommu_arena_new_node()
80 arena->dma_base = base; in iommu_arena_new_node()
81 arena->size = window_size; in iommu_arena_new_node()
82 arena->next_entry = 0; in iommu_arena_new_node()
86 arena->align_entry = 1; in iommu_arena_new_node()
88 return arena; in iommu_arena_new_node()
[all …]
core_titan.c
584 struct pci_iommu_arena *arena; member
601 aper->arena = agp->hose->sg_pci; in titan_agp_setup()
603 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in titan_agp_setup()
612 aper->arena->dma_base + aper->pg_start * PAGE_SIZE; in titan_agp_setup()
625 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
629 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in titan_agp_cleanup()
630 status = iommu_release(aper->arena, aper->pg_start, in titan_agp_cleanup()
686 return iommu_bind(aper->arena, aper->pg_start + pg_start, in titan_agp_bind_memory()
694 return iommu_unbind(aper->arena, aper->pg_start + pg_start, in titan_agp_unbind_memory()
702 unsigned long baddr = addr - aper->arena->dma_base; in titan_agp_translate()
[all …]
core_marvel.c
851 struct pci_iommu_arena *arena; member
867 aper->arena = agp->hose->sg_pci; in marvel_agp_setup()
869 aper->pg_start = iommu_reserve(aper->arena, aper->pg_count, in marvel_agp_setup()
879 aper->arena->dma_base + aper->pg_start * PAGE_SIZE; in marvel_agp_setup()
892 status = iommu_release(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
896 iommu_unbind(aper->arena, aper->pg_start, aper->pg_count); in marvel_agp_cleanup()
897 status = iommu_release(aper->arena, aper->pg_start, in marvel_agp_cleanup()
975 return iommu_bind(aper->arena, aper->pg_start + pg_start, in marvel_agp_bind_memory()
983 return iommu_unbind(aper->arena, aper->pg_start + pg_start, in marvel_agp_unbind_memory()
991 unsigned long baddr = addr - aper->arena->dma_base; in marvel_agp_translate()
[all …]
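The core_titan.c and core_marvel.c hits are the same pattern twice: the AGP aperture reserves pg_count PTEs in the hose's scatter-gather arena (iommu_reserve), derives its bus base from arena->dma_base, binds and unbinds pages (iommu_bind/iommu_unbind), and translates a bus address back by subtracting dma_base. A condensed sketch of the translate step only, with stand-in types and alpha's 8K pages assumed; the real code also range-checks the offset and decodes the PTE:

    #define PAGE_SHIFT 13 /* assumption: alpha's 8K pages */

    /* Stand-in types for illustration; the real definitions live under
     * arch/alpha/kernel/. */
    struct arena_sk { unsigned long dma_base; unsigned long *ptes; };
    struct aper_sk  { struct arena_sk *arena; long pg_start, pg_count; };

    /* The shared logic of titan_agp_translate()/marvel_agp_translate():
     * a bus address is an offset from the arena's dma_base, and that
     * offset, in pages, indexes the arena's PTE table. */
    static unsigned long agp_translate_sketch(struct aper_sk *aper,
                                              unsigned long addr)
    {
        unsigned long baddr = addr - aper->arena->dma_base;

        return aper->arena->ptes[baddr >> PAGE_SHIFT];
    }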
/linux/tools/testing/selftests/bpf/prog_tests/
arena_atomics.c
19 ASSERT_EQ(skel->arena->add64_value, 3, "add64_value"); in test_add()
20 ASSERT_EQ(skel->arena->add64_result, 1, "add64_result"); in test_add()
22 ASSERT_EQ(skel->arena->add32_value, 3, "add32_value"); in test_add()
23 ASSERT_EQ(skel->arena->add32_result, 1, "add32_result"); in test_add()
25 ASSERT_EQ(skel->arena->add_stack_value_copy, 3, "add_stack_value"); in test_add()
26 ASSERT_EQ(skel->arena->add_stack_result, 1, "add_stack_result"); in test_add()
28 ASSERT_EQ(skel->arena->add_noreturn_value, 3, "add_noreturn_value"); in test_add()
44 ASSERT_EQ(skel->arena->sub64_value, -1, "sub64_value"); in test_sub()
45 ASSERT_EQ(skel->arena->sub64_result, 1, "sub64_result"); in test_sub()
47 ASSERT_EQ(skel->arena in test_sub()
[all …]
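These assertions read BPF arena globals directly from userspace: libbpf mmaps the arena into the test process and exposes it as the skeleton's arena pointer. A sketch of the surrounding harness, assuming the usual selftest headers and skeleton entry points, with the program-trigger step elided:

    #include <test_progs.h>
    #include "arena_atomics.skel.h" /* generated by bpftool gen skeleton */

    static void test_add_sketch(void)
    {
        struct arena_atomics *skel;

        skel = arena_atomics__open_and_load();
        if (!ASSERT_OK_PTR(skel, "arena_atomics__open_and_load"))
            return;

        /* ... attach and trigger the BPF programs here ... */

        /* skel->arena maps the arena into this process, so globals the
         * BPF side placed there read back as ordinary struct fields: */
        ASSERT_EQ(skel->arena->add64_value, 3, "add64_value");

        arena_atomics__destroy(skel);
    }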
arena_list.c
58 ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems"); in test_arena_list_add_del()
59 ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems"); in test_arena_list_add_del()
66 ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems"); in test_arena_list()
arena_htab.c
46 area = bpf_map__initial_value(skel->maps.arena, &arena_sz); in test_arena_htab_llvm()
/linux/tools/testing/selftests/bpf/progs/
verifier_arena.c
18 __uint(max_entries, 2); /* arena of two pages close to 32-bit boundary*/
24 } arena SEC(".maps");
33 page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in basic_alloc1()
37 page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in basic_alloc1()
41 no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in basic_alloc1()
48 bpf_arena_free_pages(&arena, (void __arena *)page2, 1); in basic_alloc1()
64 page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
68 page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in basic_alloc2()
72 no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in basic_alloc2()
79 bpf_arena_free_pages(&arena, (voi in basic_alloc2()
[all …]
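The verifier_arena.c hits show both halves of the BPF-side API: an arena map declared in the .maps section, and the bpf_arena_alloc_pages()/bpf_arena_free_pages() kfuncs operating on it. A minimal sketch combining them, assuming the selftests' usual headers (the map_extra placement hint and richer error paths are omitted):

    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>
    #include "bpf_arena_common.h"

    struct {
        __uint(type, BPF_MAP_TYPE_ARENA);
        __uint(map_flags, BPF_F_MMAPABLE);
        __uint(max_entries, 2); /* a two-page arena, as in the excerpt */
    } arena SEC(".maps");

    SEC("syscall")
    int alloc_then_free(void *ctx)
    {
        void __arena *page;

        /* NULL address: let the kernel pick a free spot in the arena. */
        page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
        if (!page)
            return 1;
        bpf_arena_free_pages(&arena, page, 1);
        return 0;
    }

    char _license[] SEC("license") = "GPL";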
verifier_arena_large.c
18 } arena SEC(".maps");
28 base = (u64)arena_base(&arena); in big_alloc1()
30 page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in big_alloc1()
38 page2 = bpf_arena_alloc_pages(&arena, (void __arena *)(ARENA_SIZE - 2 * PAGE_SIZE), in big_alloc1()
44 /* Test for the guard region at the end of the arena. */ in big_alloc1()
45 no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE - PAGE_SIZE, in big_alloc1()
50 no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE, in big_alloc1()
58 bpf_arena_free_pages(&arena, (void __arena *)page1, 1); in big_alloc1()
63 page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in big_alloc1()
94 /* Get a separate region of the arena in access_reserved()
[all …]
arena_atomics.c
21 } arena SEC(".maps");
228 page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in uaf()
229 bpf_arena_free_pages(&arena, page, 1); in uaf()
compute_live_registers.c
20 } arena SEC(".maps");
382 "r1 = %[arena] ll;" in addr_space_cast()
395 __imm_addr(arena) in addr_space_cast()
arena_htab.c
14 } arena SEC(".maps");
arena_list.c
19 } arena SEC(".maps");
/linux/arch/powerpc/platforms/pseries/
rtas-work-area.c
42 char *arena; member
137 const phys_addr_t pa_start = __pa(rwa_state.arena); in rtas_work_area_allocator_init()
144 if (!rwa_state.arena) in rtas_work_area_allocator_init()
157 err = gen_pool_add(pool, (unsigned long)rwa_state.arena, in rtas_work_area_allocator_init()
209 rwa_state.arena = memblock_alloc_try_nid(size, align, min, limit, nid); in rtas_work_area_reserve_arena()
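The RTAS work-area hits show a two-stage arena: a memblock allocation reserved early in boot (rtas_work_area_reserve_arena), later handed to a genalloc pool that carves out fixed-size work areas. A condensed sketch of the pool setup; the allocation order below is illustrative, not the driver's:

    #include <linux/genalloc.h>
    #include <linux/numa.h>

    /* Condensed from rtas_work_area_allocator_init() above: wrap the
     * previously reserved arena in a gen_pool so callers can allocate
     * and free sub-ranges at runtime. */
    static struct gen_pool *rwa_pool_sketch(char *arena, size_t size)
    {
        struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);

        if (!pool)
            return NULL;
        if (gen_pool_add(pool, (unsigned long)arena, size, NUMA_NO_NODE)) {
            gen_pool_destroy(pool);
            return NULL;
        }
        return pool;
    }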
/linux/Documentation/driver-api/nvdimm/
btt.rst
35 Each arena follows the same layout for its metadata, and all references in an
36 arena are internal to it (with the exception of one field that points to the
37 next arena). The following depicts the "On-disk" metadata layout::
105 ABA Arena Block Address - Block offset/number within an arena
106 Premap ABA The block offset into an arena, which was decided upon by range
112 arena.
117 the external LBA at 768G. This falls into the second arena, and of the 512G
118 worth of blocks that this arena contributes, this block is at 256G. Thus, the
157 While 'nfree' describes the number of concurrent IOs an arena can process
183 The RTT is a simple, per arena table with 'nfree' entries. Every reader inserts
[all …]
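The btt.rst excerpt's arithmetic is simple to make concrete: arenas cover at most 512G each, so an external byte offset splits into an arena index and an offset within that arena. A tiny program reproducing the doc's 768G example:

    #include <stdio.h>

    #define GiB (1ULL << 30)
    #define ARENA_SLICE (512 * GiB) /* each BTT arena covers at most 512G */

    int main(void)
    {
        unsigned long long ext_off = 768 * GiB; /* the doc's example offset */

        /* 768G / 512G -> arena 1 (the second arena); 768G % 512G -> 256G,
         * matching the btt.rst text above. */
        printf("arena %llu, offset %lluG\n",
               ext_off / ARENA_SLICE, (ext_off % ARENA_SLICE) / GiB);
        return 0;
    }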
/linux/tools/testing/selftests/bpf/
bpf_arena_alloc.h
33 page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); in bpf_alloc()
62 bpf_arena_free_pages(&arena, addr, 1); in bpf_free()
DENYLIST.asan
1 *arena*
bpf_arena_common.h
60 __weak char arena[1]; variable
bpf_arena_htab.h
95 void __arena *buckets = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0); in htab_init()
/linux/tools/sched_ext/include/scx/
bpf_arena_common.h
20 char __attribute__((weak)) arena[1]; variable
/linux/arch/riscv/net/
bpf_jit_core.c
82 ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); in bpf_int_jit_compile()
83 ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena); in bpf_int_jit_compile()
/linux/arch/arm64/net/
bpf_jit_comp.c
709 const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC; in emit_atomic_ld_st() local
732 if (arena) { in emit_atomic_ld_st()
788 const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC; in emit_lse_atomic() local
796 if (arena) { in emit_lse_atomic()
2087 ctx.user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena); in bpf_int_jit_compile()
2088 ctx.arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena); in bpf_int_jit_compile()
/linux/arch/powerpc/net/
bpf_jit_comp.c
207 cgctx.arena_vm_start = bpf_arena_get_kern_vm_start(fp->aux->arena); in bpf_int_jit_compile()
208 cgctx.user_vm_start = bpf_arena_get_user_vm_start(fp->aux->arena); in bpf_int_jit_compile()
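The riscv, arm64, and powerpc hits are one pattern: before emitting code, each JIT records both bases of the program's arena so the generated code can translate arena pointers between the kernel and user mappings. A sketch of that pattern; the context struct is illustrative, as each arch keeps these fields in its own codegen context:

    #include <linux/bpf.h>

    struct jit_ctx_sk {
        u64 arena_vm_start; /* kernel-side base of the arena mapping */
        u64 user_vm_start;  /* userspace base of the same arena */
    };

    static void jit_capture_arena(struct jit_ctx_sk *ctx, struct bpf_prog *prog)
    {
        ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
        ctx->user_vm_start  = bpf_arena_get_user_vm_start(prog->aux->arena);
    }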
