Lines matching full:array

22 static void bpf_array_free_percpu(struct bpf_array *array)
26 for (i = 0; i < array->map.max_entries; i++) {
27 free_percpu(array->pptrs[i]);
32 static int bpf_array_alloc_percpu(struct bpf_array *array)
37 for (i = 0; i < array->map.max_entries; i++) {
38 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
41 bpf_array_free_percpu(array);
44 array->pptrs[i] = ptr;
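
The two fragments above are the per-CPU element allocator and its teardown: one percpu allocation per array slot, with everything unwound if any allocation fails. Below is a minimal sketch of the same allocate-or-unwind pattern, using the generic __alloc_percpu_gfp()/free_percpu() helpers rather than the bpf_map_alloc_percpu() accounting wrapper; the demo_* struct and function names are illustrative, not kernel APIs, and the struct is assumed to be zero-allocated so free_percpu(NULL) is a harmless no-op.

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct demo_array {
	u32 max_entries;
	u32 elem_size;
	void __percpu *pptrs[];	/* one per-CPU allocation per element */
};

static void demo_free_percpu(struct demo_array *a)
{
	u32 i;

	/* safe even for slots never allocated: free_percpu(NULL) is a no-op */
	for (i = 0; i < a->max_entries; i++)
		free_percpu(a->pptrs[i]);
}

static int demo_alloc_percpu(struct demo_array *a)
{
	void __percpu *ptr;
	u32 i;

	for (i = 0; i < a->max_entries; i++) {
		/* 8-byte alignment, matching the fragment at line 38 */
		ptr = __alloc_percpu_gfp(a->elem_size, 8, GFP_KERNEL);
		if (!ptr) {
			demo_free_percpu(a);	/* unwind everything allocated so far */
			return -ENOMEM;
		}
		a->pptrs[i] = ptr;
	}
	return 0;
}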
90 struct bpf_array *array;
106 /* round up array size to nearest power of 2,
115 array_size = sizeof(*array);
120 * ensure array->value is exactly page-aligned
138 array = data + PAGE_ALIGN(sizeof(struct bpf_array))
141 array = bpf_map_area_alloc(array_size, numa_node);
143 if (!array)
145 array->index_mask = index_mask;
146 array->map.bypass_spec_v1 = bypass_spec_v1;
149 bpf_map_init_from_attr(&array->map, attr);
150 array->elem_size = elem_size;
152 if (percpu && bpf_array_alloc_percpu(array)) {
153 bpf_map_area_free(array);
157 return &array->map;
160 static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
162 return array->value + (u64)array->elem_size * index;
168 struct bpf_array *array = container_of(map, struct bpf_array, map);
171 if (unlikely(index >= array->map.max_entries))
174 return array->value + (u64)array->elem_size * (index & array->index_mask);
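
array_map_lookup_elem() above pairs the bounds check with an unconditional AND against index_mask; because the element count is rounded up to a power of two at allocation time (per the line-106 comment), the mask keeps even a speculatively executed out-of-bounds index inside the value area. A hedged sketch of that idea follows, with illustrative demo_* names rather than the actual kernel structures.

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/log2.h>

struct demo_array {
	u32 max_entries;
	u32 elem_size;
	u32 index_mask;		/* roundup_pow_of_two(max_entries) - 1 */
	char value[];		/* (index_mask + 1) * elem_size bytes */
};

static u32 demo_index_mask(u32 max_entries)
{
	/* power-of-two round-up, as in the "round up array size" comment;
	 * max_entries is assumed to be non-zero */
	return roundup_pow_of_two(max_entries) - 1;
}

static void *demo_lookup(struct demo_array *a, u32 index)
{
	if (unlikely(index >= a->max_entries))
		return NULL;
	/* The AND runs even though the bounds check passed: under speculative
	 * execution the check may be bypassed, and the mask clamps the offset
	 * into the power-of-two-sized value area. */
	return a->value + (u64)a->elem_size * (index & a->index_mask);
}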
180 struct bpf_array *array = container_of(map, struct bpf_array, map);
187 *imm = (unsigned long)array->value;
194 struct bpf_array *array = container_of(map, struct bpf_array, map);
195 u64 base = (unsigned long)array->value;
196 u64 range = array->elem_size;
210 struct bpf_array *array = container_of(map, struct bpf_array, map);
212 u32 elem_size = array->elem_size;
224 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
243 struct bpf_array *array = container_of(map, struct bpf_array, map);
246 if (unlikely(index >= array->map.max_entries))
249 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
255 struct bpf_array *array = container_of(map, struct bpf_array, map);
270 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
286 struct bpf_array *array = container_of(map, struct bpf_array, map);
292 if (unlikely(index >= array->map.max_entries))
295 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
300 struct bpf_array *array = container_of(map, struct bpf_array, map);
306 if (unlikely(index >= array->map.max_entries))
313 size = array->elem_size;
315 pptr = array->pptrs[index & array->index_mask];
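
The fragment above (the per-CPU copy path around lines 300-315) flattens one value per possible CPU into the caller's buffer, with each slot padded to 8 bytes. From userspace, that means a lookup on a BPF_MAP_TYPE_PERCPU_ARRAY needs a buffer of num-possible-CPUs padded slots. A hedged libbpf sketch follows; map_fd, key and value_size are caller-supplied assumptions, and the printout assumes the value starts with a u64.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int read_percpu_value(int map_fd, __u32 key, size_t value_size)
{
	int ncpus = libbpf_num_possible_cpus();
	size_t slot = (value_size + 7) & ~7UL;	/* kernel pads each slot to 8 */
	void *buf;
	int err, cpu;

	if (ncpus < 0)
		return ncpus;

	buf = calloc(ncpus, slot);
	if (!buf)
		return -ENOMEM;

	err = bpf_map_lookup_elem(map_fd, &key, buf);
	if (!err)
		for (cpu = 0; cpu < ncpus; cpu++)
			printf("cpu %d: first u64 = %llu\n", cpu,
			       *(unsigned long long *)((char *)buf + cpu * slot));

	free(buf);
	return err;
}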
328 struct bpf_array *array = container_of(map, struct bpf_array, map);
332 if (index >= array->map.max_entries) {
337 if (index == array->map.max_entries - 1)
348 struct bpf_array *array = container_of(map, struct bpf_array, map);
356 if (unlikely(index >= array->map.max_entries))
368 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
369 val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
371 bpf_obj_free_fields(array->map.record, val);
373 val = array->value +
374 (u64)array->elem_size * (index & array->index_mask);
379 bpf_obj_free_fields(array->map.record, val);
387 struct bpf_array *array = container_of(map, struct bpf_array, map);
397 if (unlikely(index >= array->map.max_entries))
411 size = array->elem_size;
413 pptr = array->pptrs[index & array->index_mask];
416 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
429 static void *array_map_vmalloc_addr(struct bpf_array *array)
431 return (void *)round_down((unsigned long)array, PAGE_SIZE);
436 struct bpf_array *array = container_of(map, struct bpf_array, map);
443 for (i = 0; i < array->map.max_entries; i++) {
445 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
447 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
455 struct bpf_array *array = container_of(map, struct bpf_array, map);
459 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
460 for (i = 0; i < array->map.max_entries; i++) {
461 void __percpu *pptr = array->pptrs[i & array->index_mask];
470 for (i = 0; i < array->map.max_entries; i++)
471 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
475 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
476 bpf_array_free_percpu(array);
478 if (array->map.map_flags & BPF_F_MMAPABLE)
479 bpf_map_area_free(array_map_vmalloc_addr(array));
481 bpf_map_area_free(array);
508 struct bpf_array *array = container_of(map, struct bpf_array, map);
516 pptr = array->pptrs[index & array->index_mask];
551 /* bpf array can only take a u32 key. This check makes sure
562 struct bpf_array *array = container_of(map, struct bpf_array, map);
563 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
569 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
572 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
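
The mmap fragment above (lines 562-572) offsets the mapping by PAGE_ALIGN(sizeof(*array)) so that userspace offset 0 lands on the page-aligned value area, and caps the length at the page-aligned data size. None of that is visible from userspace: create the array with BPF_F_MMAPABLE and mmap the map fd. A hedged sketch using libbpf's bpf_map_create(); the map name "demo" and the sizes are arbitrary choices, not taken from the source.

#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
	__u32 max_entries = 256, value_size = sizeof(uint64_t);
	uint64_t *vals;
	size_t len;
	int fd;

	fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo", sizeof(__u32),
			    value_size, max_entries, &opts);
	if (fd < 0)
		return 1;

	/* Mapping length must stay within the page-aligned value area
	 * (see the PAGE_ALIGN(max_entries * elem_size) check above). */
	len = (size_t)max_entries * value_size;
	vals = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (vals == MAP_FAILED)
		return 1;

	vals[0] = 42;		/* writes go straight to the map's memory */
	munmap(vals, len);
	close(fd);
	return 0;
}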
595 struct bpf_array *array;
603 array = container_of(map, struct bpf_array, map);
604 index = info->index & array->index_mask;
606 return (void *)(uintptr_t)array->pptrs[index];
607 return array_map_elem_ptr(array, index);
614 struct bpf_array *array;
622 array = container_of(map, struct bpf_array, map);
623 index = info->index & array->index_mask;
625 return (void *)(uintptr_t)array->pptrs[index];
626 return array_map_elem_ptr(array, index);
634 struct bpf_array *array = container_of(map, struct bpf_array, map);
655 size = array->elem_size;
685 struct bpf_array *array = container_of(map, struct bpf_array, map);
690 buf_size = array->elem_size * num_possible_cpus();
733 struct bpf_array *array;
742 array = container_of(map, struct bpf_array, map);
747 val = this_cpu_ptr(array->pptrs[i]);
749 val = array_map_elem_ptr(array, i);
766 struct bpf_array *array = container_of(map, struct bpf_array, map);
768 u32 elem_size = array->elem_size;
770 u64 usage = sizeof(*array);
847 struct bpf_array *array = container_of(map, struct bpf_array, map);
851 for (i = 0; i < array->map.max_entries; i++)
852 BUG_ON(array->ptrs[i] != NULL);
854 bpf_map_area_free(array);
886 struct bpf_array *array = container_of(map, struct bpf_array, map);
893 if (index >= array->map.max_entries)
902 mutex_lock(&array->aux->poke_mutex);
903 old_ptr = xchg(array->ptrs + index, new_ptr);
905 mutex_unlock(&array->aux->poke_mutex);
907 old_ptr = xchg(array->ptrs + index, new_ptr);
917 struct bpf_array *array = container_of(map, struct bpf_array, map);
921 if (index >= array->map.max_entries)
925 mutex_lock(&array->aux->poke_mutex);
926 old_ptr = xchg(array->ptrs + index, NULL);
928 mutex_unlock(&array->aux->poke_mutex);
930 old_ptr = xchg(array->ptrs + index, NULL);
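
The update/delete fragments above (lines 886-930) swap program pointers in a prog array with xchg(), taking aux->poke_mutex when the array is wired up for tail-call poking. The userspace side of that path is an ordinary map update whose value is a program fd. A hedged sketch follows; prog_array_fd, slot and tail_prog_fd are assumptions supplied by the caller.

#include <bpf/bpf.h>

/* Install a program into a BPF_MAP_TYPE_PROG_ARRAY slot (tail-call target). */
int install_tail_call(int prog_array_fd, __u32 slot, int tail_prog_fd)
{
	/* the value of a prog array element is the target program's fd */
	return bpf_map_update_elem(prog_array_fd, &slot, &tail_prog_fd, BPF_ANY);
}

/* Removing the entry takes the delete path shown above (xchg with NULL). */
int remove_tail_call(int prog_array_fd, __u32 slot)
{
	return bpf_map_delete_elem(prog_array_fd, &slot);
}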
976 struct bpf_array *array = container_of(map, struct bpf_array, map);
979 for (i = 0; i < array->map.max_entries; i++)
1260 struct bpf_array *array = container_of(map, struct bpf_array, map);
1268 for (i = 0; i < array->map.max_entries; i++) {
1269 ee = READ_ONCE(array->ptrs[i]);
1377 struct bpf_array *array = container_of(map, struct bpf_array, map);
1378 u32 elem_size = array->elem_size;
1388 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
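
The last fragment (lines 1377-1388) is the inlined lookup for an array of maps, reusing the same index-mask trick. For completeness, here is a hedged sketch of creating such a map from userspace with libbpf, where the outer map is typed by a template inner map fd; all names are illustrative.

#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int create_array_of_maps(__u32 max_entries)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts);
	int inner_fd, outer_fd;

	/* template inner map: fixes the type/sizes every slot must match */
	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner", sizeof(__u32),
				  sizeof(__u64), 1, NULL);
	if (inner_fd < 0)
		return inner_fd;

	opts.inner_map_fd = inner_fd;
	outer_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer",
				  sizeof(__u32), sizeof(__u32), max_entries,
				  &opts);

	/* the template fd can be closed once the outer map exists */
	close(inner_fd);
	return outer_fd;
}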