Lines Matching refs:array (identifier cross-reference for `array`; the matched lines below appear to come from the kernel's BPF array map implementation, kernel/bpf/arraymap.c, one source line per match)

23 static void bpf_array_free_percpu(struct bpf_array *array)
27 for (i = 0; i < array->map.max_entries; i++) {
28 free_percpu(array->pptrs[i]);
33 static int bpf_array_alloc_percpu(struct bpf_array *array)
38 for (i = 0; i < array->map.max_entries; i++) {
39 ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
42 bpf_array_free_percpu(array);
45 array->pptrs[i] = ptr;
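
The loops at 23-45 above are bpf_array_free_percpu() and bpf_array_alloc_percpu(): one per-CPU region is allocated per element into array->pptrs[i], and if any allocation fails, everything allocated so far is freed before returning. Below is a minimal user-space sketch of that allocate-or-unwind pattern; struct toy_array, toy_alloc_all() and toy_free_all() are made-up names, and malloc() merely stands in for bpf_map_alloc_percpu().

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct bpf_array and its pptrs[] slots. */
    struct toy_array {
        unsigned int max_entries;
        size_t elem_size;
        void *pptrs[];                  /* one allocation per element */
    };

    static void toy_free_all(struct toy_array *a)
    {
        for (unsigned int i = 0; i < a->max_entries; i++) {
            free(a->pptrs[i]);          /* free(NULL) is a no-op, like free_percpu(NULL) */
            a->pptrs[i] = NULL;
        }
    }

    /* Mirrors the kernel pattern: allocate every slot, unwind all of it on failure. */
    static int toy_alloc_all(struct toy_array *a)
    {
        for (unsigned int i = 0; i < a->max_entries; i++) {
            void *p = malloc(a->elem_size);

            if (!p) {
                toy_free_all(a);        /* slots past i are still NULL */
                return -1;              /* the kernel returns -ENOMEM here */
            }
            a->pptrs[i] = p;
        }
        return 0;
    }

    int main(void)
    {
        unsigned int n = 8;
        /* calloc() zeroes pptrs[], much as the map area is zeroed on allocation. */
        struct toy_array *a = calloc(1, sizeof(*a) + n * sizeof(void *));

        if (!a)
            return 1;
        a->max_entries = n;
        a->elem_size = 64;
        printf("alloc: %d\n", toy_alloc_all(a));
        toy_free_all(a);
        free(a);
        return 0;
    }
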
91 struct bpf_array *array;
107 /* round up array size to nearest power of 2,
116 array_size = sizeof(*array);
121 * ensure array->value is exactly page-aligned
139 array = data + PAGE_ALIGN(sizeof(struct bpf_array))
142 array = bpf_map_area_alloc(array_size, numa_node);
144 if (!array)
146 array->index_mask = index_mask;
147 array->map.bypass_spec_v1 = bypass_spec_v1;
150 bpf_map_init_from_attr(&array->map, attr);
151 array->elem_size = elem_size;
153 if (percpu && bpf_array_alloc_percpu(array)) {
154 bpf_map_area_free(array);
158 return &array->map;
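
The allocation path (91-158) derives index_mask from max_entries rounded up to the next power of two, minus one, and with BPF_F_MMAPABLE it offsets the struct inside the vmalloc'ed area (the statement at 139 continues with "- offsetof(struct bpf_array, value)" in the source) so that array->value starts exactly on a page boundary. The standalone sketch below reproduces that arithmetic with a hypothetical struct toy_array and a hard-coded 4 KiB page; it is an illustration, not the real kernel layout.

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #define TOY_PAGE_SIZE   4096UL
    #define TOY_PAGE_ALIGN(x) (((x) + TOY_PAGE_SIZE - 1) & ~(TOY_PAGE_SIZE - 1))

    /* Hypothetical stand-in for struct bpf_array: a header followed by the value area. */
    struct toy_array {
        uint64_t header[16];            /* placeholder for struct bpf_map etc. */
        char value[] __attribute__((aligned(8)));
    };

    /* Same idea as roundup_pow_of_two(), done the slow and obvious way. */
    static uint64_t round_pow2(uint64_t n)
    {
        uint64_t p = 1;

        while (p < n)
            p <<= 1;
        return p;
    }

    int main(void)
    {
        uint32_t max_entries = 1000;
        uint64_t index_mask = round_pow2(max_entries) - 1;

        printf("max_entries=%u index_mask=%#llx\n",
               max_entries, (unsigned long long)index_mask);

        /* BPF_F_MMAPABLE placement: shift the struct inside the page-aligned
         * allocation so that 'value' itself lands on a page boundary:
         *   array = data + PAGE_ALIGN(sizeof(*array)) - offsetof(struct ..., value)
         */
        unsigned long data = 0x100000UL;        /* pretend page-aligned vmalloc area */
        unsigned long array = data + TOY_PAGE_ALIGN(sizeof(struct toy_array))
                              - offsetof(struct toy_array, value);
        unsigned long value = array + offsetof(struct toy_array, value);

        printf("value sits %#lx bytes into the area, page-aligned: %s\n",
               value - data, (value % TOY_PAGE_SIZE) ? "no" : "yes");
        return 0;
    }

When Spectre v1 mitigation is in effect, the value area is also sized for index_mask + 1 entries, so the masked access in the lookup path can never leave the allocation.
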
161 static void *array_map_elem_ptr(struct bpf_array* array, u32 index)
163 return array->value + (u64)array->elem_size * index;
169 struct bpf_array *array = container_of(map, struct bpf_array, map);
172 if (unlikely(index >= array->map.max_entries))
175 return array->value + (u64)array->elem_size * (index & array->index_mask);
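
Lines 169-175 are the lookup fast path: reject any index at or past max_entries, then index the value area with index & array->index_mask, so that even a mispredicted bounds check cannot speculatively read outside the power-of-two-sized allocation. A hedged user-space mimic, again with invented names:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Toy model of the lookup path; field and function names are invented. */
    struct toy_array {
        uint32_t max_entries;
        uint32_t elem_size;     /* round_up(value_size, 8) in the kernel */
        uint32_t index_mask;    /* next power of two of max_entries, minus one */
        char *value;            /* sized for index_mask + 1 entries when the
                                 * Spectre v1 mitigation is active */
    };

    /* Mimics array_map_lookup_elem(): bounds check, then a masked index. */
    void *toy_lookup(const struct toy_array *array, uint32_t index)
    {
        if (index >= array->max_entries)
            return NULL;
        return array->value + (uint64_t)array->elem_size * (index & array->index_mask);
    }

    int main(void)
    {
        struct toy_array a = {
            .max_entries = 5, .elem_size = 8, .index_mask = 7,
            .value = calloc(8, 8),      /* 8 slots: the rounded-up capacity */
        };

        printf("idx 3 -> %p, idx 9 -> %p\n", toy_lookup(&a, 3), toy_lookup(&a, 9));
        free(a.value);
        return 0;
    }
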
181 struct bpf_array *array = container_of(map, struct bpf_array, map);
183 sha256(array->value, (u64)array->elem_size * array->map.max_entries,
185 memcpy(array->map.sha, hash_buf, sizeof(array->map.sha));
192 struct bpf_array *array = container_of(map, struct bpf_array, map);
199 *imm = (unsigned long)array->value;
206 struct bpf_array *array = container_of(map, struct bpf_array, map);
207 u64 base = (unsigned long)array->value;
208 u64 range = array->elem_size;
222 struct bpf_array *array = container_of(map, struct bpf_array, map);
224 u32 elem_size = array->elem_size;
236 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
255 struct bpf_array *array = container_of(map, struct bpf_array, map);
258 if (unlikely(index >= array->map.max_entries))
261 return this_cpu_ptr(array->pptrs[index & array->index_mask]);
267 struct bpf_array *array = container_of(map, struct bpf_array, map);
282 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
298 struct bpf_array *array = container_of(map, struct bpf_array, map);
304 if (unlikely(index >= array->map.max_entries))
307 return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
312 struct bpf_array *array = container_of(map, struct bpf_array, map);
318 if (unlikely(index >= array->map.max_entries))
325 size = array->elem_size;
327 pptr = array->pptrs[index & array->index_mask];
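
Lines 312-327 copy every possible CPU's instance of an element into one flat buffer, each chunk padded to 8 bytes (elem_size is already round_up(value_size, 8)). That is exactly the layout a user-space bpf_map_lookup_elem() on a BPF_MAP_TYPE_PERCPU_ARRAY fills in. A minimal libbpf sketch, assuming libbpf >= 0.7 for bpf_map_create(); the map name and sizes are arbitrary, and creating the map needs CAP_BPF or root.

    /* Build: cc percpu_read.c -lbpf */
    #include <stdio.h>
    #include <stdlib.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
        int ncpus = libbpf_num_possible_cpus();
        __u32 value_size = 4;                    /* one u32 counter per CPU */
        size_t stride = (value_size + 7) & ~7UL; /* per-CPU chunks at 8-byte strides */
        __u32 key = 0;
        void *values;
        int fd;

        fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, "percpu_demo",
                            sizeof(__u32), value_size, 4, NULL);
        if (fd < 0) {
            perror("bpf_map_create");
            return 1;
        }

        values = calloc(ncpus, stride);
        if (!values)
            return 1;

        /* One lookup returns every possible CPU's copy of element 0. */
        if (bpf_map_lookup_elem(fd, &key, values)) {
            perror("bpf_map_lookup_elem");
            return 1;
        }
        for (int cpu = 0; cpu < ncpus; cpu++)
            printf("cpu%d: %u\n", cpu,
                   *(__u32 *)((char *)values + cpu * stride));
        free(values);
        return 0;
    }

Updates go the other way through the same layout: bpf_map_update_elem() with a buffer of one 8-byte-aligned chunk per possible CPU writes all per-CPU instances at once.
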
340 struct bpf_array *array = container_of(map, struct bpf_array, map);
344 if (index >= array->map.max_entries) {
349 if (index == array->map.max_entries - 1)
360 struct bpf_array *array = container_of(map, struct bpf_array, map);
368 if (unlikely(index >= array->map.max_entries))
380 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
381 val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
383 bpf_obj_free_fields(array->map.record, val);
385 val = array->value +
386 (u64)array->elem_size * (index & array->index_mask);
391 bpf_obj_free_fields(array->map.record, val);
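
Lines 360-391 are the in-place update: the index is range-checked, the new value is copied over the existing slot (per-CPU or flat), and special fields in the old value are released with bpf_obj_free_fields(). Since every slot exists from creation, BPF_NOEXIST can never succeed and elements cannot be deleted, only overwritten. A short libbpf sketch of those semantics (arbitrary map name and sizes, needs CAP_BPF or root):

    /* Build: cc array_update.c -lbpf */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <bpf/bpf.h>

    int main(void)
    {
        __u32 key = 3, val = 42;
        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "array_demo",
                                sizeof(key), sizeof(val), 8, NULL);

        if (fd < 0)
            return 1;

        /* Every slot already exists, so BPF_NOEXIST is always rejected... */
        if (bpf_map_update_elem(fd, &key, &val, BPF_NOEXIST))
            printf("BPF_NOEXIST rejected: %s\n", strerror(errno));
        /* ...while BPF_ANY / BPF_EXIST overwrite the element in place. */
        if (!bpf_map_update_elem(fd, &key, &val, BPF_ANY))
            printf("BPF_ANY update ok\n");

        /* Array elements cannot be deleted, only overwritten. */
        if (bpf_map_delete_elem(fd, &key))
            printf("delete rejected: %s\n", strerror(errno));
        return 0;
    }
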
399 struct bpf_array *array = container_of(map, struct bpf_array, map);
409 if (unlikely(index >= array->map.max_entries))
423 size = array->elem_size;
425 pptr = array->pptrs[index & array->index_mask];
428 bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
441 static void *array_map_vmalloc_addr(struct bpf_array *array)
443 return (void *)round_down((unsigned long)array, PAGE_SIZE);
448 struct bpf_array *array = container_of(map, struct bpf_array, map);
455 for (i = 0; i < array->map.max_entries; i++) {
457 bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
459 bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
461 bpf_obj_free_task_work(map->record, array_map_elem_ptr(array, i));
469 struct bpf_array *array = container_of(map, struct bpf_array, map);
473 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
474 for (i = 0; i < array->map.max_entries; i++) {
475 void __percpu *pptr = array->pptrs[i & array->index_mask];
484 for (i = 0; i < array->map.max_entries; i++)
485 bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
489 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
490 bpf_array_free_percpu(array);
492 if (array->map.map_flags & BPF_F_MMAPABLE)
493 bpf_map_area_free(array_map_vmalloc_addr(array));
495 bpf_map_area_free(array);
522 struct bpf_array *array = container_of(map, struct bpf_array, map);
530 pptr = array->pptrs[index & array->index_mask];
560 * Bpf array can only take a u32 key. This check makes sure
571 struct bpf_array *array = container_of(map, struct bpf_array, map);
572 pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;
578 PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
581 return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
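
Lines 571-581 implement mmap() on a BPF_F_MMAPABLE array: the pgoff adjustment skips the page-aligned struct header, and the length check caps the mapping at PAGE_ALIGN(max_entries * elem_size), so user space maps exactly the value area whose page alignment the allocator arranged earlier. A user-space sketch (assumes libbpf >= 0.7; name and sizes arbitrary, needs CAP_BPF or root):

    /* Build: cc mmap_array.c -lbpf */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
        __u32 value_size = 8, max_entries = 512;        /* 512 * 8 = one 4 KiB page */
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
        int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "mmap_demo",
                                sizeof(__u32), value_size, max_entries, &opts);
        size_t page = sysconf(_SC_PAGESIZE);
        size_t len = ((size_t)max_entries * value_size + page - 1) & ~(page - 1);
        __u64 *data;

        if (fd < 0) {
            perror("bpf_map_create");
            return 1;
        }

        /* Offset 0 of the mapping is element 0 of the value area. */
        data = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (data == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        data[0] = 0xdeadbeef;   /* visible to BPF programs with no further syscalls */
        printf("element 0 = %llx\n", (unsigned long long)data[0]);
        munmap(data, len);
        close(fd);
        return 0;
    }

This is the same mechanism libbpf relies on to memory-map global data (.data/.bss/.rodata) array maps.
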
604 struct bpf_array *array;
612 array = container_of(map, struct bpf_array, map);
613 index = info->index & array->index_mask;
615 return (void *)(uintptr_t)array->pptrs[index];
616 return array_map_elem_ptr(array, index);
623 struct bpf_array *array;
631 array = container_of(map, struct bpf_array, map);
632 index = info->index & array->index_mask;
634 return (void *)(uintptr_t)array->pptrs[index];
635 return array_map_elem_ptr(array, index);
643 struct bpf_array *array = container_of(map, struct bpf_array, map);
664 size = array->elem_size;
694 struct bpf_array *array = container_of(map, struct bpf_array, map);
699 buf_size = array->elem_size * num_possible_cpus();
742 struct bpf_array *array;
753 array = container_of(map, struct bpf_array, map);
756 val = this_cpu_ptr(array->pptrs[i]);
758 val = array_map_elem_ptr(array, i);
773 struct bpf_array *array = container_of(map, struct bpf_array, map);
775 u32 elem_size = array->elem_size;
777 u64 usage = sizeof(*array);
855 struct bpf_array *array = container_of(map, struct bpf_array, map);
859 for (i = 0; i < array->map.max_entries; i++)
860 BUG_ON(array->ptrs[i] != NULL);
862 bpf_map_area_free(array);
894 struct bpf_array *array = container_of(map, struct bpf_array, map);
901 if (index >= array->map.max_entries)
910 mutex_lock(&array->aux->poke_mutex);
911 old_ptr = xchg(array->ptrs + index, new_ptr);
913 mutex_unlock(&array->aux->poke_mutex);
915 old_ptr = xchg(array->ptrs + index, new_ptr);
925 struct bpf_array *array = container_of(map, struct bpf_array, map);
929 if (index >= array->map.max_entries)
933 mutex_lock(&array->aux->poke_mutex);
934 old_ptr = xchg(array->ptrs + index, NULL);
936 mutex_unlock(&array->aux->poke_mutex);
938 old_ptr = xchg(array->ptrs + index, NULL);
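
Lines 894-938 are the update and delete paths for fd-based arrays such as BPF_MAP_TYPE_PROG_ARRAY: the stored pointer is swapped with xchg(), under aux->poke_mutex when the JIT may have inlined tail-call targets that need patching. From user space those slots are driven with ordinary map update/delete calls carrying a program fd; a hedged sketch follows, where jmp_table_fd and prog_fd are assumed to already exist and the function names are invented.

    #include <bpf/bpf.h>

    /* Install prog_fd into slot 0 of a BPF_MAP_TYPE_PROG_ARRAY so that
     * bpf_tail_call(ctx, &jmp_table, 0) in a BPF program jumps to it.
     */
    int install_tail_call(int jmp_table_fd, int prog_fd)
    {
        __u32 slot = 0;

        /* Kernel side: xchg() of the bpf_prog pointer in array->ptrs[slot]. */
        return bpf_map_update_elem(jmp_table_fd, &slot, &prog_fd, BPF_ANY);
    }

    /* Clear the slot again; kernel side: xchg(array->ptrs + slot, NULL). */
    int remove_tail_call(int jmp_table_fd)
    {
        __u32 slot = 0;

        return bpf_map_delete_elem(jmp_table_fd, &slot);
    }
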
1006 struct bpf_array *array = container_of(map, struct bpf_array, map);
1009 for (i = 0; i < array->map.max_entries; i++)
1290 struct bpf_array *array = container_of(map, struct bpf_array, map);
1298 for (i = 0; i < array->map.max_entries; i++) {
1299 ee = READ_ONCE(array->ptrs[i]);
1407 struct bpf_array *array = container_of(map, struct bpf_array, map);
1408 u32 elem_size = array->elem_size;
1418 *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
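
The last fragments (1407-1418) are the inlined lookup for array-of-maps, which applies the same index_mask AND before chasing the inner map pointer. For completeness, a user-space sketch of creating such an outer map; it assumes libbpf >= 0.7, and the helper name make_outer_map() plus all sizes are invented.

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* Build a BPF_MAP_TYPE_ARRAY_OF_MAPS whose slots hold array maps. */
    int make_outer_map(void)
    {
        /* The inner map acts as a template: its type and sizes constrain
         * every map later stored in the outer array.
         */
        int inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner_tmpl",
                                      sizeof(__u32), sizeof(__u64), 16, NULL);
        LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);

        if (inner_fd < 0)
            return -1;

        /* Outer values are map fds on update and map ids on user-space lookup. */
        return bpf_map_create(BPF_MAP_TYPE_ARRAY_OF_MAPS, "outer_demo",
                              sizeof(__u32), sizeof(__u32), 8, &opts);
    }
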