/*
 * Excerpt from kernel/bpf/local_storage.c (BPF cgroup local storage).
 * Elided lines are marked with "...".
 */
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
/* ... */
#include "../cgroup/cgroup-internal.h"

struct bpf_cgroup_storage_map {
	struct bpf_map map;

	spinlock_t lock;	/* protects root and list */
	struct rb_root root;	/* storages ordered by key */
	struct list_head list;	/* all storages of this map, for iteration */
};
static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map)
{
	return container_of(map, struct bpf_cgroup_storage_map, map);
}

/* true when the key embeds an attach type, i.e. each (cgroup, attach type)
 * pair gets its own storage; a bare __u64 cgroup-id key is shared instead
 */
static bool attach_type_isolated(const struct bpf_map *map)
{
	return map->key_size == sizeof(struct bpf_cgroup_storage_key);
}
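/* For reference: the two accepted key layouts. The struct below is the UAPI
 * definition from include/uapi/linux/bpf.h; alternatively a map may use a
 * bare __u64 key holding just the cgroup id, in which case one value is
 * shared by all attach types of that cgroup.
 */
struct bpf_cgroup_storage_key {
	__u64	cgroup_inode_id;	/* cgroup inode id */
	__u32	attach_type;		/* program attach type */
};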
static int bpf_cgroup_storage_key_cmp(const struct bpf_cgroup_storage_map *map,
				      const void *_key1, const void *_key2)
{
	if (attach_type_isolated(&map->map)) {
		const struct bpf_cgroup_storage_key *key1 = _key1;
		const struct bpf_cgroup_storage_key *key2 = _key2;

		if (key1->cgroup_inode_id < key2->cgroup_inode_id)
			return -1;
		else if (key1->cgroup_inode_id > key2->cgroup_inode_id)
			return 1;
		else if (key1->attach_type < key2->attach_type)
			return -1;
		else if (key1->attach_type > key2->attach_type)
			return 1;
	} else {
		const __u64 *cgroup_inode_id1 = _key1;
		const __u64 *cgroup_inode_id2 = _key2;

		if (*cgroup_inode_id1 < *cgroup_inode_id2)
			return -1;
		else if (*cgroup_inode_id1 > *cgroup_inode_id2)
			return 1;
	}

	return 0;
}
static struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked)
{
	struct rb_root *root = &map->root;
	struct rb_node *node;

	if (!locked)
		spin_lock_bh(&map->lock);

	node = root->rb_node;
	while (node) {
		struct bpf_cgroup_storage *storage;

		storage = container_of(node, struct bpf_cgroup_storage, node);

		switch (bpf_cgroup_storage_key_cmp(map, key, &storage->key)) {
		case -1:
			node = node->rb_left;
			break;
		case 1:
			node = node->rb_right;
			break;
		default:
			if (!locked)
				spin_unlock_bh(&map->lock);
			return storage;
		}
	}

	if (!locked)
		spin_unlock_bh(&map->lock);

	return NULL;
}
static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map,
				 struct bpf_cgroup_storage *storage)
{
	struct rb_root *root = &map->root;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct bpf_cgroup_storage *this;

		this = container_of(*new, struct bpf_cgroup_storage, node);

		parent = *new;
		switch (bpf_cgroup_storage_key_cmp(map, &storage->key, &this->key)) {
		case -1:
			new = &((*new)->rb_left);
			break;
		case 1:
			new = &((*new)->rb_right);
			break;
		default:
			/* exactly one storage per key */
			return -EEXIST;
		}
	}

	rb_link_node(&storage->node, parent, new);
	rb_insert_color(&storage->node, root);

	return 0;
}
static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	storage = cgroup_storage_lookup(map, key, false);
	if (!storage)
		return NULL;

	return &READ_ONCE(storage->buf)->data[0];
}
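/*
 * Illustrative userspace sketch (not part of this file): a libbpf lookup on
 * a cgroup storage map fd lands in cgroup_storage_lookup_elem() above. The
 * helper name read_cgroup_value() is hypothetical.
 */
#include <bpf/bpf.h>	/* bpf_map_lookup_elem(), pulls in <linux/bpf.h> */

static int read_cgroup_value(int map_fd, __u64 cgroup_id, __u32 attach_type,
			     void *value)
{
	struct bpf_cgroup_storage_key key = {
		.cgroup_inode_id = cgroup_id,
		.attach_type	 = attach_type,
	};

	return bpf_map_lookup_elem(map_fd, &key, value);
}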
static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	struct bpf_cgroup_storage *storage;
	struct bpf_storage_buffer *new;

	if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST)))
		return -EINVAL;

	if (unlikely((flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
					key, false);
	if (!storage)
		return -ENOENT;

	if (flags & BPF_F_LOCK) {
		copy_map_value_locked(map, storage->buf->data, value, false);
		return 0;
	}

	new = bpf_map_kmalloc_node(map, struct_size(new, data, map->value_size),
				   __GFP_ZERO | GFP_NOWAIT | __GFP_NOWARN,
				   map->numa_node);
	if (!new)
		return -ENOMEM;

	memcpy(&new->data[0], value, map->value_size);
	check_and_init_map_value(map, new->data);

	/* publish the new buffer, then free the old one after a grace period */
	new = xchg(&storage->buf, new);
	kfree_rcu(new, rcu);

	return 0;
}
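/*
 * Illustrative userspace sketch (not part of this file): with BPF_F_LOCK the
 * update path above copies under the value's bpf_spin_lock instead of doing
 * the RCU buffer swap, so the map's BTF value type must contain a struct
 * bpf_spin_lock field. The helper name update_locked() is hypothetical.
 */
static int update_locked(int map_fd, const void *key, const void *value)
{
	return bpf_map_update_elem(map_fd, key, value, BPF_F_LOCK);
}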
int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *key, void *value)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off,
				per_cpu_ptr(storage->percpu_buf, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *key,
				     void *value, u64 map_flags)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;
	int cpu, off = 0;
	u32 size;

	if (map_flags != BPF_ANY && map_flags != BPF_EXIST)
		return -EINVAL;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map, key, false);
	if (!storage) {
		rcu_read_unlock();
		return -ENOENT;
	}

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * 'value_size' of them, so during lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(_map->value_size, 8);
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
				value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
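/*
 * Illustrative userspace sketch (not part of this file): the per-CPU copy
 * and update paths above transfer round_up(value_size, 8) bytes for every
 * possible CPU, so user buffers must be sized accordingly. The helper name
 * alloc_percpu_values() is hypothetical.
 */
#include <bpf/libbpf.h>	/* libbpf_num_possible_cpus() */
#include <stdlib.h>

static void *alloc_percpu_values(__u32 value_size)
{
	int ncpus = libbpf_num_possible_cpus();

	if (ncpus < 0)
		return NULL;	/* negative errno-style error from libbpf */
	return calloc(ncpus, (value_size + 7) & ~7u);
}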
static int cgroup_storage_get_next_key(struct bpf_map *_map, void *key,
				       void *_next_key)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct bpf_cgroup_storage *storage;

	spin_lock_bh(&map->lock);

	if (list_empty(&map->list))
		goto enoent;

	if (key) {
		storage = cgroup_storage_lookup(map, key, true);
		if (!storage)
			goto enoent;

		storage = list_next_entry(storage, list_map);
		if (!storage)
			goto enoent;
	} else {
		storage = list_first_entry(&map->list,
					   struct bpf_cgroup_storage, list_map);
	}

	spin_unlock_bh(&map->lock);

	if (attach_type_isolated(&map->map)) {
		struct bpf_cgroup_storage_key *next = _next_key;
		*next = storage->key;
	} else {
		__u64 *next = _next_key;
		*next = storage->key.cgroup_inode_id;
	}
	return 0;

enoent:
	spin_unlock_bh(&map->lock);
	return -ENOENT;
}
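/*
 * Illustrative userspace sketch (not part of this file): walking every key
 * via the get_next_key operation above. Passing NULL as the current key
 * starts from the head of the map's storage list. The helper name
 * for_each_storage_key() is hypothetical.
 */
static void for_each_storage_key(int map_fd)
{
	struct bpf_cgroup_storage_key key, next;
	void *cur = NULL;

	while (!bpf_map_get_next_key(map_fd, cur, &next)) {
		key = next;
		cur = &key;
		/* ... use key ... */
	}
}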
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
{
	__u32 max_value_size = BPF_LOCAL_STORAGE_MAX_VALUE_SIZE;
	int numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_cgroup_storage_map *map;

	/* percpu is bound by PCPU_MIN_UNIT_SIZE, non-percpu
	 * is the same as other local storages.
	 */
	if (attr->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		max_value_size = min_t(__u32, max_value_size,
				       PCPU_MIN_UNIT_SIZE);

	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key) &&
	    attr->key_size != sizeof(__u64))
		return ERR_PTR(-EINVAL);

	if (attr->value_size == 0)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > max_value_size)
		return ERR_PTR(-E2BIG);

	if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags))
		return ERR_PTR(-EINVAL);

	if (attr->max_entries)
		/* max_entries is not used and enforced to be 0 */
		return ERR_PTR(-EINVAL);

	map = bpf_map_area_alloc(sizeof(struct bpf_cgroup_storage_map), numa_node);
	if (!map)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&map->map, attr);

	spin_lock_init(&map->lock);
	map->root = RB_ROOT;
	INIT_LIST_HEAD(&map->list);

	return &map->map;
}
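/*
 * Illustrative BPF program-side definition (not part of this file) that
 * passes the checks above: the key is struct bpf_cgroup_storage_key (or
 * __u64), value_size is non-zero, and max_entries is left at 0.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* __uint, __type, SEC */

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u64);
} cg_storage SEC(".maps");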
static void cgroup_storage_map_free(struct bpf_map *_map)
{
	struct bpf_cgroup_storage_map *map = map_to_storage(_map);
	struct list_head *storages = &map->list;
	struct bpf_cgroup_storage *storage, *stmp;

	cgroup_lock();
	/* unlink and free every storage still attached to this map */
	list_for_each_entry_safe(storage, stmp, storages, list_map) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}
	cgroup_unlock();

	WARN_ON(!RB_EMPTY_ROOT(&map->root));
	WARN_ON(!list_empty(&map->list));

	bpf_map_area_free(map);
}
/* elements cannot be deleted from userspace; their lifetime follows the
 * cgroup attachment
 */
static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}
static int cgroup_storage_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	if (attach_type_isolated(map)) {
		struct btf_member *m;
		u32 offset, size;

		/* Key must be a struct with exactly two members, laid out
		 * like struct bpf_cgroup_storage_key.
		 */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
		    BTF_INFO_VLEN(key_type->info) != 2)
			return -EINVAL;

		/* The first member: a 64-bit integer at offset 0 */
		m = (struct btf_member *)(key_type + 1);
		size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id);
		if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
			return -EINVAL;

		/* The second member: a 32-bit integer at the attach_type offset */
		m++;
		offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
		size = sizeof_field(struct bpf_cgroup_storage_key, attach_type);
		if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
			return -EINVAL;
	} else {
		u32 int_data;

		/* Key must be a plain 64-bit integer (the cgroup id) */
		if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
			return -EINVAL;

		int_data = *(u32 *)(key_type + 1);
		if (BTF_INT_BITS(int_data) != 64 || BTF_INT_OFFSET(int_data))
			return -EINVAL;
	}

	return 0;
}
static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage *storage;
	int cpu;

	rcu_read_lock();
	storage = cgroup_storage_lookup(map_to_storage(map), key, false);
	if (!storage) {
		rcu_read_unlock();
		return;
	}

	btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		seq_puts(m, ": ");
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  &READ_ONCE(storage->buf)->data[0], m);
		seq_putc(m, '\n');
	} else {
		seq_puts(m, ": {\n");
		for_each_possible_cpu(cpu) {
			seq_printf(m, "\tcpu%d: ", cpu);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  per_cpu_ptr(storage->percpu_buf, cpu),
					  m);
			seq_putc(m, '\n');
		}
		seq_puts(m, "}\n");
	}
	rcu_read_unlock();
}
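/*
 * Note: this seq_show callback is what produces the human-readable dump when
 * a map pinned in bpffs is read (e.g. with cat), provided the map was created
 * with BTF key/value type information.
 */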
static u64 cgroup_storage_map_usage(const struct bpf_map *map)
{
	/* Currently the dynamically allocated elements are not counted */
	return sizeof(struct bpf_cgroup_storage_map);
}

/* ... */
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
{
	enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);

	if (aux->cgroup_storage[stype] &&
	    aux->cgroup_storage[stype] != _map)
		return -EBUSY;

	aux->cgroup_storage[stype] = _map;
	return 0;
}
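/*
 * Note: the -EBUSY above means a single program may reference at most one
 * cgroup storage map per storage type (one shared, one per-CPU); the chosen
 * map is recorded in prog->aux at load time.
 */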
static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)
{
	size_t size;

	if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) {
		size = sizeof(struct bpf_storage_buffer) + map->value_size;
		*pages = round_up(sizeof(struct bpf_cgroup_storage) + size,
				  PAGE_SIZE) >> PAGE_SHIFT;
	} else {
		size = map->value_size;
		*pages = round_up(round_up(size, 8) * num_possible_cpus(),
				  PAGE_SIZE) >> PAGE_SHIFT;
	}

	return size;
}
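/*
 * Worked example (assuming 4 KiB pages and value_size = 12): the shared
 * flavour allocates one buffer of sizeof(struct bpf_storage_buffer) + 12
 * bytes, while the per-CPU flavour charges round_up(12, 8) = 16 bytes per
 * possible CPU; in both cases *pages is the total rounded up to whole pages.
 */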
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype)
{
	const gfp_t gfp = __GFP_ZERO | GFP_USER;
	struct bpf_cgroup_storage *storage;
	struct bpf_map *map;
	size_t size;
	u32 pages;

	map = prog->aux->cgroup_storage[stype];
	if (!map)
		return NULL;

	size = bpf_cgroup_storage_calculate_size(map, &pages);

	storage = bpf_map_kmalloc_node(map, sizeof(struct bpf_cgroup_storage),
				       gfp, map->numa_node);
	if (!storage)
		goto enomem;

	if (stype == BPF_CGROUP_STORAGE_SHARED) {
		storage->buf = bpf_map_kmalloc_node(map, size, gfp,
						    map->numa_node);
		if (!storage->buf)
			goto enomem;
		check_and_init_map_value(map, storage->buf->data);
	} else {
		storage->percpu_buf = bpf_map_alloc_percpu(map, size, 8, gfp);
		if (!storage->percpu_buf)
			goto enomem;
	}

	storage->map = (struct bpf_cgroup_storage_map *)map;

	return storage;

enomem:
	kfree(storage);
	return ERR_PTR(-ENOMEM);
}
static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	kfree(storage->buf);
	kfree(storage);
}

static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	free_percpu(storage->percpu_buf);
	kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_map *map;

	if (!storage)
		return;

	map = &storage->map->map;
	stype = cgroup_storage_type(map);
	if (stype == BPF_CGROUP_STORAGE_SHARED)
		call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
	else
		call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
}
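/*
 * Note: freeing goes through call_rcu() because BPF programs dereference
 * storage->buf / storage->percpu_buf under rcu_read_lock(); the buffers may
 * only be reclaimed once all in-flight readers have finished.
 */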
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type)
{
	struct bpf_cgroup_storage_map *map;

	if (!storage)
		return;

	storage->key.attach_type = type;
	storage->key.cgroup_inode_id = cgroup_id(cgroup);

	map = storage->map;

	spin_lock_bh(&map->lock);
	WARN_ON(cgroup_storage_insert(map, storage));
	list_add(&storage->list_map, &map->list);
	list_add(&storage->list_cg, &cgroup->bpf.storages);
	spin_unlock_bh(&map->lock);
}
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
{
	struct bpf_cgroup_storage_map *map;
	struct rb_root *root;

	if (!storage)
		return;

	map = storage->map;

	spin_lock_bh(&map->lock);
	root = &map->root;
	rb_erase(&storage->node, root);

	list_del(&storage->list_map);
	list_del(&storage->list_cg);
	spin_unlock_bh(&map->lock);
}