Lines Matching full:storage
235 /* Only task storage has uptrs and task storage in bpf_selem_free()
237 * for task storage, so this bpf_obj_free_fields() won't unpin in bpf_selem_free()
246 /* reuse_now == true only happens when the storage owner in bpf_selem_free()
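Around bpf_selem_free(), the fragments above distinguish two free paths: when reuse_now is true (the comment fragment ties this to the storage owner being freed) the element can be freed or reused right away, otherwise the free has to be deferred so lingering readers stay safe. Below is a minimal user-space sketch of that branch only; selem_free(), selem_defer_free() and the struct layout are invented illustrative names, not the kernel API.

#include <stdbool.h>
#include <stdlib.h>

struct selem {
	void *data;
};

/* Stand-in for an RCU-style deferred free: in the kernel the memory is
 * only released after a grace period, not immediately like here. */
static void selem_defer_free(struct selem *e)
{
	free(e);
}

static void selem_free(struct selem *e, bool reuse_now)
{
	if (reuse_now)
		free(e);		/* owner-teardown path: immediate free/reuse */
	else
		selem_defer_free(e);	/* readers may still hold a pointer: defer */
}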
318 * read if the freeing of the storage is done in bpf_selem_unlink_storage_nolock()
322              * to the caller which then frees the storage after in bpf_selem_unlink_storage_nolock()
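The unlink comment points at a locking/readability choice: the *_nolock helper only reports, via a returned bool, that the whole local storage became empty, and the caller performs the free after dropping the lock. A minimal sketch of that pattern using ordinary pthreads follows; the names are invented, and it assumes single-owner teardown so no other thread can still be blocked on the lock when the storage is freed.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct local_storage {
	pthread_mutex_t lock;	/* assumed initialized by whoever created it */
	int nr_elems;
};

/* Caller must hold ls->lock; this only decides whether the storage is empty. */
static bool unlink_elem_nolock(struct local_storage *ls)
{
	ls->nr_elems--;
	return ls->nr_elems == 0;	/* the "bool free_local_storage" idea */
}

static void unlink_elem(struct local_storage *ls)
{
	bool free_storage;

	pthread_mutex_lock(&ls->lock);
	free_storage = unlink_elem_nolock(ls);
	pthread_mutex_unlock(&ls->lock);

	if (free_storage)	/* the free happens only after the unlock */
		free(ls);
}

Keeping the decision inside the locked helper and the free outside it is exactly what makes the control flow easier to read, which is the point the quoted comment is making.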
488 struct bpf_local_storage *prev_storage, *storage; in bpf_local_storage_alloc() local
492 err = mem_charge(smap, owner, sizeof(*storage)); in bpf_local_storage_alloc()
497 storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags); in bpf_local_storage_alloc()
499 storage = bpf_map_kzalloc(&smap->map, sizeof(*storage), in bpf_local_storage_alloc()
501 if (!storage) { in bpf_local_storage_alloc()
506 RCU_INIT_POINTER(storage->smap, smap); in bpf_local_storage_alloc()
507 INIT_HLIST_HEAD(&storage->list); in bpf_local_storage_alloc()
508 raw_spin_lock_init(&storage->lock); in bpf_local_storage_alloc()
509 storage->owner = owner; in bpf_local_storage_alloc()
511 bpf_selem_link_storage_nolock(storage, first_selem); in bpf_local_storage_alloc()
516 /* Publish storage to the owner. in bpf_local_storage_alloc()
521 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage) in bpf_local_storage_alloc()
522 * is protected by the storage->lock. Hence, when freeing in bpf_local_storage_alloc()
523 * the owner->storage, the storage->lock must be held before in bpf_local_storage_alloc()
524 * setting owner->storage ptr to NULL. in bpf_local_storage_alloc()
526 prev_storage = cmpxchg(owner_storage_ptr, NULL, storage); in bpf_local_storage_alloc()
546 bpf_local_storage_free(storage, smap, smap->bpf_ma, true); in bpf_local_storage_alloc()
547 mem_uncharge(smap, owner, sizeof(*storage)); in bpf_local_storage_alloc()
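The bpf_local_storage_alloc() fragments above outline the allocation and publish flow: charge the owner, allocate the storage (from the bpf_mem_alloc cache or with bpf_map_kzalloc), initialize it, link the first element, then publish it with a cmpxchg on the owner's storage pointer; the error path frees the fresh storage and undoes the charge. Here is a self-contained C11 sketch of the compare-and-swap publish step only, with simplified stand-in types rather than the kernel structures, and with the charging/uncharging bookkeeping left out.

#include <stdatomic.h>
#include <stdlib.h>

struct storage {
	void *owner;
	int nr_elems;
};

/* Allocate, fully initialize, then publish with a single compare-and-swap
 * so that two concurrent allocators for the same owner cannot both
 * install a storage. */
static struct storage *storage_alloc(void *owner,
				     _Atomic(struct storage *) *owner_storage_ptr)
{
	struct storage *expected = NULL;
	struct storage *st;

	st = calloc(1, sizeof(*st));	/* cf. the bpf_map_kzalloc() path above */
	if (!st)
		return NULL;

	st->owner = owner;		/* initialize everything before publishing */
	st->nr_elems = 1;		/* first element linked before publish */

	if (!atomic_compare_exchange_strong(owner_storage_ptr, &expected, st)) {
		/* Lost the race: some other storage is already installed.
		 * Undo the allocation and use the winner's copy. */
		free(st);
		return expected;
	}
	return st;
}

The property worth noting is that the storage is complete before the compare-and-swap, so anyone who observes the published pointer sees a consistent object; from that point on, updates go through the storage's own lock, matching the quoted comment about owner->storage being protected by storage->lock.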
767 * intends to remove all local storage. So the last iteration in bpf_local_storage_destroy()
792 * A deadlock free allocator is useful for storage that the bpf prog can easily
794 * The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
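The comment fragments at 792-794 give the reasoning for using bpf_mem_alloc here: a bpf program can easily reach the owner in contexts where calling back into the general-purpose allocator could deadlock, so task and cgroup storage allocate from an object cache instead. The toy free-list below only illustrates that general idea of serving the hot path from pre-filled objects; it is single-threaded and is not bpf_mem_alloc, which keeps per-CPU caches and handles reuse across grace periods.

#include <stdlib.h>

struct obj {
	struct obj *next;
	char payload[64];
};

struct obj_cache {
	struct obj *free_list;	/* pre-filled from a context where malloc() is safe */
};

/* Freed objects go back to the cache so they can be reused later. */
static void cache_free(struct obj_cache *c, struct obj *o)
{
	o->next = c->free_list;
	c->free_list = o;
}

/* Hot path: pop from the free list only; no malloc(), no locks that the
 * interrupted context might already hold. */
static struct obj *cache_alloc(struct obj_cache *c)
{
	struct obj *o = c->free_list;

	if (o)
		c->free_list = o->next;
	return o;
}

/* Refill runs from a safe context, never from the hot allocation path. */
static void cache_refill(struct obj_cache *c, int n)
{
	while (n-- > 0) {
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			break;
		cache_free(c, o);
	}
}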
883 * to the owner->storage or to the map bucket's list. in bpf_local_storage_map_free()
886 * or when the storage is freed e.g. in bpf_local_storage_map_free()
907 /* While freeing the storage we may still need to access the map. in bpf_local_storage_map_free()
913 * However, while freeing the storage one still needs to access the in bpf_local_storage_map_free()
917 * Hence, wait another rcu grace period for the storage to be freed. in bpf_local_storage_map_free()
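The bpf_local_storage_map_free() fragments describe a teardown-ordering problem: elements may still be linked to an owner's storage or to a map bucket, and the deferred free of a storage can still dereference the map, so the map itself must only be freed after those deferred frees have had their grace period. A toy model of that ordering is sketched below; wait_for_grace_period() and defer_free_elem() are placeholders for the kernel's RCU machinery, and the structures are invented for illustration.

#include <stdlib.h>

struct map;

struct elem {
	struct map *smap;	/* a deferred destructor may still read this */
};

struct map {
	struct elem **elems;
	size_t nr;
};

/* Placeholder for a synchronize_rcu()-style barrier: returns only when
 * no deferred element free can still be running. */
static void wait_for_grace_period(void)
{
}

/* Stand-in for a deferred (grace-period) free of one element. */
static void defer_free_elem(struct elem *e)
{
	free(e);
}

static void map_free(struct map *m)
{
	size_t i;

	for (i = 0; i < m->nr; i++)
		defer_free_elem(m->elems[i]);	/* element frees are deferred */

	wait_for_grace_period();		/* let those frees finish first */
	free(m->elems);
	free(m);				/* only now can nothing reach the map */
}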