Lines matching refs:lru in kernel/bpf/bpf_lru_list.c, the LRU machinery behind BPF_MAP_TYPE_LRU_HASH and BPF_MAP_TYPE_LRU_PERCPU_HASH. Matches are grouped below by enclosing function; each match keeps its line number in the source file.
In __bpf_lru_list_rotate_active():
    144  static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
    158  if (++i == lru->nr_scans || node == first_node)

In __bpf_lru_list_rotate_inactive():
    171  static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
    187  while (i < lru->nr_scans) {

In __bpf_lru_list_shrink_inactive():
    211  __bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
    225  } else if (lru->del_from_htab(lru->del_arg, node)) {
    232  if (++i == lru->nr_scans)

In __bpf_lru_list_rotate():
    242  static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
    245  __bpf_lru_list_rotate_active(lru, l);
    247  __bpf_lru_list_rotate_inactive(lru, l);

In __bpf_lru_list_shrink():
    260  static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
    271  nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
    284  if (lru->del_from_htab(lru->del_arg, node)) {
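The five helpers above are the eviction machinery shared by both flavors: rotation grants referenced nodes a second chance and demotes unreferenced active nodes to the inactive list, while shrinking walks the inactive tail, asks the owner via lru->del_from_htab() whether each candidate may be evicted, and moves the winners onto a free list. Every pass is bounded by lru->nr_scans (lines 158, 187, 232), so no allocation ever pays for a full-list walk. Below is a minimal userspace model of the rotate-active step; the types and helpers are hypothetical stand-ins for the kernel's struct bpf_lru_node, struct bpf_lru_list, and list.h primitives, not the real API.

    #include <stdbool.h>

    /* Hypothetical stand-ins for struct bpf_lru_node / struct bpf_lru_list;
     * names and layout are illustrative, not the kernel's. */
    struct node {
        bool ref;                  /* set on lookup; cleared on second chance */
        struct node *prev, *next;
    };

    struct lru_list {
        struct node active;        /* circular list heads, kernel-style */
        struct node inactive;
        unsigned int nr_scans;     /* per-pass scan bound, like lru->nr_scans */
    };

    static void list_init(struct node *head)
    {
        head->prev = head->next = head;
    }

    static void list_del_node(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    static void list_add_front(struct node *head, struct node *n)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    /* Model of __bpf_lru_list_rotate_active(): walk from the tail (the
     * oldest entry) of the active list; referenced nodes get a second
     * chance at the front, unreferenced nodes are demoted to inactive.
     * Stop after nr_scans nodes so one rotation costs O(nr_scans),
     * never O(list length). */
    static void rotate_active(struct lru_list *l)
    {
        struct node *n = l->active.prev;
        unsigned int i = 0;

        while (n != &l->active && i++ < l->nr_scans) {
            struct node *prev = n->prev;   /* grab before unlinking */

            list_del_node(n);
            if (n->ref) {
                n->ref = false;
                list_add_front(&l->active, n);    /* second chance */
            } else {
                list_add_front(&l->inactive, n);  /* eviction candidate */
            }
            n = prev;
        }
    }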
In bpf_lru_list_pop_free_to_local():
    323  static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
    326  struct bpf_lru_list *l = &lru->common_lru.lru_list;
    334  __bpf_lru_list_rotate(lru, l);
    345  __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,

In __local_list_add_pending():
    352  static void __local_list_add_pending(struct bpf_lru *lru,
    358  *(u32 *)((void *)node + lru->hash_offset) = hash;

In __local_list_pop_pending():
    380  __local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
    390  lru->del_from_htab(lru->del_arg, node)) {
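These local-list helpers feed the common (shared) LRU: bpf_lru_list_pop_free_to_local() refills a CPU's private free list in LOCAL_FREE_TARGET batches, and __local_list_add_pending() parks a node as pending while stashing the caller's hash into the owner's element (line 358, and again at line 426 on the percpu path). Note that lru->hash_offset is measured from the embedded bpf_lru_node, not from the start of the element; kernel/bpf/hashtab.c derives it as a difference of two offsetof() values. A kernel-style sketch with a hypothetical element layout (assumes kernel/bpf/bpf_lru_list.h is in scope):

    /* Hypothetical element; echoes struct htab_elem in kernel/bpf/hashtab.c. */
    struct my_elem {
        struct bpf_lru_node lru_node;   /* embedded LRU bookkeeping */
        u32 hash;                       /* written by the LRU on pop_free */
    };

    /* The node-relative offset later handed to bpf_lru_init(); with it,
     * the writes at lines 358 and 426 land exactly on elem->hash. */
    static const u32 my_hash_offset = offsetof(struct my_elem, hash) -
                                      offsetof(struct my_elem, lru_node);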
In bpf_percpu_lru_pop_free():
    404  static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
    413  l = per_cpu_ptr(lru->percpu_lru, cpu);
    417  __bpf_lru_list_rotate(lru, l);
    421  __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
    426  *(u32 *)((void *)node + lru->hash_offset) = hash;

In bpf_common_lru_pop_free():
    436  static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
    440  struct bpf_common_lru *clru = &lru->common_lru;
    452  bpf_lru_list_pop_free_to_local(lru, loc_l);
    457  __local_list_add_pending(lru, loc_l, cpu, node, hash);
    481  node = __local_list_pop_pending(lru, steal_loc_l);
    492  __local_list_add_pending(lru, loc_l, cpu, node, hash);

In bpf_lru_pop_free():
    499  struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
    501  if (lru->percpu)
    502  return bpf_percpu_lru_pop_free(lru, hash);
    504  return bpf_common_lru_pop_free(lru, hash);
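bpf_lru_pop_free() is the one allocation entry point; the percpu flag chosen at init time decides (line 501) whether a request is served from the calling CPU's private list or from the common list, where bpf_common_lru_pop_free() first refills the local free list and, failing that, steals from a sibling CPU's free and pending lists (line 481). A sketch of a typical caller, modeled on prealloc_lru_pop() in kernel/bpf/hashtab.c; struct my_table is hypothetical and struct my_elem is the layout sketched above:

    struct my_table {              /* hypothetical owner, like struct bpf_htab */
        struct bpf_lru lru;
        void *elems;               /* preallocated element array */
        u32 elem_size, nr_elems;
    };

    static struct my_elem *my_table_alloc(struct my_table *t, u32 hash)
    {
        /* May evict: on the shrink path the LRU calls back into our
         * del_from_htab() hook to unlink the victim before reuse. */
        struct bpf_lru_node *node = bpf_lru_pop_free(&t->lru, hash);

        if (!node)
            return NULL;           /* nothing reclaimable right now */

        /* elem->hash is already filled in via hash_offset; recover the
         * containing element from the embedded node. */
        return container_of(node, struct my_elem, lru_node);
    }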
In bpf_common_lru_push_free():
    507  static void bpf_common_lru_push_free(struct bpf_lru *lru,
    520  loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
    538  bpf_lru_list_push_free(&lru->common_lru.lru_list, node);

In bpf_percpu_lru_push_free():
    541  static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
    547  l = per_cpu_ptr(lru->percpu_lru, node->cpu);

In bpf_lru_push_free():
    556  void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
    558  if (lru->percpu)
    559  bpf_percpu_lru_push_free(lru, node);
    561  bpf_common_lru_push_free(lru, node);
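Freeing is the mirror image: the caller returns the embedded node and the LRU routes it by node->cpu, to that CPU's private list in percpu mode (line 547) or, in common mode, back to the local list of the CPU that popped it (line 520), falling back to the global free list (line 538). Continuing the hypothetical my_table sketch:

    static void my_table_delete(struct my_table *t, struct my_elem *e)
    {
        /* Hand the node back; bpf_lru_push_free() picks the right
         * per-CPU or global free list from node->cpu and lru->percpu. */
        bpf_lru_push_free(&t->lru, &e->lru_node);
    }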
In bpf_common_lru_populate():
    564  static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
    568  struct bpf_lru_list *l = &lru->common_lru.lru_list;

In bpf_percpu_lru_populate():
    582  static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
    597  l = per_cpu_ptr(lru->percpu_lru, cpu);

In bpf_lru_populate():
    613  void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
    616  if (lru->percpu)
    617  bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
    620  bpf_common_lru_populate(lru, buf, node_offset, elem_size,
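Elements are preallocated by the owner and donated once via bpf_lru_populate(); node_offset says where the embedded bpf_lru_node sits inside each elem_size-byte slot of buf, and the percpu flavor divides the slots across the per-CPU lists. Modeled on prealloc_init() in kernel/bpf/hashtab.c, again with the hypothetical names:

    static void my_table_prealloc(struct my_table *t)
    {
        /* t->elems: one contiguous allocation of t->nr_elems slots of
         * t->elem_size bytes, each embedding a struct bpf_lru_node. */
        bpf_lru_populate(&t->lru, t->elems,
                         offsetof(struct my_elem, lru_node),
                         t->elem_size, t->nr_elems);
    }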
In bpf_lru_init():
    651  int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
    657  lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
    658  if (!lru->percpu_lru)
    664  l = per_cpu_ptr(lru->percpu_lru, cpu);
    667  lru->nr_scans = PERCPU_NR_SCANS;
    669  struct bpf_common_lru *clru = &lru->common_lru;
    683  lru->nr_scans = LOCAL_NR_SCANS;
    686  lru->percpu = percpu;
    687  lru->del_from_htab = del_from_htab;
    688  lru->del_arg = del_arg;
    689  lru->hash_offset = hash_offset;
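bpf_lru_init() wires it all together: percpu selects per-CPU lists (lines 657..667, with the smaller PERCPU_NR_SCANS bound) versus one common list plus per-CPU local lists (lines 669..683), and the remaining arguments are stored verbatim (lines 686..689). The del_from_htab callback is how the LRU asks the owner for permission to evict; returning false vetoes that candidate and the scan moves on (lines 225 and 284). A sketch modeled on htab_lru_map_delete_node() and the bpf_lru_init() call in kernel/bpf/hashtab.c; my_table_unlink() is a hypothetical helper:

    static bool my_del_from_table(void *arg, struct bpf_lru_node *node)
    {
        struct my_table *t = arg;
        struct my_elem *e = container_of(node, struct my_elem, lru_node);

        /* Unlink e from the owner's index under its lock; return true
         * only if the element really was removed. */
        return my_table_unlink(t, e);
    }

    static int my_table_init(struct my_table *t, bool percpu)
    {
        /* my_hash_offset is node-relative, as noted earlier. */
        return bpf_lru_init(&t->lru, percpu, my_hash_offset,
                            my_del_from_table, t);
    }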
In bpf_lru_destroy():
    694  void bpf_lru_destroy(struct bpf_lru *lru)
    696  if (lru->percpu)
    697  free_percpu(lru->percpu_lru);
    699  free_percpu(lru->common_lru.local_list);
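Teardown is symmetric: bpf_lru_destroy() frees whichever percpu allocation init made, percpu_lru in percpu mode (line 697) or common_lru.local_list otherwise (line 699); the element buffer handed to bpf_lru_populate() stays owned by the caller. Closing out the hypothetical lifecycle:

    static void my_table_free(struct my_table *t)
    {
        bpf_lru_destroy(&t->lru);  /* frees the LRU's percpu state */
        /* t->elems remains ours to free; the LRU never owned it. */
    }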