Lines matching "+full:inactive +-" in kernel/bpf/bpf_lru_list.c (only the matching lines are shown, each prefixed with its line number in the file)
1 // SPDX-License-Identifier: GPL-2.0-only
17 #define LOCAL_LIST_IDX(t) ((t) - BPF_LOCAL_LIST_T_OFFSET)
25 return &loc_l->lists[LOCAL_FREE_LIST_IDX];
30 return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
36 return READ_ONCE(node->ref);
41 WRITE_ONCE(node->ref, 0);
48 l->counts[type]++;
55 l->counts[type]--;
63 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
69 if (&node->list == l->next_inactive_rotation)
70 l->next_inactive_rotation = l->next_inactive_rotation->prev;
72 bpf_lru_list_count_dec(l, node->type);
74 node->type = tgt_free_type;
75 list_move(&node->list, free_list);
83 if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
88 node->type = tgt_type;
90 list_move(&node->list, &l->lists[tgt_type]);
93 /* Move nodes between or within active and inactive list (like
94 * active to inactive, inactive to active or tail of active back to
101 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
105 if (node->type != tgt_type) {
106 bpf_lru_list_count_dec(l, node->type);
108 node->type = tgt_type;
115 if (&node->list == l->next_inactive_rotation)
116 l->next_inactive_rotation = l->next_inactive_rotation->prev;
118 list_move(&node->list, &l->lists[tgt_type]);
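The matched lines above (101-118) are from the helper that moves a node between the active and inactive lists: the per-list counters are only adjusted when the node actually changes lists, and the next_inactive_rotation cursor is stepped back if the node being moved is the cursor itself (lines 115-116). Below is a minimal userspace sketch of just the move-plus-counter bookkeeping, assuming nothing beyond the standard C library; struct lru_list, node_move() and the other names are illustrative, not the kernel's, and the cursor fix-up is left out.

#include <stdio.h>

enum list_type { T_ACTIVE, T_INACTIVE, NR_LIST_TYPES };

/* Intrusive doubly linked list node, in the spirit of struct list_head. */
struct node {
    struct node *prev, *next;
    enum list_type type;
};

struct lru_list {
    struct node heads[NR_LIST_TYPES];       /* sentinel heads */
    unsigned int counts[NR_LIST_TYPES];
};

static void list_init(struct node *h)
{
    h->prev = h->next = h;
}

static void list_del_node(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

static void list_add_head(struct node *n, struct node *head)
{
    n->next = head->next;
    n->prev = head;
    head->next->prev = n;
    head->next = n;
}

/* Move @n to the head of the @tgt list.  The counters are touched only
 * when the node actually changes lists, mirroring the count_dec()/count_inc()
 * pair in the matched lines.
 */
static void node_move(struct lru_list *l, struct node *n, enum list_type tgt)
{
    if (n->type != tgt) {
        l->counts[n->type]--;
        l->counts[tgt]++;
        n->type = tgt;
    }
    list_del_node(n);
    list_add_head(n, &l->heads[tgt]);
}

int main(void)
{
    struct lru_list l = { .counts = { 1, 0 } };   /* node a starts out active */
    struct node a = { .type = T_ACTIVE };

    list_init(&l.heads[T_ACTIVE]);
    list_init(&l.heads[T_INACTIVE]);
    list_add_head(&a, &l.heads[T_ACTIVE]);

    node_move(&l, &a, T_INACTIVE);                /* active -> inactive */
    printf("active=%u inactive=%u\n",
           l.counts[T_ACTIVE], l.counts[T_INACTIVE]);
    return 0;
}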
123 return l->counts[BPF_LRU_LIST_T_INACTIVE] <
124 l->counts[BPF_LRU_LIST_T_ACTIVE];
133 * inactive list.
139 struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
150 if (++i == lru->nr_scans || node == first_node)
155 /* Rotate the inactive list. It starts from the next_inactive_rotation
166 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
167 struct list_head *cur, *last, *next = inactive;
171 if (list_empty(inactive))
174 last = l->next_inactive_rotation->next;
175 if (last == inactive)
176 last = last->next;
178 cur = l->next_inactive_rotation;
179 while (i < lru->nr_scans) {
180 if (cur == inactive) {
181 cur = cur->prev;
186 next = cur->prev;
195 l->next_inactive_rotation = next;
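Rotating the inactive list (lines 166-195 above) resumes from a saved cursor, l->next_inactive_rotation, walks towards the list head, skips the sentinel head when it wraps, and gives up after lru->nr_scans nodes. The sketch below shows only that resumable, bounded walk over a circular doubly linked list; what the kernel does with each visited node is reduced to a printf, and every name here is invented for the example.

#include <stdio.h>

struct cnode {
    struct cnode *prev, *next;
    int val;                        /* payload; the kernel node carries a ref bit */
};

/* Visit up to @nr_scans payload nodes, starting from the saved cursor and
 * walking in the prev direction, skipping the sentinel.  The cursor is
 * updated so the next call resumes where this one stopped.
 */
static void rotate_scan(struct cnode *sentinel, struct cnode **cursor,
                        unsigned int nr_scans)
{
    struct cnode *cur = *cursor;
    unsigned int i = 0;

    if (sentinel->next == sentinel)         /* the kernel bails out on an empty list */
        return;

    while (i < nr_scans) {
        if (cur == sentinel) {              /* wrapped around: step over the head */
            cur = cur->prev;
            continue;
        }
        printf("visited %d\n", cur->val);
        cur = cur->prev;
        i++;
    }
    *cursor = cur;
}

int main(void)
{
    struct cnode head, n[3];
    struct cnode *cursor;
    int i;

    head.prev = head.next = &head;
    for (i = 0; i < 3; i++) {               /* build head <-> n0 <-> n1 <-> n2 */
        n[i].val = i;
        n[i].prev = head.prev;
        n[i].next = &head;
        head.prev->next = &n[i];
        head.prev = &n[i];
    }
    cursor = &n[2];                         /* pretend this was saved earlier */

    rotate_scan(&head, &cursor, 2);         /* visits n2, n1 */
    rotate_scan(&head, &cursor, 2);         /* resumes: visits n0, wraps to n2 */
    return 0;
}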
198 /* Shrink the inactive list. It starts from the tail of the
199 * inactive list and only moves the nodes without the ref bit
209 struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
214 list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
217 } else if (lru->del_from_htab(lru->del_arg, node)) {
224 if (++i == lru->nr_scans)
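Shrinking the inactive list (lines 209-224 above) scans from the tail, only hands out nodes that the lru->del_from_htab() callback agrees to drop, and stops once enough free nodes have been produced or lru->nr_scans entries have been examined. The sketch below mimics that selection policy over a plain array instead of the kernel's intrusive list; in the kernel a node whose ref bit is still set is spared (it goes back onto the active list), which the sketch approximates by merely clearing a flag. try_shrink(), may_evict() and the entry layout are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

struct entry {
    bool ref;          /* recently used? (the kernel keeps this as node->ref) */
    bool freed;        /* already handed to the free pool */
};

/* Ask the owner whether @e may really be evicted; stands in for the
 * lru->del_from_htab() callback in the matched lines. */
static bool may_evict(struct entry *e)
{
    (void)e;
    return true;
}

/* Scan up to @nr_scans entries from the tail of @tab, collect at most
 * @tgt_nfree unreferenced, evictable entries, and give the spared ones a
 * second chance.  Returns how many entries were freed. */
static unsigned int try_shrink(struct entry *tab, unsigned int n,
                               unsigned int tgt_nfree, unsigned int nr_scans)
{
    unsigned int scanned = 0, nfree = 0;

    while (n-- && scanned++ < nr_scans) {
        struct entry *e = &tab[n];

        if (e->ref) {
            e->ref = false;                 /* spared this round */
        } else if (may_evict(e)) {
            e->freed = true;
            if (++nfree == tgt_nfree)
                break;
        }
    }
    return nfree;
}

int main(void)
{
    struct entry tab[4] = { { true }, { false }, { true }, { false } };
    unsigned int got = try_shrink(tab, 4, 2, 4);

    printf("freed %u entries\n", got);      /* frees tab[3] and tab[1] */
    return 0;
}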
232 * 2. Always rotate the inactive list
243 * ref-bit-cleared nodes and move them to the designated
248 * one node from either inactive or active list without
249 * honoring the ref-bit. It prefers inactive list to active
269 if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
270 force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
272 force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];
276 if (lru->del_from_htab(lru->del_arg, node)) {
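When the normal shrink produces nothing, the code forces a node out, preferring the inactive list and falling back to the active list, and it still asks lru->del_from_htab() for permission before evicting (lines 269-276 above). A small sketch of that preference-plus-veto pattern, with invented names and a function pointer standing in for the callback:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct pool {
    int *items;
    unsigned int len;
};

typedef bool (*evict_fn)(void *arg, int item);

/* Force-evict one item: prefer the inactive pool if it has anything,
 * otherwise fall back to the active pool, and let @may_evict veto each
 * candidate (the kernel's lru->del_from_htab(lru->del_arg, node) plays
 * this role). */
static bool force_shrink_one(struct pool *inactive, struct pool *active,
                             evict_fn may_evict, void *arg, int *out)
{
    struct pool *victim = inactive->len ? inactive : active;
    unsigned int i;

    for (i = 0; i < victim->len; i++) {
        if (may_evict(arg, victim->items[i])) {
            *out = victim->items[i];
            victim->items[i] = victim->items[--victim->len];
            return true;
        }
    }
    return false;
}

static bool always_ok(void *arg, int item)
{
    (void)arg; (void)item;
    return true;
}

int main(void)
{
    int in_items[] = { 7 }, act_items[] = { 1, 2 };
    struct pool inactive = { in_items, 1 }, active = { act_items, 2 };
    int evicted;

    if (force_shrink_one(&inactive, &active, always_ok, NULL, &evicted))
        printf("evicted %d\n", evicted);    /* prefers the inactive pool: 7 */
    return 0;
}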
307 if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
310 raw_spin_lock_irqsave(&l->lock, flags);
312 raw_spin_unlock_irqrestore(&l->lock, flags);
318 struct bpf_lru_list *l = &lru->common_lru.lru_list;
322 raw_spin_lock(&l->lock);
328 list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
332 if (++nfree == lru->target_free)
336 if (nfree < lru->target_free)
337 __bpf_lru_list_shrink(lru, l, lru->target_free - nfree,
341 raw_spin_unlock(&l->lock);
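In the common (shared) LRU flavour, a CPU whose local free list is empty refills it from the global free list, taking at most lru->target_free nodes, and only falls back to the shrink path for whatever is still missing (lines 328-337 above). A toy refill loop with the same shape; refill_local(), shrink_for() and the integer pool are stand-ins invented for this sketch:

#include <stdio.h>

/* Stand-in for the shrink path: try to produce up to @want free nodes the
 * hard way; returns how many it actually produced. */
static unsigned int shrink_for(unsigned int want)
{
    printf("shrinking for %u more node(s)\n", want);
    return want;                    /* pretend the shrinker always succeeds */
}

/* Move up to @target_free nodes from a global free pool of size *global_free
 * to the local pool, shrinking for the difference if the pool runs dry. */
static unsigned int refill_local(unsigned int *global_free,
                                 unsigned int target_free)
{
    unsigned int nfree = 0;

    while (*global_free && nfree < target_free) {
        (*global_free)--;           /* list_move() to the local list in the kernel */
        nfree++;
    }
    if (nfree < target_free)
        nfree += shrink_for(target_free - nfree);
    return nfree;
}

int main(void)
{
    unsigned int global_free = 3;

    printf("got %u\n", refill_local(&global_free, 8));  /* 3 popped + 5 shrunk */
    return 0;
}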
350 *(u32 *)((void *)node + lru->hash_offset) = hash;
351 node->cpu = cpu;
352 node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
354 list_add(&node->list, local_pending_list(loc_l));
366 list_del(&node->list);
382 lru->del_from_htab(lru->del_arg, node)) {
383 list_del(&node->list);
405 l = per_cpu_ptr(lru->percpu_lru, cpu);
407 raw_spin_lock_irqsave(&l->lock, flags);
411 free_list = &l->lists[BPF_LRU_LIST_T_FREE];
418 *(u32 *)((void *)node + lru->hash_offset) = hash;
423 raw_spin_unlock_irqrestore(&l->lock, flags);
432 struct bpf_common_lru *clru = &lru->common_lru;
438 loc_l = per_cpu_ptr(clru->local_list, cpu);
440 raw_spin_lock_irqsave(&loc_l->lock, flags);
451 raw_spin_unlock_irqrestore(&loc_l->lock, flags);
461 * with the loc_l->next_steal CPU.
464 first_steal = loc_l->next_steal;
467 steal_loc_l = per_cpu_ptr(clru->local_list, steal);
469 raw_spin_lock_irqsave(&steal_loc_l->lock, flags);
475 raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);
480 loc_l->next_steal = steal;
483 raw_spin_lock_irqsave(&loc_l->lock, flags);
485 raw_spin_unlock_irqrestore(&loc_l->lock, flags);
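If the local lists of the current CPU cannot supply a node, the allocation steals from other CPUs' local lists: it starts at the remembered loc_l->next_steal CPU, takes each victim's lock just for the attempt, and records where it stopped so the next allocation resumes there (lines 461-485 above). The sketch below keeps only the round-robin cursor arithmetic; NR_CPUS, pools[], try_steal_from() and steal_one() are invented, and the per-victim locking is only mentioned in a comment.

#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 4

static unsigned int pools[NR_CPUS] = { 0, 0, 2, 1 };    /* free nodes per CPU */

/* In the kernel each victim's list is protected by its own raw spinlock,
 * taken only for the duration of the attempt; omitted here. */
static bool try_steal_from(unsigned int cpu)
{
    if (!pools[cpu])
        return false;
    pools[cpu]--;
    return true;
}

/* Round-robin steal: start at *next_steal, try each CPU at most once, and
 * remember where to resume next time, like loc_l->next_steal. */
static bool steal_one(unsigned int *next_steal)
{
    unsigned int first = *next_steal;
    unsigned int cpu = first;
    bool ok = false;

    do {
        ok = try_steal_from(cpu);
        cpu = (cpu + 1) % NR_CPUS;
    } while (!ok && cpu != first);

    *next_steal = cpu;
    return ok;
}

int main(void)
{
    unsigned int next_steal = 1;

    while (steal_one(&next_steal))
        printf("stole a node, next_steal=%u\n", next_steal);
    return 0;
}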
493 if (lru->percpu)
502 u8 node_type = READ_ONCE(node->type);
512 loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
514 raw_spin_lock_irqsave(&loc_l->lock, flags);
516 if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
517 raw_spin_unlock_irqrestore(&loc_l->lock, flags);
521 node->type = BPF_LRU_LOCAL_LIST_T_FREE;
523 list_move(&node->list, local_free_list(loc_l));
525 raw_spin_unlock_irqrestore(&loc_l->lock, flags);
530 bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
539 l = per_cpu_ptr(lru->percpu_lru, node->cpu);
541 raw_spin_lock_irqsave(&l->lock, flags);
545 raw_spin_unlock_irqrestore(&l->lock, flags);
550 if (lru->percpu)
560 struct bpf_lru_list *l = &lru->common_lru.lru_list;
567 node->type = BPF_LRU_LIST_T_FREE;
569 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
573 lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2,
592 l = per_cpu_ptr(lru->percpu_lru, cpu);
595 node->cpu = cpu;
596 node->type = BPF_LRU_LIST_T_FREE;
598 list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
611 if (lru->percpu)
624 INIT_LIST_HEAD(&loc_l->lists[i]);
626 loc_l->next_steal = cpu;
628 raw_spin_lock_init(&loc_l->lock);
636 INIT_LIST_HEAD(&l->lists[i]);
639 l->counts[i] = 0;
641 l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];
643 raw_spin_lock_init(&l->lock);
652 lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
653 if (!lru->percpu_lru)
654 return -ENOMEM;
659 l = per_cpu_ptr(lru->percpu_lru, cpu);
662 lru->nr_scans = PERCPU_NR_SCANS;
664 struct bpf_common_lru *clru = &lru->common_lru;
666 clru->local_list = alloc_percpu(struct bpf_lru_locallist);
667 if (!clru->local_list)
668 return -ENOMEM;
673 loc_l = per_cpu_ptr(clru->local_list, cpu);
677 bpf_lru_list_init(&clru->lru_list);
678 lru->nr_scans = LOCAL_NR_SCANS;
681 lru->percpu = percpu;
682 lru->del_from_htab = del_from_htab;
683 lru->del_arg = del_arg;
684 lru->hash_offset = hash_offset;
691 if (lru->percpu)
692 free_percpu(lru->percpu_lru);
694 free_percpu(lru->common_lru.local_list);