xref: /linux/kernel/bpf/bpf_lru_list.c (revision 2fe05e1139a555ae91f00a812cb9520e7d3022ab)
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

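/* Overview (summary of the mechanisms implemented below):
 *
 * Two flavours of LRU are provided:
 *
 * - A common LRU: one global bpf_lru_list shared by all CPUs, fronted
 *   by per-cpu local free/pending lists so that the global lock only
 *   needs to be taken when a CPU has to refill its local free list.
 *
 * - A per-cpu LRU: one independent bpf_lru_list per CPU with no local
 *   lists, so each CPU only ever locks and touches its own list.
 *
 * Each node carries a ref bit, expected to be set by the map access
 * path when an element is used.  The rotation logic below clears it
 * and gives such nodes another pass through the active list (a
 * second-chance style scheme).
 */
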
#define LOCAL_FREE_TARGET		(128)
#define LOCAL_NR_SCANS			LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET		(4)
#define PERCPU_NR_SCANS			PERCPU_FREE_TARGET

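/* The common LRU refills a CPU's local free list in batches of
 * LOCAL_FREE_TARGET nodes per acquisition of the global lock, and a
 * rotation/shrink pass scans at most the same number of nodes.  The
 * per-cpu LRU operates on a CPU-private list, so it uses much smaller
 * batch and scan sizes.
 */
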
/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)

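/* Return the next possible CPU after @cpu, wrapping around to the
 * first possible CPU.  Used to walk the CPUs in round-robin order
 * when stealing free nodes from other CPUs' local lists.
 */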
static int get_next_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_possible_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_possible_mask);
	return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
	return node->ref;
}

static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]--;
}

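/* Detach a node from the global LRU list it is currently on and put
 * it on @free_list, updating the per-type counters and moving the
 * inactive rotation cursor out of the way if the node happened to be
 * the next rotation candidate.
 */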
static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					enum bpf_lru_list_type tgt_free_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	/* If the node being removed is the next_inactive_rotation
	 * candidate, move the next_inactive_rotation pointer as well.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);

	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}

/* Move a node from a local list into the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	node->ref = 0;
	list_move(&node->list, &l->lists[tgt_type]);
}

/* Move a node between or within the active and inactive lists (e.g.
 * active to inactive, inactive to active, or from the tail of the
 * active list back to its head).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	if (node->type != tgt_type) {
		bpf_lru_list_count_dec(l, node->type);
		bpf_lru_list_count_inc(l, tgt_type);
		node->type = tgt_type;
	}
	node->ref = 0;

	/* If the node being moved is the next_inactive_rotation
	 * candidate, move the next_inactive_rotation pointer as well.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}

static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
		l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from the tail.
 * 2. If the node has the ref bit set, rotate it back to the head of
 *    the active list with the ref bit cleared, giving it one more
 *    chance to survive in the active list.
 * 3. If the ref bit is not set, move the node to the head of the
 *    inactive list.
 * 4. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;

	first_node = list_first_entry(active, struct bpf_lru_node, list);
	list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

		if (++i == lru->nr_scans || node == first_node)
			break;
	}
}

/* Rotate the inactive list.  It starts from the next_inactive_rotation
 * candidate.
 * 1. If the node has the ref bit set, move it to the head of the
 *    active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, leave it at its
 *    current location (i.e. do nothing) so that it can be considered
 *    during the next inactive shrink.
 * 3. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		if (cur == inactive) {
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		next = cur->prev;
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	l->next_inactive_rotation = next;
}

/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only moves nodes without the ref bit set
 * to the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
			       struct bpf_lru_list *l,
			       unsigned int tgt_nshrink,
			       struct list_head *free_list,
			       enum bpf_lru_list_type tgt_free_type)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nshrinked = 0;
	unsigned int i = 0;

	list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
		if (bpf_lru_node_is_ref(node)) {
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		} else if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			if (++nshrinked == tgt_nshrink)
				break;
		}

		if (++i == lru->nr_scans)
			break;
	}

	return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(), it simply removes one node from
 * either the inactive or the active list without honoring the
 * ref bit, preferring the inactive list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)
{
	struct bpf_lru_node *node, *tmp_node;
	struct list_head *force_shrink_list;
	unsigned int nshrinked;

	nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
						   free_list, tgt_free_type);
	if (nshrinked)
		return nshrinked;

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
					 list) {
		if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			return 1;
		}
	}

	return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;

	list_for_each_entry_safe_reverse(node, tmp_node,
					 local_pending_list(loc_l), list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move_in(l, node,
					       BPF_LRU_LIST_T_INACTIVE);
	}
}

static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	raw_spin_lock_irqsave(&l->lock, flags);
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	raw_spin_unlock_irqrestore(&l->lock, flags);
}

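/* Refill a CPU's local free list from the global LRU list:
 * 1. Flush the local pending nodes back into the global
 *    active/inactive lists.
 * 2. Rotate the global lists.
 * 3. Move up to LOCAL_FREE_TARGET nodes from the global free list to
 *    the local free list, shrinking the inactive/active lists to make
 *    up any shortfall.
 * Called with the local list lock held; takes the global list lock.
 */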
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
					   struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nfree = 0;

	raw_spin_lock(&l->lock);

	__local_list_flush(l, loc_l);

	__bpf_lru_list_rotate(lru, l);

	list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
				 list) {
		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
					    BPF_LRU_LOCAL_LIST_T_FREE);
		if (++nfree == LOCAL_FREE_TARGET)
			break;
	}

	if (nfree < LOCAL_FREE_TARGET)
		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
				      local_free_list(loc_l),
				      BPF_LRU_LOCAL_LIST_T_FREE);

	raw_spin_unlock(&l->lock);
}

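/* Hand a node out to the caller: record the hash at lru->hash_offset
 * inside the element, remember the owning CPU, and park the node on
 * the local pending list until it is either pushed back as free or
 * flushed into the global LRU list by a later refill.
 */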
static void __local_list_add_pending(struct bpf_lru *lru,
				     struct bpf_lru_locallist *loc_l,
				     int cpu,
				     struct bpf_lru_node *node,
				     u32 hash)
{
	*(u32 *)((void *)node + lru->hash_offset) = hash;
	node->cpu = cpu;
	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
	node->ref = 0;
	list_add(&node->list, local_pending_list(loc_l));
}

static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;

	node = list_first_entry_or_null(local_free_list(loc_l),
					struct bpf_lru_node,
					list);
	if (node)
		list_del(&node->list);

	return node;
}

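/* Steal a node from a local pending list.  The first pass only takes
 * nodes whose ref bit is clear; if none of those can be removed from
 * the htab, a second pass ignores the ref bit.  del_from_htab() must
 * succeed before a node can be reused, since the element may still be
 * reachable from the hash table.
 */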
static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;
	bool force = false;

ignore_ref:
	/* Get from the tail (i.e. the oldest elements) of the pending list. */
	list_for_each_entry_reverse(node, local_pending_list(loc_l),
				    list) {
		if ((!bpf_lru_node_is_ref(node) || force) &&
		    lru->del_from_htab(lru->del_arg, node)) {
			list_del(&node->list);
			return node;
		}
	}

	if (!force) {
		force = true;
		goto ignore_ref;
	}

	return NULL;
}

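/* Per-cpu LRU allocation path: rotate this CPU's own LRU list and take
 * a node from its free list, shrinking the inactive/active lists when
 * the free list is empty.  Only the current CPU's lock is taken.
 */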
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct list_head *free_list;
	struct bpf_lru_node *node = NULL;
	struct bpf_lru_list *l;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	l = per_cpu_ptr(lru->percpu_lru, cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_list_rotate(lru, l);

	free_list = &l->lists[BPF_LRU_LIST_T_FREE];
	if (list_empty(free_list))
		__bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
				      BPF_LRU_LIST_T_FREE);

	if (!list_empty(free_list)) {
		node = list_first_entry(free_list, struct bpf_lru_node, list);
		*(u32 *)((void *)node + lru->hash_offset) = hash;
		node->ref = 0;
		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
	}

	raw_spin_unlock_irqrestore(&l->lock, flags);

	return node;
}

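/* Common LRU allocation path: try the local free list first, then
 * refill it from the global LRU list, and as a last resort steal a
 * free or pending node from another CPU's local lists.
 */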
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct bpf_lru_locallist *loc_l, *steal_loc_l;
	struct bpf_common_lru *clru = &lru->common_lru;
	struct bpf_lru_node *node;
	int steal, first_steal;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	loc_l = per_cpu_ptr(clru->local_list, cpu);

	raw_spin_lock_irqsave(&loc_l->lock, flags);

	node = __local_list_pop_free(loc_l);
	if (!node) {
		bpf_lru_list_pop_free_to_local(lru, loc_l);
		node = __local_list_pop_free(loc_l);
	}

	if (node)
		__local_list_add_pending(lru, loc_l, cpu, node, hash);

	raw_spin_unlock_irqrestore(&loc_l->lock, flags);

	if (node)
		return node;

	/* No free node was found on the local free list or the
	 * global LRU list.
	 *
	 * Steal from the local free/pending lists of the current
	 * and remote CPUs in round-robin fashion, starting with
	 * the loc_l->next_steal CPU.
	 */

	first_steal = loc_l->next_steal;
	steal = first_steal;
	do {
		steal_loc_l = per_cpu_ptr(clru->local_list, steal);

		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

		node = __local_list_pop_free(steal_loc_l);
		if (!node)
			node = __local_list_pop_pending(lru, steal_loc_l);

		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

		steal = get_next_cpu(steal);
	} while (!node && steal != first_steal);

	loc_l->next_steal = steal;

	if (node) {
		raw_spin_lock_irqsave(&loc_l->lock, flags);
		__local_list_add_pending(lru, loc_l, cpu, node, hash);
		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
	}

	return node;
}

struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
	if (lru->percpu)
		return bpf_percpu_lru_pop_free(lru, hash);
	else
		return bpf_common_lru_pop_free(lru, hash);
}

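/* Return a node to the common LRU.  A node still on a local pending
 * list goes back to that CPU's local free list; its type is re-checked
 * under the local lock because a concurrent flush may have already
 * moved it into the global LRU list.  Everything else goes to the
 * global free list.
 */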
static void bpf_common_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
	    WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
		return;

	if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
		struct bpf_lru_locallist *loc_l;

		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

		raw_spin_lock_irqsave(&loc_l->lock, flags);

		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
			goto check_lru_list;
		}

		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
		node->ref = 0;
		list_move(&node->list, local_free_list(loc_l));

		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
		return;
	}

check_lru_list:
	bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	struct bpf_lru_list *l;
	unsigned long flags;

	l = per_cpu_ptr(lru->percpu_lru, node->cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

	raw_spin_unlock_irqrestore(&l->lock, flags);
}

void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
	if (lru->percpu)
		bpf_percpu_lru_push_free(lru, node);
	else
		bpf_common_lru_push_free(lru, node);
}

static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	u32 i;

	for (i = 0; i < nr_elems; i++) {
		struct bpf_lru_node *node;

		node = (struct bpf_lru_node *)(buf + node_offset);
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		buf += elem_size;
	}
}

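/* Spread the pre-allocated elements over the possible CPUs, giving
 * each CPU's free list nr_elems / num_possible_cpus() nodes.
 */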
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	u32 i, pcpu_entries;
	int cpu;
	struct bpf_lru_list *l;

	pcpu_entries = nr_elems / num_possible_cpus();

	i = 0;

	for_each_possible_cpu(cpu) {
		struct bpf_lru_node *node;

		l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
		node = (struct bpf_lru_node *)(buf + node_offset);
		node->cpu = cpu;
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
		      u32 elem_size, u32 nr_elems)
{
	if (lru->percpu)
		bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
	else
		bpf_common_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
		INIT_LIST_HEAD(&loc_l->lists[i]);

	loc_l->next_steal = cpu;

	raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
		INIT_LIST_HEAD(&l->lists[i]);

	for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
		l->counts[i] = 0;

	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

	raw_spin_lock_init(&l->lock);
}

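/* Initialize an LRU.  @hash_offset is the offset of the u32 hash field
 * inside the element that the pop_free path fills in.  @del_from_htab
 * is called (with @del_arg) before a node may be recycled; it should
 * unlink the element from the owning hash table and return false if
 * the element cannot be removed at the moment.
 */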
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
		 del_from_htab_func del_from_htab, void *del_arg)
{
	int cpu;

	if (percpu) {
		lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
		if (!lru->percpu_lru)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_list *l;

			l = per_cpu_ptr(lru->percpu_lru, cpu);
			bpf_lru_list_init(l);
		}
		lru->nr_scans = PERCPU_NR_SCANS;
	} else {
		struct bpf_common_lru *clru = &lru->common_lru;

		clru->local_list = alloc_percpu(struct bpf_lru_locallist);
		if (!clru->local_list)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_locallist *loc_l;

			loc_l = per_cpu_ptr(clru->local_list, cpu);
			bpf_lru_locallist_init(loc_l, cpu);
		}

		bpf_lru_list_init(&clru->lru_list);
		lru->nr_scans = LOCAL_NR_SCANS;
	}

	lru->percpu = percpu;
	lru->del_from_htab = del_from_htab;
	lru->del_arg = del_arg;
	lru->hash_offset = hash_offset;

	return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
	if (lru->percpu)
		free_percpu(lru->percpu_lru);
	else
		free_percpu(lru->common_lru.local_list);
}