xref: /linux/mm/list_lru.c (revision 660e4b18a72efe555c9b7ff9a80cfd4777af9609)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
4  * Authors: Dave Chinner and Glauber Costa
5  *
6  * Generic LRU infrastructure
7  */
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/mm.h>
11 #include <linux/list_lru.h>
12 #include <linux/slab.h>
13 #include <linux/mutex.h>
14 #include <linux/memcontrol.h>
15 #include "slab.h"
16 #include "internal.h"
17 
18 #ifdef CONFIG_MEMCG
19 static LIST_HEAD(memcg_list_lrus);
20 static DEFINE_MUTEX(list_lrus_mutex);
21 
22 static inline bool list_lru_memcg_aware(struct list_lru *lru)
23 {
24 	return lru->memcg_aware;
25 }
26 
27 static void list_lru_register(struct list_lru *lru)
28 {
29 	if (!list_lru_memcg_aware(lru))
30 		return;
31 
32 	mutex_lock(&list_lrus_mutex);
33 	list_add(&lru->list, &memcg_list_lrus);
34 	mutex_unlock(&list_lrus_mutex);
35 }
36 
37 static void list_lru_unregister(struct list_lru *lru)
38 {
39 	if (!list_lru_memcg_aware(lru))
40 		return;
41 
42 	mutex_lock(&list_lrus_mutex);
43 	list_del(&lru->list);
44 	mutex_unlock(&list_lrus_mutex);
45 }
46 
47 static int lru_shrinker_id(struct list_lru *lru)
48 {
49 	return lru->shrinker_id;
50 }
51 
52 static inline struct list_lru_one *
53 list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
54 {
55 	if (list_lru_memcg_aware(lru) && idx >= 0) {
56 		struct list_lru_memcg *mlru = xa_load(&lru->xa, idx);
57 
58 		return mlru ? &mlru->node[nid] : NULL;
59 	}
60 	return &lru->node[nid].lru;
61 }
62 #else
63 static void list_lru_register(struct list_lru *lru)
64 {
65 }
66 
67 static void list_lru_unregister(struct list_lru *lru)
68 {
69 }
70 
71 static int lru_shrinker_id(struct list_lru *lru)
72 {
73 	return -1;
74 }
75 
76 static inline bool list_lru_memcg_aware(struct list_lru *lru)
77 {
78 	return false;
79 }
80 
81 static inline struct list_lru_one *
82 list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx)
83 {
84 	return &lru->node[nid].lru;
85 }
86 #endif /* CONFIG_MEMCG */
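/*
 * Layout, as a reading aid (inferred from the helpers above): every
 * list_lru keeps one list_lru_one per NUMA node in lru->node[nid],
 * protected by lru->node[nid].lock.  A memcg-aware lru additionally keeps
 * one list_lru_memcg per cgroup in lru->xa, indexed by kmemcg_id, and each
 * of those again holds one list_lru_one per node.  list_lru_from_memcg_idx()
 * resolves a (nid, idx) pair to the right list; a negative index (an
 * unaccounted object) falls back to the plain per-node list.
 */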
87 
88 /* The caller must ensure the memcg lifetime. */
89 bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
90 		    struct mem_cgroup *memcg)
91 {
92 	struct list_lru_node *nlru = &lru->node[nid];
93 	struct list_lru_one *l;
94 
95 	spin_lock(&nlru->lock);
96 	if (list_empty(item)) {
97 		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
98 		list_add_tail(item, &l->list);
99 		/* Set shrinker bit if the first element was added */
100 		if (!l->nr_items++)
101 			set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));
102 		nlru->nr_items++;
103 		spin_unlock(&nlru->lock);
104 		return true;
105 	}
106 	spin_unlock(&nlru->lock);
107 	return false;
108 }
109 EXPORT_SYMBOL_GPL(list_lru_add);
110 
111 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
112 {
113 	bool ret;
114 	int nid = page_to_nid(virt_to_page(item));
115 
116 	if (list_lru_memcg_aware(lru)) {
117 		rcu_read_lock();
118 		ret = list_lru_add(lru, item, nid, mem_cgroup_from_slab_obj(item));
119 		rcu_read_unlock();
120 	} else {
121 		ret = list_lru_add(lru, item, nid, NULL);
122 	}
123 
124 	return ret;
125 }
126 EXPORT_SYMBOL_GPL(list_lru_add_obj);
127 
128 /* The caller must ensure the memcg lifetime. */
129 bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
130 		    struct mem_cgroup *memcg)
131 {
132 	struct list_lru_node *nlru = &lru->node[nid];
133 	struct list_lru_one *l;
134 
135 	spin_lock(&nlru->lock);
136 	if (!list_empty(item)) {
137 		l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
138 		list_del_init(item);
139 		l->nr_items--;
140 		nlru->nr_items--;
141 		spin_unlock(&nlru->lock);
142 		return true;
143 	}
144 	spin_unlock(&nlru->lock);
145 	return false;
146 }
147 EXPORT_SYMBOL_GPL(list_lru_del);
148 
149 bool list_lru_del_obj(struct list_lru *lru, struct list_head *item)
150 {
151 	bool ret;
152 	int nid = page_to_nid(virt_to_page(item));
153 
154 	if (list_lru_memcg_aware(lru)) {
155 		rcu_read_lock();
156 		ret = list_lru_del(lru, item, nid, mem_cgroup_from_slab_obj(item));
157 		rcu_read_unlock();
158 	} else {
159 		ret = list_lru_del(lru, item, nid, NULL);
160 	}
161 
162 	return ret;
163 }
164 EXPORT_SYMBOL_GPL(list_lru_del_obj);
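/*
 * A minimal usage sketch, not taken from this file: a cache embeds a
 * list_head in its objects and moves them on and off the lru as they
 * become reclaimable or get reused.  "demo_obj", "demo_lru" and the
 * helpers below are made-up names.
 *
 *	struct demo_obj {
 *		spinlock_t lock;
 *		struct list_head lru_link;
 *	};
 *
 *	static struct list_lru demo_lru;
 *
 *	static void demo_obj_make_reclaimable(struct demo_obj *obj)
 *	{
 *		list_lru_add_obj(&demo_lru, &obj->lru_link);
 *	}
 *
 *	static void demo_obj_reuse(struct demo_obj *obj)
 *	{
 *		list_lru_del_obj(&demo_lru, &obj->lru_link);
 *	}
 */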
165 
166 void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
167 {
168 	list_del_init(item);
169 	list->nr_items--;
170 }
171 EXPORT_SYMBOL_GPL(list_lru_isolate);
172 
173 void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
174 			   struct list_head *head)
175 {
176 	list_move(item, head);
177 	list->nr_items--;
178 }
179 EXPORT_SYMBOL_GPL(list_lru_isolate_move);
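/*
 * A sketch of the isolate callback these two helpers are meant for, not
 * taken from this file.  The signature matches list_lru_walk_cb as invoked
 * by __list_lru_walk_one() below; "demo_obj" is the made-up structure from
 * the sketch above, and the walker is assumed to pass a private dispose
 * list as cb_arg.
 *
 *	static enum lru_status demo_isolate(struct list_head *item,
 *					    struct list_lru_one *lru,
 *					    spinlock_t *lru_lock, void *arg)
 *	{
 *		struct list_head *dispose = arg;
 *		struct demo_obj *obj = container_of(item, struct demo_obj,
 *						    lru_link);
 *
 *		if (!spin_trylock(&obj->lock))
 *			return LRU_SKIP;
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		spin_unlock(&obj->lock);
 *		return LRU_REMOVED;
 *	}
 */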
180 
181 unsigned long list_lru_count_one(struct list_lru *lru,
182 				 int nid, struct mem_cgroup *memcg)
183 {
184 	struct list_lru_one *l;
185 	long count;
186 
187 	rcu_read_lock();
188 	l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));
189 	count = l ? READ_ONCE(l->nr_items) : 0;
190 	rcu_read_unlock();
191 
192 	if (unlikely(count < 0))
193 		count = 0;
194 
195 	return count;
196 }
197 EXPORT_SYMBOL_GPL(list_lru_count_one);
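/*
 * A sketch of the typical consumer, not from this file: a shrinker's
 * ->count_objects() usually reaches this through the list_lru_shrink_count()
 * wrapper in <linux/list_lru.h>, which feeds sc->nid and sc->memcg into
 * list_lru_count_one().  "demo_lru" is the made-up lru from the sketches
 * above.
 *
 *	static unsigned long demo_count(struct shrinker *shrink,
 *					struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&demo_lru, sc);
 *	}
 */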
198 
199 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
200 {
201 	struct list_lru_node *nlru;
202 
203 	nlru = &lru->node[nid];
204 	return nlru->nr_items;
205 }
206 EXPORT_SYMBOL_GPL(list_lru_count_node);
207 
208 static unsigned long
209 __list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
210 		    list_lru_walk_cb isolate, void *cb_arg,
211 		    unsigned long *nr_to_walk)
212 {
213 	struct list_lru_node *nlru = &lru->node[nid];
214 	struct list_lru_one *l;
215 	struct list_head *item, *n;
216 	unsigned long isolated = 0;
217 
218 restart:
219 	l = list_lru_from_memcg_idx(lru, nid, memcg_idx);
220 	if (!l)
221 		goto out;
222 
223 	list_for_each_safe(item, n, &l->list) {
224 		enum lru_status ret;
225 
226 		/*
227 		 * decrement nr_to_walk first so that we don't livelock if we
228 		 * get stuck on large numbers of LRU_RETRY items
229 		 */
230 		if (!*nr_to_walk)
231 			break;
232 		--*nr_to_walk;
233 
234 		ret = isolate(item, l, &nlru->lock, cb_arg);
235 		switch (ret) {
236 		case LRU_REMOVED_RETRY:
237 			assert_spin_locked(&nlru->lock);
238 			fallthrough;
239 		case LRU_REMOVED:
240 			isolated++;
241 			nlru->nr_items--;
242 			/*
243 			 * If the lru lock has been dropped, our list
244 			 * traversal is now invalid and so we have to
245 			 * restart from scratch.
246 			 */
247 			if (ret == LRU_REMOVED_RETRY)
248 				goto restart;
249 			break;
250 		case LRU_ROTATE:
251 			list_move_tail(item, &l->list);
252 			break;
253 		case LRU_SKIP:
254 			break;
255 		case LRU_RETRY:
256 			/*
257 			 * The lru lock has been dropped, our list traversal is
258 			 * now invalid and so we have to restart from scratch.
259 			 */
260 			assert_spin_locked(&nlru->lock);
261 			goto restart;
262 		case LRU_STOP:
263 			assert_spin_locked(&nlru->lock);
264 			goto out;
265 		default:
266 			BUG();
267 		}
268 	}
269 out:
270 	return isolated;
271 }
272 
273 unsigned long
274 list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
275 		  list_lru_walk_cb isolate, void *cb_arg,
276 		  unsigned long *nr_to_walk)
277 {
278 	struct list_lru_node *nlru = &lru->node[nid];
279 	unsigned long ret;
280 
281 	spin_lock(&nlru->lock);
282 	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
283 				  cb_arg, nr_to_walk);
284 	spin_unlock(&nlru->lock);
285 	return ret;
286 }
287 EXPORT_SYMBOL_GPL(list_lru_walk_one);
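/*
 * The matching ->scan_objects() side, again only a sketch: the
 * list_lru_shrink_walk() wrapper in <linux/list_lru.h> passes sc->nid,
 * sc->memcg and sc->nr_to_scan down to list_lru_walk_one().
 * "demo_isolate" is the callback sketched above and "demo_dispose" is a
 * made-up helper that frees everything moved onto the dispose list.
 *
 *	static unsigned long demo_scan(struct shrinker *shrink,
 *				       struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&demo_lru, sc, demo_isolate,
 *					     &dispose);
 *		demo_dispose(&dispose);
 *		return freed;
 *	}
 */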
288 
289 unsigned long
290 list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
291 		      list_lru_walk_cb isolate, void *cb_arg,
292 		      unsigned long *nr_to_walk)
293 {
294 	struct list_lru_node *nlru = &lru->node[nid];
295 	unsigned long ret;
296 
297 	spin_lock_irq(&nlru->lock);
298 	ret = __list_lru_walk_one(lru, nid, memcg_kmem_id(memcg), isolate,
299 				  cb_arg, nr_to_walk);
300 	spin_unlock_irq(&nlru->lock);
301 	return ret;
302 }
303 
304 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
305 				 list_lru_walk_cb isolate, void *cb_arg,
306 				 unsigned long *nr_to_walk)
307 {
308 	long isolated = 0;
309 
310 	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
311 				      nr_to_walk);
312 
313 #ifdef CONFIG_MEMCG
314 	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
315 		struct list_lru_memcg *mlru;
316 		unsigned long index;
317 
318 		xa_for_each(&lru->xa, index, mlru) {
319 			struct list_lru_node *nlru = &lru->node[nid];
320 
321 			spin_lock(&nlru->lock);
322 			isolated += __list_lru_walk_one(lru, nid, index,
323 							isolate, cb_arg,
324 							nr_to_walk);
325 			spin_unlock(&nlru->lock);
326 
327 			if (*nr_to_walk <= 0)
328 				break;
329 		}
330 	}
331 #endif
332 
333 	return isolated;
334 }
335 EXPORT_SYMBOL_GPL(list_lru_walk_node);
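/*
 * For a full drain, for example just before list_lru_destroy(), the
 * list_lru_walk() wrapper in <linux/list_lru.h> repeats this per-node walk
 * over every node.  A sketch with the same made-up names as above:
 *
 *	LIST_HEAD(dispose);
 *
 *	list_lru_walk(&demo_lru, demo_isolate, &dispose, ULONG_MAX);
 *	demo_dispose(&dispose);
 */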
336 
337 static void init_one_lru(struct list_lru_one *l)
338 {
339 	INIT_LIST_HEAD(&l->list);
340 	l->nr_items = 0;
341 }
342 
343 #ifdef CONFIG_MEMCG
344 static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp)
345 {
346 	int nid;
347 	struct list_lru_memcg *mlru;
348 
349 	mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp);
350 	if (!mlru)
351 		return NULL;
352 
353 	for_each_node(nid)
354 		init_one_lru(&mlru->node[nid]);
355 
356 	return mlru;
357 }
358 
359 static void memcg_list_lru_free(struct list_lru *lru, int src_idx)
360 {
361 	struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx);
362 
363 	/*
364 	 * __list_lru_walk_one() may still be walking this node's list;
365 	 * that walk runs under lru->node[nid].lock, which also serves as
366 	 * an RCU read-side critical section, so the old list_lru_memcg
367 	 * must be freed with kvfree_rcu() rather than kfree().
368 	 */
369 	if (mlru)
370 		kvfree_rcu(mlru, rcu);
371 }
372 
373 static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
374 {
375 	if (memcg_aware)
376 		xa_init_flags(&lru->xa, XA_FLAGS_LOCK_IRQ);
377 	lru->memcg_aware = memcg_aware;
378 }
379 
380 static void memcg_destroy_list_lru(struct list_lru *lru)
381 {
382 	XA_STATE(xas, &lru->xa, 0);
383 	struct list_lru_memcg *mlru;
384 
385 	if (!list_lru_memcg_aware(lru))
386 		return;
387 
388 	xas_lock_irq(&xas);
389 	xas_for_each(&xas, mlru, ULONG_MAX) {
390 		kfree(mlru);
391 		xas_store(&xas, NULL);
392 	}
393 	xas_unlock_irq(&xas);
394 }
395 
396 static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
397 					 int src_idx, struct mem_cgroup *dst_memcg)
398 {
399 	struct list_lru_node *nlru = &lru->node[nid];
400 	int dst_idx = dst_memcg->kmemcg_id;
401 	struct list_lru_one *src, *dst;
402 
403 	/*
404 	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
405 	 * we have to use IRQ-safe primitives here to avoid deadlock.
406 	 */
407 	spin_lock_irq(&nlru->lock);
408 
409 	src = list_lru_from_memcg_idx(lru, nid, src_idx);
410 	if (!src)
411 		goto out;
412 	dst = list_lru_from_memcg_idx(lru, nid, dst_idx);
413 
414 	list_splice_init(&src->list, &dst->list);
415 
416 	if (src->nr_items) {
417 		dst->nr_items += src->nr_items;
418 		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
419 		src->nr_items = 0;
420 	}
421 out:
422 	spin_unlock_irq(&nlru->lock);
423 }
424 
425 static void memcg_reparent_list_lru(struct list_lru *lru,
426 				    int src_idx, struct mem_cgroup *dst_memcg)
427 {
428 	int i;
429 
430 	for_each_node(i)
431 		memcg_reparent_list_lru_node(lru, i, src_idx, dst_memcg);
432 
433 	memcg_list_lru_free(lru, src_idx);
434 }
435 
436 void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent)
437 {
438 	struct cgroup_subsys_state *css;
439 	struct list_lru *lru;
440 	int src_idx = memcg->kmemcg_id;
441 
442 	/*
443 	 * Change kmemcg_id of this cgroup and all its descendants to the
444 	 * parent's id, and then move all entries from this cgroup's list_lrus
445 	 * to the parent's.
446 	 *
447 	 * After we have finished, all list_lrus corresponding to this cgroup
448 	 * are guaranteed to remain empty. So we can safely free this cgroup's
449 	 * list lrus in memcg_list_lru_free().
450 	 *
451 	 * Changing ->kmemcg_id to the parent's also prevents memcg_list_lru_alloc()
452 	 * from allocating list lrus for this cgroup after memcg_list_lru_free()
453 	 * has been called.
454 	 */
455 	rcu_read_lock();
456 	css_for_each_descendant_pre(css, &memcg->css) {
457 		struct mem_cgroup *child;
458 
459 		child = mem_cgroup_from_css(css);
460 		WRITE_ONCE(child->kmemcg_id, parent->kmemcg_id);
461 	}
462 	rcu_read_unlock();
463 
464 	mutex_lock(&list_lrus_mutex);
465 	list_for_each_entry(lru, &memcg_list_lrus, list)
466 		memcg_reparent_list_lru(lru, src_idx, parent);
467 	mutex_unlock(&list_lrus_mutex);
468 }
469 
470 static inline bool memcg_list_lru_allocated(struct mem_cgroup *memcg,
471 					    struct list_lru *lru)
472 {
473 	int idx = memcg->kmemcg_id;
474 
475 	return idx < 0 || xa_load(&lru->xa, idx);
476 }
477 
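/*
 * memcg_list_lru_alloc() below is the allocation-side counterpart of the
 * reparenting above: before an object is charged to a cgroup and put on a
 * memcg-aware lru, it makes sure that the cgroup, and every ancestor that
 * still lacks one, has a list_lru_memcg, so that a later reparent always
 * finds a destination list.
 */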
478 int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
479 			 gfp_t gfp)
480 {
481 	int i;
482 	unsigned long flags;
483 	struct list_lru_memcg_table {
484 		struct list_lru_memcg *mlru;
485 		struct mem_cgroup *memcg;
486 	} *table;
487 	XA_STATE(xas, &lru->xa, 0);
488 
489 	if (!list_lru_memcg_aware(lru) || memcg_list_lru_allocated(memcg, lru))
490 		return 0;
491 
492 	gfp &= GFP_RECLAIM_MASK;
493 	table = kmalloc_array(memcg->css.cgroup->level, sizeof(*table), gfp);
494 	if (!table)
495 		return -ENOMEM;
496 
497 	/*
498 	 * Because the list_lru can be reparented to the parent cgroup's
499 	 * list_lru, we should make sure that this cgroup and all its
500 	 * ancestors have allocated list_lru_memcg.
501 	 */
502 	for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) {
503 		if (memcg_list_lru_allocated(memcg, lru))
504 			break;
505 
506 		table[i].memcg = memcg;
507 		table[i].mlru = memcg_init_list_lru_one(gfp);
508 		if (!table[i].mlru) {
509 			while (i--)
510 				kfree(table[i].mlru);
511 			kfree(table);
512 			return -ENOMEM;
513 		}
514 	}
515 
516 	xas_lock_irqsave(&xas, flags);
517 	while (i--) {
518 		int index = READ_ONCE(table[i].memcg->kmemcg_id);
519 		struct list_lru_memcg *mlru = table[i].mlru;
520 
521 		xas_set(&xas, index);
522 retry:
523 		if (unlikely(index < 0 || xas_error(&xas) || xas_load(&xas))) {
524 			kfree(mlru);
525 		} else {
526 			xas_store(&xas, mlru);
527 			if (xas_error(&xas) == -ENOMEM) {
528 				xas_unlock_irqrestore(&xas, flags);
529 				if (xas_nomem(&xas, gfp))
530 					xas_set_err(&xas, 0);
531 				xas_lock_irqsave(&xas, flags);
532 				/*
533 				 * While the xas lock was dropped, this memcg
534 				 * may have been reparented, so reload its
535 				 * kmemcg_id.  See the comments in
536 				 * memcg_reparent_list_lrus() for details.
537 				 */
538 				index = READ_ONCE(table[i].memcg->kmemcg_id);
539 				if (index < 0)
540 					xas_set_err(&xas, 0);
541 				else if (!xas_error(&xas) && index != xas.xa_index)
542 					xas_set(&xas, index);
543 				goto retry;
544 			}
545 		}
546 	}
547 	/* Here xas_nomem() is called to free the preallocated memory, not to allocate more. */
548 	if (xas.xa_alloc)
549 		xas_nomem(&xas, gfp);
550 	xas_unlock_irqrestore(&xas, flags);
551 	kfree(table);
552 
553 	return xas_error(&xas);
554 }
555 #else
556 static inline void memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
557 {
558 }
559 
560 static void memcg_destroy_list_lru(struct list_lru *lru)
561 {
562 }
563 #endif /* CONFIG_MEMCG */
564 
565 int __list_lru_init(struct list_lru *lru, bool memcg_aware,
566 		    struct lock_class_key *key, struct shrinker *shrinker)
567 {
568 	int i;
569 
570 #ifdef CONFIG_MEMCG
571 	if (shrinker)
572 		lru->shrinker_id = shrinker->id;
573 	else
574 		lru->shrinker_id = -1;
575 
576 	if (mem_cgroup_kmem_disabled())
577 		memcg_aware = false;
578 #endif
579 
580 	lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL);
581 	if (!lru->node)
582 		return -ENOMEM;
583 
584 	for_each_node(i) {
585 		spin_lock_init(&lru->node[i].lock);
586 		if (key)
587 			lockdep_set_class(&lru->node[i].lock, key);
588 		init_one_lru(&lru->node[i].lru);
589 	}
590 
591 	memcg_init_list_lru(lru, memcg_aware);
592 	list_lru_register(lru);
593 
594 	return 0;
595 }
596 EXPORT_SYMBOL_GPL(__list_lru_init);
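/*
 * Callers normally use the list_lru_init() / list_lru_init_memcg()
 * wrappers from <linux/list_lru.h> rather than calling this directly.
 * A sketch of setting up the made-up demo_lru from above as a memcg-aware
 * lru tied to a shrinker ("demo_shrinker" is also made up):
 *
 *	static int demo_init(struct shrinker *demo_shrinker)
 *	{
 *		return list_lru_init_memcg(&demo_lru, demo_shrinker);
 *	}
 */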
597 
598 void list_lru_destroy(struct list_lru *lru)
599 {
600 	/* Already destroyed or not yet initialized? */
601 	if (!lru->node)
602 		return;
603 
604 	list_lru_unregister(lru);
605 
606 	memcg_destroy_list_lru(lru);
607 	kfree(lru->node);
608 	lru->node = NULL;
609 
610 #ifdef CONFIG_MEMCG
611 	lru->shrinker_id = -1;
612 #endif
613 }
614 EXPORT_SYMBOL_GPL(list_lru_destroy);
615