/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef __MM_MEMCONTROL_V1_H
#define __MM_MEMCONTROL_V1_H

#include <linux/cgroup-defs.h>

/* Cgroup v1 and v2 common declarations */

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for reference counting.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

/* As above, but walks every memcg in the system (root == NULL). */
#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

void drain_all_stock(struct mem_cgroup *root_memcg);

unsigned long memcg_events(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state_output(struct mem_cgroup *memcg, int item);
int memory_stat_show(struct seq_file *m, void *v);

void mem_cgroup_private_id_get_many(struct mem_cgroup *memcg, unsigned int n);
struct mem_cgroup *mem_cgroup_private_id_get_online(struct mem_cgroup *memcg);

/* Cgroup v1-specific declarations */
#ifdef CONFIG_MEMCG_V1

/* Whether legacy memory+swap accounting is active */
static inline bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
}

unsigned long memcg_events_local(struct mem_cgroup *memcg, int event);
unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx);
unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item);
bool memcg1_alloc_events(struct mem_cgroup *memcg);
void memcg1_free_events(struct mem_cgroup *memcg);

void memcg1_memcg_init(struct mem_cgroup *memcg);
void memcg1_remove_from_trees(struct mem_cgroup *memcg);

/* Reset the v1 soft limit to PAGE_COUNTER_MAX, i.e. effectively no limit. */
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
{
	WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
}

struct cgroup_taskset;
void memcg1_css_offline(struct mem_cgroup *memcg);

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_KMEM,
	_TCP,
};

bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked);
void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked);
void memcg1_oom_recover(struct mem_cgroup *memcg);

void memcg1_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
void memcg1_uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_memory, int nid);

void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);

void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages);

/* Is v1 TCP memory accounting active for @memcg? (reads memcg->tcpmem_active) */
static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg)
{
	return memcg->tcpmem_active;
}

bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			 gfp_t gfp_mask);

/* Return @nr_pages to the v1 tcpmem page counter. */
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	page_counter_uncharge(&memcg->tcpmem, nr_pages);
}

extern struct cftype memsw_files[];
extern struct cftype mem_cgroup_legacy_files[];

#else /* CONFIG_MEMCG_V1 */

/*
 * Stubs for !CONFIG_MEMCG_V1 builds, so common code can call the v1
 * hooks unconditionally. Note the non-void stubs return the values a
 * caller treats as "nothing to do / success" (no memsw accounting,
 * events "allocated", OOM handling may proceed).
 */
static inline bool do_memsw_account(void) { return false; }
static inline bool memcg1_alloc_events(struct mem_cgroup *memcg) { return true; }
static inline void memcg1_free_events(struct mem_cgroup *memcg) {}

static inline void memcg1_memcg_init(struct mem_cgroup *memcg) {}
static inline void memcg1_remove_from_trees(struct mem_cgroup *memcg) {}
static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg) {}
static inline void memcg1_css_offline(struct mem_cgroup *memcg) {}

static inline bool memcg1_oom_prepare(struct mem_cgroup *memcg, bool *locked) { return true; }
static inline void memcg1_oom_finish(struct mem_cgroup *memcg, bool locked) {}
static inline void memcg1_oom_recover(struct mem_cgroup *memcg) {}

/*
 * Empty stubs for builds where the surrounding CONFIG_MEMCG_V1
 * conditional (closed by the #endif below) selects the !v1 branch:
 * v1 charging, statistics and socket-memory hooks become no-ops.
 */
static inline void memcg1_commit_charge(struct folio *folio,
					struct mem_cgroup *memcg) {}

static inline void memcg1_uncharge_batch(struct mem_cgroup *memcg,
					 unsigned long pgpgout,
					 unsigned long nr_memory, int nid) {}

static inline void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) {}

static inline void memcg1_account_kmem(struct mem_cgroup *memcg, int nr_pages) {}

/* Without v1, tcpmem accounting is never active ... */
static inline bool memcg1_tcpmem_active(struct mem_cgroup *memcg) { return false; }

/* ... and charging socket memory trivially succeeds. */
static inline bool memcg1_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
				       gfp_t gfp_mask) { return true; }
static inline void memcg1_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) {}

#endif /* CONFIG_MEMCG_V1 */

#endif /* __MM_MEMCONTROL_V1_H */