/linux/mm/
memcontrol.c
    115  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)    in memcg_to_vmpressure() argument
    117  if (!memcg)    in memcg_to_vmpressure()
    118  memcg = root_mem_cgroup;    in memcg_to_vmpressure()
    119  return &memcg->vmpressure;    in memcg_to_vmpressure()
    138  static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
    172  struct mem_cgroup *memcg;    in obj_cgroup_release() local
    174  memcg = get_mem_cgroup_from_objcg(objcg);    in obj_cgroup_release()
    175  mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);    in obj_cgroup_release()
    176  memcg1_account_kmem(memcg, -nr_pages);    in obj_cgroup_release()
    177  if (!mem_cgroup_is_root(memcg))    in obj_cgroup_release()
    [all …]
|
memcontrol-v1.c
    56   struct mem_cgroup *memcg;    member
    70   int (*register_event)(struct mem_cgroup *memcg,
    77   void (*unregister_event)(struct mem_cgroup *memcg,
    167  static unsigned long soft_limit_excess(struct mem_cgroup *memcg)    in soft_limit_excess() argument
    169  unsigned long nr_pages = page_counter_read(&memcg->memory);    in soft_limit_excess()
    170  unsigned long soft_limit = READ_ONCE(memcg->soft_limit);    in soft_limit_excess()
    179  static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)    in memcg1_update_tree() argument
    186  if (soft_limit_excess(memcg))    in memcg1_update_tree()
    187  lru_gen_soft_reclaim(memcg, nid);    in memcg1_update_tree()
    198  for (; memcg; memcg = parent_mem_cgroup(memcg)) {    in memcg1_update_tree()
    [all …]
|
shrinker.c
    62   void free_shrinker_info(struct mem_cgroup *memcg)    in free_shrinker_info() argument
    69   pn = memcg->nodeinfo[nid];    in free_shrinker_info()
    77   int alloc_shrinker_info(struct mem_cgroup *memcg)    in alloc_shrinker_info() argument
    94   rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);    in alloc_shrinker_info()
    102  free_shrinker_info(memcg);    in alloc_shrinker_info()
    106  static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,    in shrinker_info_protected() argument
    109  return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,    in shrinker_info_protected()
    113  static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,    in expand_one_shrinker_info() argument
    121  pn = memcg->nodeinfo[nid];    in expand_one_shrinker_info()
    122  old = shrinker_info_protected(memcg, nid);    in expand_one_shrinker_info()
    [all …]
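alloc_shrinker_info()/free_shrinker_info() provision the per-memcg, per-node shrinker_info maps that back memcg-aware shrinkers. For orientation, a minimal sketch of registering such a shrinker, assuming the shrinker_alloc()/shrinker_register() API of recent kernels; the atomic counter is a stand-in for a real object cache:

#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>
#include <linux/minmax.h>

static atomic_long_t my_nr_objects;	/* toy object count, for illustration */
static struct shrinker *my_shrinker;

static unsigned long my_count(struct shrinker *s, struct shrink_control *sc)
{
	/* sc->nid and sc->memcg identify the node/cgroup being shrunk */
	return atomic_long_read(&my_nr_objects);
}

static unsigned long my_scan(struct shrinker *s, struct shrink_control *sc)
{
	long nr = min_t(long, sc->nr_to_scan,
			atomic_long_read(&my_nr_objects));

	atomic_long_sub(nr, &my_nr_objects);	/* "free" that many objects */
	return nr;				/* report the number reclaimed */
}

static int __init my_init(void)
{
	my_shrinker = shrinker_alloc(SHRINKER_MEMCG_AWARE, "my-cache");
	if (!my_shrinker)
		return -ENOMEM;
	my_shrinker->count_objects = my_count;
	my_shrinker->scan_objects = my_scan;
	shrinker_register(my_shrinker);
	return 0;
}
module_init(my_init);

With SHRINKER_MEMCG_AWARE set, reclaim invokes the callbacks once per memcg/node pair rather than once globally, which is exactly what the shrinker_info maps above index.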
|
list_lru.c
    80   lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,    in lock_list_lru_of_memcg() argument
    87   l = list_lru_from_memcg_idx(lru, nid, memcg_kmem_id(memcg));    in lock_list_lru_of_memcg()
    100  VM_WARN_ON(!css_is_dying(&memcg->css));    in lock_list_lru_of_memcg()
    101  memcg = parent_mem_cgroup(memcg);    in lock_list_lru_of_memcg()
    138  lock_list_lru_of_memcg(struct list_lru *lru, int nid, struct mem_cgroup *memcg,    in lock_list_lru_of_memcg() argument
    162  struct mem_cgroup *memcg)    in list_lru_add() argument
    167  l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);    in list_lru_add()
    174  set_shrinker_bit(memcg, nid, lru_shrinker_id(lru));    in list_lru_add()
    202  struct mem_cgroup *memcg)    in list_lru_del() argument
    206  l = lock_list_lru_of_memcg(lru, nid, memcg, false, false);    in list_lru_del()
    [all …]
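list_lru_add()/list_lru_del() take the node id and owning memcg explicitly; list_lru.c resolves the matching per-node, per-memcg sublist (falling back to the parent when the memcg is dying) and sets the shrinker bit on insertion. A minimal caller sketch in the style of the zswap hits below; struct my_entry and my_lru are hypothetical:

#include <linux/list_lru.h>
#include <linux/memcontrol.h>
#include <linux/printk.h>

/* Hypothetical object tracked on a memcg-aware list_lru. */
struct my_entry {
	struct list_head lru;	/* linkage owned by the list_lru */
	/* ... payload ... */
};

static struct list_lru my_lru;	/* assumed set up with list_lru_init_memcg() */

/* Queue an entry on the LRU of the node/memcg that owns it. */
static void my_entry_add(struct my_entry *e, int nid, struct mem_cgroup *memcg)
{
	/* Returns false if the item was already on a list. */
	if (!list_lru_add(&my_lru, &e->lru, nid, memcg))
		pr_debug("entry already on an LRU\n");
}

static void my_entry_del(struct my_entry *e, int nid, struct mem_cgroup *memcg)
{
	list_lru_del(&my_lru, &e->lru, nid, memcg);
}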
|
vmpressure.c
    77   struct mem_cgroup *memcg = vmpressure_to_memcg(vmpr);    in vmpressure_parent() local
    79   memcg = parent_mem_cgroup(memcg);    in vmpressure_parent()
    80   if (!memcg)    in vmpressure_parent()
    82   return memcg_to_vmpressure(memcg);    in vmpressure_parent()
    239  void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure() argument
    255  vmpr = memcg_to_vmpressure(memcg);    in vmpressure()
    295  if (!memcg || mem_cgroup_is_root(memcg))    in vmpressure()
    319  mem_cgroup_set_socket_pressure(memcg);    in vmpressure()
    335  void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)    in vmpressure_prio() argument
    351  vmpressure(gfp, memcg, true, vmpressure_win, 0);    in vmpressure_prio()
    [all …]
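vmpressure() is what feeds the cgroup-v1 memory.pressure_level interface served by vmpressure_register_event() (declared in vmpressure.h below). A minimal userspace sketch of subscribing to "low" pressure events, assuming a v1 memory controller mounted at /sys/fs/cgroup/memory and a hypothetical group named mygrp:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int pfd = open("/sys/fs/cgroup/memory/mygrp/memory.pressure_level",
		       O_RDONLY);
	int cfd = open("/sys/fs/cgroup/memory/mygrp/cgroup.event_control",
		       O_WRONLY);
	char line[64];
	uint64_t n;

	if (efd < 0 || pfd < 0 || cfd < 0)
		return 1;
	/* "<event fd> <pressure_level fd> <level>" registers the listener */
	snprintf(line, sizeof(line), "%d %d low", efd, pfd);
	if (write(cfd, line, strlen(line)) < 0)
		return 1;
	read(efd, &n, sizeof(n));	/* blocks until pressure is reported */
	printf("saw %llu low-pressure event(s)\n", (unsigned long long)n);
	return 0;
}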
|
zswap.c
    625  struct mem_cgroup *memcg;    in zswap_lru_add() local
    639  memcg = mem_cgroup_from_entry(entry);    in zswap_lru_add()
    641  list_lru_add(list_lru, &entry->lru, nid, memcg);    in zswap_lru_add()
    648  struct mem_cgroup *memcg;    in zswap_lru_del() local
    651  memcg = mem_cgroup_from_entry(entry);    in zswap_lru_del()
    653  list_lru_del(list_lru, &entry->lru, nid, memcg);    in zswap_lru_del()
    682  void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)    in zswap_memcg_offline_cleanup() argument
    686  if (zswap_next_shrink == memcg) {    in zswap_memcg_offline_cleanup()
    1188 !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {    in zswap_shrinker_scan()
    1205 struct mem_cgroup *memcg = sc->memcg;    in zswap_shrinker_count() local
    [all …]
|
vmscan.c
    244  static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)    in sc_swappiness() argument
    248  return mem_cgroup_swappiness(memcg);    in sc_swappiness()
    266  static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)    in sc_swappiness() argument
    344  struct mem_cgroup *memcg)    in can_demote() argument
    359  mem_cgroup_node_filter_allowed(memcg, &allowed_mask);    in can_demote()
    363  static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,    in can_reclaim_anon_pages() argument
    367  if (memcg == NULL) {    in can_reclaim_anon_pages()
    376  if (mem_cgroup_get_nr_swap_pages(memcg) > 0)    in can_reclaim_anon_pages()
    385  return can_demote(nid, sc, memcg);    in can_reclaim_anon_pages()
    431  struct mem_cgroup *memcg = NULL;    in drop_slab_node() local
    [all …]
|
workingset.c
    244  struct mem_cgroup *memcg = folio_memcg(folio);    in lru_gen_eviction() local
    249  lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_eviction()
    257  return pack_shadow(mem_cgroup_private_id(memcg), pgdat, token, workingset);    in lru_gen_eviction()
    269  struct mem_cgroup *memcg;    in lru_gen_test_recent() local
    274  memcg = mem_cgroup_from_private_id(memcg_id);    in lru_gen_test_recent()
    275  *lruvec = mem_cgroup_lruvec(memcg, pgdat);    in lru_gen_test_recent()
    444  * Look up the memcg associated with the stored ID. It might    in workingset_test_recent()
    473  * XXX: With per-memcg flushing and thresholding, is ratelimiting    in workingset_test_recent()
    531  * evicted folio in the context of the node and the memcg whose memory
    538  struct mem_cgroup *memcg;    in workingset_refault() local
    [all …]
oom_kill.c
    74   return oc->memcg != NULL;    in is_memcg_oom()
    261  oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;    in constrained_alloc()
    370  mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);    in select_bad_process()
    420  * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
    431  mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);    in dump_tasks()
    454  mem_cgroup_print_oom_context(oc->memcg, victim);    in dump_oom_victim()
    469  mem_cgroup_print_oom_meminfo(oc->memcg);    in dump_header()
    475  mem_cgroup_show_protected_memory(oc->memcg);    in dump_header()
    773  /* OOM killer might race with memcg OOM */
    1056 oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);    in oom_kill_process()
    [all …]
huge_memory.c
    1102 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)    in memcg_split_queue() argument
    1104 return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);    in memcg_split_queue()
    1114 static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)    in memcg_split_queue() argument
    1120 static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)    in split_queue_lock() argument
    1125 queue = memcg_split_queue(nid, memcg);    in split_queue_lock()
    1132 if (unlikely(memcg_is_dying(memcg))) {    in split_queue_lock()
    1134 memcg = parent_mem_cgroup(memcg);    in split_queue_lock()
    1142 split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)    in split_queue_lock_irqsave() argument
    1147 queue = memcg_split_queue(nid, memcg);    in split_queue_lock_irqsave()
    1149 if (unlikely(memcg_is_dying(memcg))) {    in split_queue_lock_irqsave()
    [all …]
|
/linux/include/linux/
memcontrol.h
    90   struct mem_cgroup *memcg; /* Back pointer, we cannot */    member
    176  struct mem_cgroup *memcg;    member
    382  return READ_ONCE(objcg->memcg);    in obj_cgroup_memcg()
    514  struct mem_cgroup *memcg;    in get_mem_cgroup_from_objcg() local
    518  memcg = obj_cgroup_memcg(objcg);    in get_mem_cgroup_from_objcg()
    519  if (unlikely(!css_tryget(&memcg->css)))    in get_mem_cgroup_from_objcg()
    523  return memcg;    in get_mem_cgroup_from_objcg()
    546  static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)    in mem_cgroup_is_root() argument
    548  return (memcg == root_mem_cgroup);    in mem_cgroup_is_root()
    557  struct mem_cgroup *memcg,    in mem_cgroup_protection() argument
    [all …]
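get_mem_cgroup_from_objcg() returns the memcg with a css reference held (note the css_tryget() retry at line 519), so the caller owes a put when done. A short sketch of the caller-side discipline, mirroring the obj_cgroup_release() hit in memcontrol.c above; my_account() is illustrative, not from the source:

#include <linux/memcontrol.h>

static void my_account(struct obj_cgroup *objcg, int nr_pages)
{
	struct mem_cgroup *memcg;

	memcg = get_mem_cgroup_from_objcg(objcg);	/* takes a css ref */
	mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);	/* safe: memcg pinned */
	mem_cgroup_put(memcg);				/* drop the ref */
}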
|
vmpressure.h
    33   extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
    35   extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
    39   extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
    41   extern int vmpressure_register_event(struct mem_cgroup *memcg,
    44   extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
    47   static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,    in vmpressure() argument
    49   static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,    in vmpressure_prio() argument
|
swap.h
    383  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
    562  static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)    in mem_cgroup_swappiness() argument
    569  if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))    in mem_cgroup_swappiness()
    572  return READ_ONCE(memcg->swappiness);    in mem_cgroup_swappiness()
    613  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
    627  static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)    in mem_cgroup_get_nr_swap_pages() argument
    583  mem_cgroup_swappiness(struct mem_cgroup *memcg)    mem_cgroup_swappiness() argument
    648  mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)    mem_cgroup_get_nr_swap_pages() argument
|
mmzone.h
    621  void lru_gen_init_memcg(struct mem_cgroup *memcg);
    622  void lru_gen_exit_memcg(struct mem_cgroup *memcg);
    623  void lru_gen_online_memcg(struct mem_cgroup *memcg);
    624  void lru_gen_offline_memcg(struct mem_cgroup *memcg);
    625  void lru_gen_release_memcg(struct mem_cgroup *memcg);
    626  void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
    643  static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)    in lru_gen_init_memcg() argument
    647  static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)    in lru_gen_exit_memcg() argument
    651  static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)    in lru_gen_online_memcg() argument
    655  static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)    in lru_gen_offline_memcg() argument
    [all …]
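The second half of this hit list shows the other branch of a common header idiom: when CONFIG_LRU_GEN is off, the same names are provided as empty static inlines, so callers never need an #ifdef. The idiom in isolation (CONFIG_MY_FEATURE and my_hook() are illustrative, not from the source):

#ifdef CONFIG_MY_FEATURE
void my_hook(struct mem_cgroup *memcg);		/* real version in a .c file */
#else
static inline void my_hook(struct mem_cgroup *memcg)
{
}						/* compiles away entirely */
#endif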
|
/linux/tools/testing/selftests/cgroup/
test_memcontrol.c
    250  char *memcg;    in test_memcg_current_peak()
    255  memcg = cg_name(root, "memcg_test");    in test_memcg_current_peak()
    256  if (!memcg)    in test_memcg_current_peak()
    259  if (cg_create(memcg))    in test_memcg_current_peak()
    262  current = cg_read_long(memcg, "memory.current");    in test_memcg_current_peak()
    266  peak = cg_read_long(memcg, "memory.peak");    in test_memcg_current_peak()
    270  if (cg_run(memcg, alloc_anon_50M_check, NULL))    in test_memcg_current_peak()
    273  peak = cg_read_long(memcg, "memory.peak");    in test_memcg_current_peak()
    282  peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);    in test_memcg_current_peak()
    303  peak_fd2 = cg_open(memcg, "memory.peak", …)    in test_memcg_current_peak()
    248  char *memcg;    test_memcg_current_peak() local
    703  char *memcg;    test_memcg_high() local
    766  char *memcg;    test_memcg_high_sync() local
    828  char *memcg;    test_memcg_max() local
    886  reclaim_until(const char *memcg, long goal)    reclaim_until() argument
    922  char *memcg;    test_memcg_reclaim() local
    1026 char *memcg;    test_memcg_swap_max_peak() local
    1225 char *memcg;    test_memcg_oom_events() local
    1386 char *memcg;    test_memcg_sock() local
    1584 char *memcg;    test_memcg_oom_group_score_events() local
    [all …]
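The peak_fd/peak_fd2 pair in test_memcg_current_peak() exercises the per-fd semantics of memory.peak: on cgroup v2, writing any non-empty string to an open fd resets the watermark reported through that fd only. A userspace sketch, with an illustrative cgroup path:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = {0};
	int fd = open("/sys/fs/cgroup/mygrp/memory.peak", O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, "reset", 5);			/* reset this fd's watermark */
	pread(fd, buf, sizeof(buf) - 1, 0);	/* now reads the new peak */
	printf("peak since reset: %s", buf);
	close(fd);
	return 0;
}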
memcg_protection.m
    5    % This script simulates reclaim protection behavior on a single level of memcg
    65   % commit 1bc63fb1272b ("mm, memcg: make scan aggression always exclude protection")
|
/linux/tools/cgroup/
memcg_slabinfo.py
    42   memcg = container_of(css, 'struct mem_cgroup', 'css')
    43   MEMCGS[css.cgroup.kn.id.value_()] = memcg
    170  memcg = MEMCGS[cgroup_id]
    186  obj_cgroups.add(memcg.objcg.value_())
    188  memcg.objcg_list.address_of_(),
    220  memcg.kmem_caches.address_of_(),
|
/linux/Documentation/admin-guide/cgroup-v1/
memcg_test.rst
    9    Because VM is getting complex (one of reasons is memcg...), memcg's behavior
    10   is complex. This is a document for memcg's internal behavior.
    61   At commit(), the page is associated with the memcg.
    114  But brief explanation of the behavior of memcg around shmem will be
    136  Each memcg has its own vector of LRUs (inactive anon, active anon,
    138  each LRU handled under a single lru_lock for that memcg and node.
    145  9.1 Small limit to memcg.
    148  When you do test to do racy case, it's good test to set memcg's limit
    158  Historically, memcg's shmem handling was poor and we saw some amount
    248  Besides management of swap is one of complicated parts of memcg,
    [all …]
|
/linux/include/linux/sched/
mm.h
    492  set_active_memcg(struct mem_cgroup *memcg)    in set_active_memcg() argument
    498  this_cpu_write(int_active_memcg, memcg);    in set_active_memcg()
    501  current->active_memcg = memcg;    in set_active_memcg()
    508  set_active_memcg(struct mem_cgroup *memcg)    in set_active_memcg() argument
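set_active_memcg() returns the previously active memcg so callers can save and restore it around allocations they want charged to a different cgroup; the bpf/memalloc.c and sgx/encl.c hits below follow exactly this discipline. A minimal in-kernel sketch (my_charged_alloc() is hypothetical):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *my_charged_alloc(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old_memcg;
	void *p;

	old_memcg = set_active_memcg(memcg);	/* charge scope: memcg */
	p = kmalloc(size, GFP_KERNEL_ACCOUNT);	/* __GFP_ACCOUNT allocations
						   are charged to it */
	set_active_memcg(old_memcg);		/* restore the previous scope */
	return p;
}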
|
/linux/Documentation/translations/zh_CN/mm/
hwpoison.rst
    119  corrupt-filter-memcg
    120  Limit injection to pages owned by the memgroup, as specified by the memcg's inode number.
    130  echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
|
/linux/mm/damon/
ops-common.c
    261  struct mem_cgroup *memcg;    in damos_folio_filter_match() local
    273  memcg = folio_memcg_check(folio);    in damos_folio_filter_match()
    274  if (!memcg)    in damos_folio_filter_match()
    277  matched = filter->memcg_id == mem_cgroup_id(memcg);    in damos_folio_filter_match()
|
/linux/Documentation/admin-guide/mm/
shrinker_debugfs.rst
    14   trigger *count_objects()* and *scan_objects()* callbacks for each memcg and
    59   If the shrinker is not memcg-aware or CONFIG_MEMCG is off, 0 is printed
    112  For a non-memcg-aware shrinker or on a system with no memory
|
/linux/Documentation/mm/
multigen_lru.rst
    162  An ``mm_struct`` list is maintained for each memcg, and an
    163  ``mm_struct`` follows its owner task to the new memcg when this task
    173  ``mm_struct`` was migrated, pages left in the previous memcg will be
    174  ignored when the current memcg is under reclaim. Similarly, page table
    225  An memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
    226  since each node and memcg combination has an LRU of folios (see
    229  data centers. Note that memcg LRU only applies to global reclaim.
    231  The basic structure of an memcg LRU can be understood by an analogy to
    238  3. Other events trigger similar operations, e.g., offlining an memcg
    243  1. Sharding, which allows each thread to start at a random memcg (in
|
/linux/kernel/bpf/
memalloc.c
    209  struct mem_cgroup *memcg = NULL, *old_memcg;    in alloc_bulk() local
    240  memcg = get_memcg(c);    in alloc_bulk()
    241  old_memcg = set_active_memcg(memcg);    in alloc_bulk()
    254  mem_cgroup_put(memcg);    in alloc_bulk()
    1001 struct mem_cgroup *memcg, *old_memcg;    in bpf_mem_cache_alloc_flags() local
    1003 memcg = get_memcg(c);    in bpf_mem_cache_alloc_flags()
    1004 old_memcg = set_active_memcg(memcg);    in bpf_mem_cache_alloc_flags()
    1009 mem_cgroup_put(memcg);    in bpf_mem_cache_alloc_flags()
|
/linux/arch/x86/kernel/cpu/sgx/
encl.c
    1003 struct mem_cgroup *memcg = NULL;    in sgx_encl_get_mem_cgroup() local
    1025 memcg = get_mem_cgroup_from_mm(encl_mm->mm);    in sgx_encl_get_mem_cgroup()
    1040 if (!memcg)    in sgx_encl_get_mem_cgroup()
    1043 return memcg;    in sgx_encl_get_mem_cgroup()
    1067 struct mem_cgroup *memcg = set_active_memcg(encl_memcg);    in sgx_encl_alloc_backing() local
    1072 set_active_memcg(memcg);    in sgx_encl_alloc_backing()
|