/linux/mm/

memcontrol.c

     113  struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
     115          if (!memcg)
     116                  memcg = root_mem_cgroup;
     117          return &memcg->vmpressure;
     136  static void memcg_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
     170          struct mem_cgroup *memcg;                  /* obj_cgroup_release() */
     172          memcg = get_mem_cgroup_from_objcg(objcg);
     173          mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
     174          memcg1_account_kmem(memcg, -nr_pages);
     175          if (!mem_cgroup_is_root(memcg))
     [all …]
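
The lines at 113-117 show a common fallback: callers may pass a NULL memcg, which is silently treated as the root group. A minimal userspace model of that pattern follows; the struct layouts are simplified stand-ins, and root_mem_cgroup_obj is a made-up name (the kernel's root_mem_cgroup is a pointer, not an object).

#include <stddef.h>
#include <stdio.h>

struct vmpressure { int scanned; };
struct mem_cgroup { struct vmpressure vmpressure; };

static struct mem_cgroup root_mem_cgroup_obj;   /* stand-in for the root group */

static struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
        if (!memcg)                             /* no cgroup supplied ... */
                memcg = &root_mem_cgroup_obj;   /* ... fall back to the root */
        return &memcg->vmpressure;
}

int main(void)
{
        /* A NULL memcg resolves to the root group's vmpressure state. */
        printf("%d\n", memcg_to_vmpressure(NULL) == &root_mem_cgroup_obj.vmpressure);
        return 0;
}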
|
memcontrol-v1.c

      56          struct mem_cgroup *memcg;
      70          int (*register_event)(struct mem_cgroup *memcg,
      77          void (*unregister_event)(struct mem_cgroup *memcg,
     167  static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
     169          unsigned long nr_pages = page_counter_read(&memcg->memory);
     170          unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
     179  static void memcg1_update_tree(struct mem_cgroup *memcg, int nid)
     186          if (soft_limit_excess(memcg))
     187                  lru_gen_soft_reclaim(memcg, nid);
     198          for (; memcg; memcg = parent_mem_cgroup(memcg)) {
     [all …]
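
Lines 167-170 read the group's current page count and its soft limit; the excess is simply the number of pages charged beyond the soft limit, clamped at zero. A minimal sketch of that calculation, with the page_counter and READ_ONCE machinery reduced to plain values:

#include <stdio.h>

static unsigned long soft_limit_excess(unsigned long usage_pages,
                                       unsigned long soft_limit_pages)
{
        /* Pages charged beyond the soft limit; zero when under the limit. */
        return usage_pages > soft_limit_pages ?
               usage_pages - soft_limit_pages : 0;
}

int main(void)
{
        printf("%lu\n", soft_limit_excess(300, 200)); /* 100 pages over  */
        printf("%lu\n", soft_limit_excess(100, 200)); /* 0: under limit */
        return 0;
}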
|
shrinker.c

      62  void free_shrinker_info(struct mem_cgroup *memcg)
      69          pn = memcg->nodeinfo[nid];
      77  int alloc_shrinker_info(struct mem_cgroup *memcg)
      94          rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
     102          free_shrinker_info(memcg);
     106  static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
     109          return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
     113  static int expand_one_shrinker_info(struct mem_cgroup *memcg, int new_size,
     121          pn = memcg->nodeinfo[nid];
     122          old = shrinker_info_protected(memcg, nid);
     [all …]
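
alloc_shrinker_info() allocates per-node shrinker info, and line 102 shows that a mid-loop failure rolls back by calling the same free_shrinker_info() used for normal teardown. A sketch of that allocate-or-roll-back shape, assuming a made-up NR_NODES and payload struct in place of the kernel's per-node info:

#include <stdlib.h>

#define NR_NODES 4

struct info { long bits[8]; };          /* illustrative payload */
static struct info *node_info[NR_NODES];

static void free_info(void)
{
        for (int nid = 0; nid < NR_NODES; nid++) {
                free(node_info[nid]);
                node_info[nid] = NULL;  /* safe to call on partial state */
        }
}

static int alloc_info(void)
{
        for (int nid = 0; nid < NR_NODES; nid++) {
                node_info[nid] = calloc(1, sizeof(struct info));
                if (!node_info[nid]) {
                        free_info();    /* roll back whatever was allocated */
                        return -1;
                }
        }
        return 0;
}

int main(void)
{
        if (alloc_info() == 0)
                free_info();
        return 0;
}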
|
zswap.c

     625          struct mem_cgroup *memcg;                  /* zswap_lru_add() */
     639          memcg = mem_cgroup_from_entry(entry);
     641          list_lru_add(list_lru, &entry->lru, nid, memcg);
     648          struct mem_cgroup *memcg;                  /* zswap_lru_del() */
     651          memcg = mem_cgroup_from_entry(entry);
     653          list_lru_del(list_lru, &entry->lru, nid, memcg);
     682  void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
     686          if (zswap_next_shrink == memcg) {
    1194          !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {  /* zswap_shrinker_scan() */
    1211          struct mem_cgroup *memcg = sc->memcg;      /* zswap_shrinker_count() */
     [all …]
|
vmscan.c

     245  static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
     249          return mem_cgroup_swappiness(memcg);
     267  static int sc_swappiness(struct scan_control *sc, struct mem_cgroup *memcg)
     345                          struct mem_cgroup *memcg)   /* can_demote() */
     359          return mem_cgroup_node_allowed(memcg, demotion_nid);
     362  static inline bool can_reclaim_anon_pages(struct mem_cgroup *memcg,
     366          if (memcg == NULL) {
     375          if (mem_cgroup_get_nr_swap_pages(memcg) > 0)
     384          return can_demote(nid, sc, memcg);
     430          struct mem_cgroup *memcg = NULL;            /* drop_slab_node() */
     [all …]
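
Lines 362-384 show the decision order in can_reclaim_anon_pages(): anonymous pages are reclaimable if swap space remains, and otherwise only if demotion to another node is possible. A sketch of that decision with plain values standing in for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

static bool can_reclaim_anon_pages(long nr_swap_pages, bool can_demote)
{
        if (nr_swap_pages > 0)  /* room left in swap: reclaim via swap-out */
                return true;
        return can_demote;      /* otherwise only demotion makes progress */
}

int main(void)
{
        printf("%d\n", can_reclaim_anon_pages(0, true));  /* 1: demotion only  */
        printf("%d\n", can_reclaim_anon_pages(0, false)); /* 0: nothing to do  */
        return 0;
}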
|
workingset.c

     244          struct mem_cgroup *memcg = folio_memcg(folio);  /* lru_gen_eviction() */
     249          lruvec = mem_cgroup_lruvec(memcg, pgdat);
     257          return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset);
     269          struct mem_cgroup *memcg;                   /* lru_gen_test_recent() */
     274          memcg = mem_cgroup_from_id(memcg_id);
     275          *lruvec = mem_cgroup_lruvec(memcg, pgdat);
     538          struct mem_cgroup *memcg;                   /* workingset_refault() */
     560          memcg = folio_memcg(folio);
     562          lruvec = mem_cgroup_lruvec(memcg, pgdat);
     674          if (sc->memcg) {                            /* count_shadow_nodes() */
     [all …]
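
Line 257 packs the memcg id, node, token, and workingset bit into a single shadow entry so the eviction information survives in the page cache radix tree slot. A sketch of that bit-packing idea; the field widths here are illustrative, not the kernel's actual layout:

#include <stdio.h>

#define NID_BITS   8
#define MEMCG_BITS 16

static unsigned long pack_shadow(unsigned int memcg_id, unsigned int nid,
                                 unsigned long token)
{
        unsigned long val = token;

        val = (val << MEMCG_BITS) | memcg_id;   /* token above the id ... */
        val = (val << NID_BITS) | nid;          /* ... and nid at the bottom */
        return val;
}

static void unpack_shadow(unsigned long shadow, unsigned int *memcg_id,
                          unsigned int *nid, unsigned long *token)
{
        *nid = shadow & ((1UL << NID_BITS) - 1);
        shadow >>= NID_BITS;
        *memcg_id = shadow & ((1UL << MEMCG_BITS) - 1);
        *token = shadow >> MEMCG_BITS;
}

int main(void)
{
        unsigned int id, nid;
        unsigned long token;

        unpack_shadow(pack_shadow(42, 1, 12345), &id, &nid, &token);
        printf("memcg=%u nid=%u token=%lu\n", id, nid, token); /* 42 1 12345 */
        return 0;
}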
|
oom_kill.c

      74          return oc->memcg != NULL;                   /* is_memcg_oom() */
     261          oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;  /* constrained_alloc() */
     370          mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);  /* select_bad_process() */
     431          mem_cgroup_scan_tasks(oc->memcg, dump_task, oc);  /* dump_tasks() */
     454          mem_cgroup_print_oom_context(oc->memcg, victim);  /* dump_oom_victim() */
     469          mem_cgroup_print_oom_meminfo(oc->memcg);    /* dump_header() */
     475          mem_cgroup_show_protected_memory(oc->memcg);
    1056          oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);  /* oom_kill_process() */
|
huge_memory.c

    1099  static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
    1101          return memcg ? &memcg->deferred_split_queue : split_queue_node(nid);
    1111  static struct deferred_split *memcg_split_queue(int nid, struct mem_cgroup *memcg)
    1117  static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
    1122          queue = memcg_split_queue(nid, memcg);
    1129          if (unlikely(memcg_is_dying(memcg))) {
    1131                  memcg = parent_mem_cgroup(memcg);
    1139  split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags)
    1144          queue = memcg_split_queue(nid, memcg);
    1146          if (unlikely(memcg_is_dying(memcg))) {
     [all …]
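
Lines 1117-1131 show the lock-check-retry pattern in split_queue_lock(): take the queue lock, and if the memcg turned out to be dying in the meantime, drop the lock and retry against the parent's queue. A sketch of that pattern with pthread mutexes standing in for spinlocks; struct group and struct queue are simplified stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct queue { pthread_mutex_t lock; };
struct group {
        struct group *parent;
        bool dying;
        struct queue queue;
};

static struct queue node_queue = { PTHREAD_MUTEX_INITIALIZER };

static struct queue *group_queue(struct group *g)
{
        /* A NULL group falls back to the per-node queue. */
        return g ? &g->queue : &node_queue;
}

static struct queue *queue_lock(struct group *g)
{
        struct queue *q;
retry:
        q = group_queue(g);
        pthread_mutex_lock(&q->lock);
        if (g && g->dying) {
                /* Raced with offlining: this queue is being drained into
                 * the parent's, so restart there. */
                pthread_mutex_unlock(&q->lock);
                g = g->parent;
                goto retry;
        }
        return q;
}

int main(void)
{
        struct group parent = { NULL, false, { PTHREAD_MUTEX_INITIALIZER } };
        struct group child = { &parent, true, { PTHREAD_MUTEX_INITIALIZER } };

        /* child is dying, so the lock lands on the parent's queue. */
        pthread_mutex_unlock(queue_lock(&child));
        return 0;
}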
|
page_owner.c

     516          struct mem_cgroup *memcg;                   /* print_page_owner_memcg() */
     529          memcg = page_memcg_check(page);
     530          if (!memcg)
     533          online = (memcg->css.flags & CSS_ONLINE);
     534          cgroup_name(memcg->css.cgroup, name, sizeof(name));
|
page_io.c

     308          struct mem_cgroup *memcg;                   /* bio_associate_blkg_from_page() */
     310          memcg = folio_memcg(folio);
     311          if (!memcg)
     315          css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
|
/linux/include/linux/

memcontrol.h

      90          struct mem_cgroup *memcg;       /* Back pointer, we cannot */
     176          struct mem_cgroup *memcg;
     383          return READ_ONCE(objcg->memcg);             /* obj_cgroup_memcg() */
     515          struct mem_cgroup *memcg;                   /* get_mem_cgroup_from_objcg() */
     519          memcg = obj_cgroup_memcg(objcg);
     520          if (unlikely(!css_tryget(&memcg->css)))
     524          return memcg;
     547  static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
     549          return (memcg == root_mem_cgroup);
     558                                            struct mem_cgroup *memcg,  /* mem_cgroup_protection() */
     [all …]
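
Lines 515-524 show the tryget-and-retry idiom behind get_mem_cgroup_from_objcg(): keep re-reading objcg->memcg until a reference can be taken, because the pointer may be redirected (e.g. on reparenting) between the read and the refcount bump. A sketch using C11 atomics in place of RCU and css refcounting; struct group, tryget, and get_group are made-up names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct group { atomic_int refs; };      /* 0 means "being destroyed" */

static bool tryget(struct group *g)
{
        int old = atomic_load(&g->refs);

        /* Only take a reference while the group is still alive. */
        while (old > 0) {
                if (atomic_compare_exchange_weak(&g->refs, &old, old + 1))
                        return true;
        }
        return false;
}

static struct group *get_group(struct group *_Atomic *slot)
{
        struct group *g;

        do {
                g = atomic_load(slot);  /* may change between iterations */
        } while (!tryget(g));
        return g;
}

int main(void)
{
        struct group live = { 1 };
        struct group *_Atomic slot = &live;

        printf("refs=%d\n", atomic_load(&get_group(&slot)->refs)); /* refs=2 */
        return 0;
}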
|
vmpressure.h

      33  extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
      35  extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
      39  extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
      41  extern int vmpressure_register_event(struct mem_cgroup *memcg,
      44  extern void vmpressure_unregister_event(struct mem_cgroup *memcg,
      47  static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
      49  static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg,
|
swap.h

     385  extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
     583  static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
     590          if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg))
     593          return READ_ONCE(memcg->swappiness);
     634  extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
     648  static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
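
Lines 583-593 show the lookup order for swappiness: the root group (or a kernel with memcg disabled) uses the global vm.swappiness value, every other group uses its own per-group setting. A model with plain ints; 60 is the well-known vm.swappiness default, and the struct is a stand-in:

#include <stdio.h>

#define GLOBAL_SWAPPINESS 60    /* the vm.swappiness default */

struct group { int swappiness; int is_root; };

static int group_swappiness(const struct group *g)
{
        if (!g || g->is_root)
                return GLOBAL_SWAPPINESS;   /* fall back to the global knob */
        return g->swappiness;               /* per-group override */
}

int main(void)
{
        struct group root = { 0, 1 }, child = { 10, 0 };

        printf("%d %d\n", group_swappiness(&root),
               group_swappiness(&child));   /* 60 10 */
        return 0;
}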
|
mmzone.h

     621  void lru_gen_init_memcg(struct mem_cgroup *memcg);
     622  void lru_gen_exit_memcg(struct mem_cgroup *memcg);
     623  void lru_gen_online_memcg(struct mem_cgroup *memcg);
     624  void lru_gen_offline_memcg(struct mem_cgroup *memcg);
     625  void lru_gen_release_memcg(struct mem_cgroup *memcg);
     626  void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid);
     643  static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
     647  static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg)
     651  static inline void lru_gen_online_memcg(struct mem_cgroup *memcg)
     655  static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg)
     [all …]
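
The two groups of matches above illustrate the kernel's config-stub convention: when the feature is compiled in, the functions are real declarations (lines 621-626); when it is compiled out, the same names exist as empty static inlines (lines 643-655), so call sites need no #ifdefs. A tiny self-contained illustration of the pattern; CONFIG_FEATURE and feature_init are made-up names:

#include <stdio.h>

#define CONFIG_FEATURE 0        /* flip to 1 to "enable" the feature */

#if CONFIG_FEATURE
void feature_init(int id) { printf("init %d\n", id); }
#else
static inline void feature_init(int id) { (void)id; /* compiled-out stub */ }
#endif

int main(void)
{
        feature_init(1);        /* valid either way, no #ifdef at the call site */
        return 0;
}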
|
backing-dev.h

     146  void wb_memcg_offline(struct mem_cgroup *memcg);
     352  static inline void wb_memcg_offline(struct mem_cgroup *memcg)
|
oom.h

      36          struct mem_cgroup *memcg;
|
/linux/tools/testing/selftests/cgroup/

test_memcontrol.c

     248          char *memcg;                                /* test_memcg_current_peak() */
     253          memcg = cg_name(root, "memcg_test");
     254          if (!memcg)
     257          if (cg_create(memcg))
     260          current = cg_read_long(memcg, "memory.current");
     264          peak = cg_read_long(memcg, "memory.peak");
     268          if (cg_run(memcg, alloc_anon_50M_check, NULL))
     271          peak = cg_read_long(memcg, "memory.peak");
     280          peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
     301          peak_fd2 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);
     [all …]
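
cg_read_long() in this selftest reads a numeric value out of a cgroup control file. A standalone re-implementation of that idea as a sketch; the /sys/fs/cgroup/memcg_test path is only an example and the helper is my own, not the selftest's exact code:

#include <stdio.h>

static long cg_read_long(const char *cgroup, const char *file)
{
        char path[512];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", cgroup, file);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%ld", &val) != 1)    /* control files hold one number */
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        /* Example path: a v2 cgroup named memcg_test under the root. */
        long peak = cg_read_long("/sys/fs/cgroup/memcg_test", "memory.peak");

        printf("memory.peak = %ld\n", peak);
        return 0;
}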
|
memcg_protection.m

       5  % This script simulates reclaim protection behavior on a single level of memcg
      65  % commit 1bc63fb1272b ("mm, memcg: make scan aggression always exclude protection")
|
/linux/tools/cgroup/

memcg_slabinfo.py

      42      memcg = container_of(css, 'struct mem_cgroup', 'css')
      43      MEMCGS[css.cgroup.kn.id.value_()] = memcg
     170      memcg = MEMCGS[cgroup_id]
     186          obj_cgroups.add(memcg.objcg.value_())
     188          memcg.objcg_list.address_of_(),
     220          memcg.kmem_caches.address_of_(),
|
/linux/Documentation/admin-guide/cgroup-v1/

memcg_test.rst

       9  Because the VM is getting complex (one of the reasons is memcg...), memcg's
      10  behavior is complex. This is a document for memcg's internal behavior.
      61  At commit(), the page is associated with the memcg.
     114  But a brief explanation of the behavior of memcg around shmem will be
     136  Each memcg has its own vector of LRUs (inactive anon, active anon,
     138  each LRU handled under a single lru_lock for that memcg and node.
     145  9.1 Small limit to memcg.
     148  When testing racy cases, it is a good test to set the memcg's limit
     158  Historically, memcg's shmem handling was poor and we saw some amount
     248  Besides, management of swap is one of the complicated parts of memcg,
     [all …]
|
/linux/include/linux/sched/

mm.h

     491  set_active_memcg(struct mem_cgroup *memcg)
     497                  this_cpu_write(int_active_memcg, memcg);
     500                  current->active_memcg = memcg;
     507  set_active_memcg(struct mem_cgroup *memcg)
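
set_active_memcg() installs the memcg to which subsequent kernel allocations are charged and returns the previous value so the caller can restore it. A sketch of that save/restore discipline, with a thread-local string standing in for the kernel's percpu/task-context split:

#include <stdio.h>

static _Thread_local const char *active_memcg;

static const char *set_active_memcg(const char *memcg)
{
        const char *old = active_memcg;

        active_memcg = memcg;
        return old;             /* callers must restore this */
}

int main(void)
{
        const char *old = set_active_memcg("A");

        /* ... allocations here would be charged to "A" ... */
        set_active_memcg(old);  /* always restore the saved value */
        printf("%s\n", active_memcg ? active_memcg : "(none)");
        return 0;
}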
|
/linux/Documentation/translations/zh_CN/mm/

hwpoison.rst

     119  corrupt-filter-memcg
     120          Limit injection to pages owned by the memgroup, specified by the
                  inode number of the memcg.
     130          echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
|
/linux/mm/damon/

ops-common.c

     261          struct mem_cgroup *memcg;                   /* damos_folio_filter_match() */
     273          memcg = folio_memcg_check(folio);
     274          if (!memcg)
     277          matched = filter->memcg_id == mem_cgroup_id(memcg);
|
/linux/Documentation/admin-guide/mm/

shrinker_debugfs.rst

      14  trigger *count_objects()* and *scan_objects()* callbacks for each memcg and
      59  If the shrinker is not memcg-aware or CONFIG_MEMCG is off, 0 is printed
     112  For a non-memcg-aware shrinker or on a system with no memory
|
/linux/Documentation/mm/

multigen_lru.rst

     162  An ``mm_struct`` list is maintained for each memcg, and an
     163  ``mm_struct`` follows its owner task to the new memcg when this task
     173  ``mm_struct`` was migrated, pages left in the previous memcg will be
     174  ignored when the current memcg is under reclaim. Similarly, page table
     225  A memcg LRU is a per-node LRU of memcgs. It is also an LRU of LRUs,
     226  since each node and memcg combination has an LRU of folios (see
     229  data centers. Note that the memcg LRU only applies to global reclaim.
     231  The basic structure of a memcg LRU can be understood by an analogy to
     238  3. Other events trigger similar operations, e.g., offlining a memcg
     243  1. Sharding, which allows each thread to start at a random memcg (in
     [all …]
|