Lines Matching +full:usecase +full:- +full:specific
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
28 #include <linux/cgroup-defs.h>
40 #include <linux/page-flags.h>
41 #include <linux/backing-dev.h>
70 #include "memcontrol-v1.h"
108 (current->flags & PF_EXITING); in task_is_dying()
116 return &memcg->vmpressure; in memcg_to_vmpressure()
146 * objcg->nr_charged_bytes can't have an arbitrary byte value. in obj_cgroup_release()
150 * 1) CPU0: objcg == stock->cached_objcg in obj_cgroup_release()
155 * objcg->nr_charged_bytes = PAGE_SIZE - 92 in obj_cgroup_release()
157 * 92 bytes are added to stock->nr_bytes in obj_cgroup_release()
159 * 92 bytes are added to objcg->nr_charged_bytes in obj_cgroup_release()
164 nr_bytes = atomic_read(&objcg->nr_charged_bytes); in obj_cgroup_release()
165 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)); in obj_cgroup_release()
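Taken together, the race scenario above is plain arithmetic: (PAGE_SIZE - 92) + 92 = PAGE_SIZE, so by the time the objcg is released, objcg->nr_charged_bytes holds a whole number of pages. That is exactly what the WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1)) check just above asserts before the remainder is converted to pages and uncharged.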
172 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in obj_cgroup_release()
173 memcg1_account_kmem(memcg, -nr_pages); in obj_cgroup_release()
180 list_del(&objcg->list); in obj_cgroup_release()
196 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0, in obj_cgroup_alloc()
202 INIT_LIST_HEAD(&objcg->list); in obj_cgroup_alloc()
211 objcg = rcu_replace_pointer(memcg->objcg, NULL, true); in memcg_reparent_objcgs()
216 list_add(&objcg->list, &memcg->objcg_list); in memcg_reparent_objcgs()
218 list_for_each_entry(iter, &memcg->objcg_list, list) in memcg_reparent_objcgs()
219 WRITE_ONCE(iter->memcg, parent); in memcg_reparent_objcgs()
221 list_splice(&memcg->objcg_list, &parent->objcg_list); in memcg_reparent_objcgs()
225 percpu_ref_kill(&objcg->refcnt); in memcg_reparent_objcgs()
241 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
258 return &memcg->css; in mem_cgroup_css_from_folio()
262 * page_cgroup_ino - return inode number of the memcg a page is charged to
283 while (memcg && !(memcg->css.flags & CSS_ONLINE)) in page_cgroup_ino()
286 ino = cgroup_ino(memcg->css.cgroup); in page_cgroup_ino()
383 /* Non-hierarchical (CPU aggregated) state */
404 x = READ_ONCE(pn->lruvec_stats->state[i]); in lruvec_page_state()
427 x = READ_ONCE(pn->lruvec_stats->state_local[i]); in lruvec_page_state_local()
525 /* Non-hierarchical (CPU aggregated) page state & events */
560 return atomic_read(&vmstats->stats_updates) > in memcg_vmstats_needs_flush()
574 css_rstat_updated(&memcg->css, cpu); in memcg_rstat_updated()
575 statc_pcpu = memcg->vmstats_percpu; in memcg_rstat_updated()
576 for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) { in memcg_rstat_updated()
583 if (memcg_vmstats_needs_flush(statc->vmstats)) in memcg_rstat_updated()
586 stats_updates = this_cpu_add_return(statc_pcpu->stats_updates, in memcg_rstat_updated()
591 stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0); in memcg_rstat_updated()
592 atomic_add(stats_updates, &statc->vmstats->stats_updates); in memcg_rstat_updated()
598 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats); in __mem_cgroup_flush_stats()
600 trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates), in __mem_cgroup_flush_stats()
609 css_rstat_flush(&memcg->css); in __mem_cgroup_flush_stats()
613 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
643 * in latency-sensitive paths is as cheap as possible. in flush_memcg_stats_dwork()
657 x = READ_ONCE(memcg->vmstats->state[i]); in memcg_page_state()
669 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
682 * mod_memcg_state - update cgroup memory statistics
684 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
701 this_cpu_add(memcg->vmstats_percpu->state[i], val); in mod_memcg_state()
719 x = READ_ONCE(memcg->vmstats->state_local[i]); in memcg_page_state_local()
741 memcg = pn->memcg; in mod_memcg_lruvec_state()
746 this_cpu_add(memcg->vmstats_percpu->state[i], val); in mod_memcg_lruvec_state()
749 this_cpu_add(pn->lruvec_stats_percpu->state[i], val); in mod_memcg_lruvec_state()
759 * __mod_lruvec_state - update lruvec memory statistics
766 * change of state at this level: per-node, per-cgroup, per-lruvec.
813 * when we free the slab object, we need to update the per-memcg in __mod_lruvec_kmem_state()
826 * count_memcg_events - account VM events in a cgroup
845 this_cpu_add(memcg->vmstats_percpu->events[i], count); in count_memcg_events()
859 return READ_ONCE(memcg->vmstats->events[i]); in memcg_events()
870 return READ_ONCE(memcg->vmstats->events_local[i]); in memcg_events_local()
877 * mm_update_next_owner() may clear mm->owner to NULL in mem_cgroup_from_task()
893 return current->active_memcg; in active_memcg()
900 * Obtain a reference on mm->memcg and returns it if successful. If mm
903 * 2) current->mm->memcg, if available
927 css_get(&memcg->css); in get_mem_cgroup_from_mm()
930 mm = current->mm; in get_mem_cgroup_from_mm()
937 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); in get_mem_cgroup_from_mm()
940 } while (!css_tryget(&memcg->css)); in get_mem_cgroup_from_mm()
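A minimal usage sketch (illustrative, not one of the matched lines): the reference obtained here is a css reference and must be dropped with css_put() when the caller is done, as the __mem_cgroup_handle_over_high() lines further down in this listing show.

    struct mem_cgroup *memcg;

    memcg = get_mem_cgroup_from_mm(current->mm);
    /* ... inspect or charge against memcg ... */
    css_put(&memcg->css);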
947 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
959 if (!css_tryget(&memcg->css)) { in get_mem_cgroup_from_current()
968 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
979 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css))) in get_mem_cgroup_from_folio()
986 * mem_cgroup_iter - iterate over memory cgroup hierarchy
992 * @root itself, or %NULL after a full round-trip.
996 * to cancel a hierarchy walk before the round-trip is complete.
1023 int nid = reclaim->pgdat->node_id; in mem_cgroup_iter()
1025 iter = &root->nodeinfo[nid]->iter; in mem_cgroup_iter()
1026 gen = atomic_read(&iter->generation); in mem_cgroup_iter()
1033 reclaim->generation = gen; in mem_cgroup_iter()
1034 else if (reclaim->generation != gen) in mem_cgroup_iter()
1037 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1041 css = pos ? &pos->css : NULL; in mem_cgroup_iter()
1043 while ((css = css_next_descendant_pre(css, &root->css))) { in mem_cgroup_iter()
1049 if (css == &root->css || css_tryget(css)) in mem_cgroup_iter()
1061 if (cmpxchg(&iter->position, pos, next) != pos) { in mem_cgroup_iter()
1062 if (css && css != &root->css) in mem_cgroup_iter()
1068 atomic_inc(&iter->generation); in mem_cgroup_iter()
1073 * the hierarchy - make sure they see at least in mem_cgroup_iter()
1084 css_put(&prev->css); in mem_cgroup_iter()
1090 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1100 css_put(&prev->css); in mem_cgroup_iter_break()
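For context, a sketch of the conventional walk that the kerneldoc above describes (illustrative, not one of the matched lines): the previous return value is passed back in as @prev so the iterator can manage css references, and mem_cgroup_iter_break() ends the walk early without leaking the last reference.

    struct mem_cgroup *memcg;

    memcg = mem_cgroup_iter(root, NULL, NULL);
    do {
            if (done_enough(memcg)) {       /* hypothetical stop condition */
                    mem_cgroup_iter_break(root, memcg);
                    break;
            }
            /* ... reclaim from or account against memcg ... */
    } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));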
1111 mz = from->nodeinfo[nid]; in __invalidate_reclaim_iterators()
1112 iter = &mz->iter; in __invalidate_reclaim_iterators()
1113 cmpxchg(&iter->position, dead_memcg, NULL); in __invalidate_reclaim_iterators()
1128 * When cgroup1 non-hierarchy mode is used, in invalidate_reclaim_iterators()
1139 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1145 * descendants and calls @fn for each task. If @fn returns a non-zero
1163 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); in mem_cgroup_scan_tasks()
1195 * folio_lruvec_lock - Lock the lruvec for a folio.
1199 * - folio locked
1200 * - folio_test_lru false
1201 * - folio frozen (refcount of 0)
1209 spin_lock(&lruvec->lru_lock); in folio_lruvec_lock()
1216 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1220 * - folio locked
1221 * - folio_test_lru false
1222 * - folio frozen (refcount of 0)
1231 spin_lock_irq(&lruvec->lru_lock); in folio_lruvec_lock_irq()
1238 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1243 * - folio locked
1244 * - folio_test_lru false
1245 * - folio frozen (refcount of 0)
1255 spin_lock_irqsave(&lruvec->lru_lock, *flags); in folio_lruvec_lock_irqsave()
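Illustrative pairing for the three lock variants above (not one of the matched lines), assuming the folio's memcg binding is stabilised by one of the listed conditions; the caller simply releases lruvec->lru_lock when finished:

    struct lruvec *lruvec;

    lruvec = folio_lruvec_lock_irq(folio);
    /* ... move the folio on or off an LRU list ... */
    spin_unlock_irq(&lruvec->lru_lock);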
1262 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1282 lru_size = &mz->lru_zone_size[zid][lru]; in mem_cgroup_update_lru_size()
1300 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1312 count = page_counter_read(&memcg->memory); in mem_cgroup_margin()
1313 limit = READ_ONCE(memcg->memory.max); in mem_cgroup_margin()
1315 margin = limit - count; in mem_cgroup_margin()
1318 count = page_counter_read(&memcg->memsw); in mem_cgroup_margin()
1319 limit = READ_ONCE(memcg->memsw.max); in mem_cgroup_margin()
1321 margin = min(margin, limit - count); in mem_cgroup_margin()
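A quick worked example for the margin computation above (illustrative numbers): with memory.max = 1000 pages and 900 pages charged, the margin is 100 pages; if memsw accounting is active with memsw.max = 950 and 920 pages of memory+swap charged, the memsw margin is 30, and the function returns min(100, 30) = 30 pages of chargeable space.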
1471 * 1) generic big picture -> specifics and details in memcg_stat_format()
1472 * 2) reflecting userspace activity -> reflecting kernel heuristics in memcg_stat_format()
1545 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_context()
1570 memory_failcnt = atomic_long_read(&memcg->memory_events[MEMCG_MAX]); in mem_cgroup_print_oom_meminfo()
1572 memory_failcnt = memcg->memory.failcnt; in mem_cgroup_print_oom_meminfo()
1575 K((u64)page_counter_read(&memcg->memory)), in mem_cgroup_print_oom_meminfo()
1576 K((u64)READ_ONCE(memcg->memory.max)), memory_failcnt); in mem_cgroup_print_oom_meminfo()
1579 K((u64)page_counter_read(&memcg->swap)), in mem_cgroup_print_oom_meminfo()
1580 K((u64)READ_ONCE(memcg->swap.max)), in mem_cgroup_print_oom_meminfo()
1581 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in mem_cgroup_print_oom_meminfo()
1585 K((u64)page_counter_read(&memcg->memsw)), in mem_cgroup_print_oom_meminfo()
1586 K((u64)memcg->memsw.max), memcg->memsw.failcnt); in mem_cgroup_print_oom_meminfo()
1588 K((u64)page_counter_read(&memcg->kmem)), in mem_cgroup_print_oom_meminfo()
1589 K((u64)memcg->kmem.max), memcg->kmem.failcnt); in mem_cgroup_print_oom_meminfo()
1594 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_meminfo()
1606 unsigned long max = READ_ONCE(memcg->memory.max); in mem_cgroup_get_max()
1611 unsigned long swap = READ_ONCE(memcg->memsw.max) - max; in mem_cgroup_get_max()
1617 max += min(READ_ONCE(memcg->swap.max), in mem_cgroup_get_max()
1625 return page_counter_read(&memcg->memory); in mem_cgroup_size()
1681 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1683 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1686 * by killing all belonging OOM-killable tasks.
1688 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1719 * highest-level memory cgroup with oom.group set. in mem_cgroup_get_oom_group()
1722 if (READ_ONCE(memcg->oom_group)) in mem_cgroup_get_oom_group()
1730 css_get(&oom_group->css); in mem_cgroup_get_oom_group()
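Usage sketch (illustrative, mirroring how the OOM killer consumes this API; victim and oom_domain are the kerneldoc parameter names): a non-NULL return carries a reference that must be dropped with mem_cgroup_put().

    struct mem_cgroup *oom_group;

    oom_group = mem_cgroup_get_oom_group(victim, oom_domain);
    if (oom_group) {
            /* ... kill all OOM-killable tasks in oom_group ... */
            mem_cgroup_put(oom_group);
    }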
1740 pr_cont_cgroup_path(memcg->css.cgroup); in mem_cgroup_print_oom_group()
1810 if (memcg != READ_ONCE(stock->cached[i])) in consume_stock()
1813 stock_pages = READ_ONCE(stock->nr_pages[i]); in consume_stock()
1815 WRITE_ONCE(stock->nr_pages[i], stock_pages - nr_pages); in consume_stock()
1828 page_counter_uncharge(&memcg->memory, nr_pages); in memcg_uncharge()
1830 page_counter_uncharge(&memcg->memsw, nr_pages); in memcg_uncharge()
1838 struct mem_cgroup *old = READ_ONCE(stock->cached[i]); in drain_stock()
1844 stock_pages = READ_ONCE(stock->nr_pages[i]); in drain_stock()
1847 WRITE_ONCE(stock->nr_pages[i], 0); in drain_stock()
1850 css_put(&old->css); in drain_stock()
1851 WRITE_ONCE(stock->cached[i], NULL); in drain_stock()
1866 if (WARN_ONCE(!in_task(), "drain in non-task context")) in drain_local_memcg_stock()
1873 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_memcg_stock()
1882 if (WARN_ONCE(!in_task(), "drain in non-task context")) in drain_local_obj_stock()
1889 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); in drain_local_obj_stock()
1900 int empty_slot = -1; in refill_stock()
1924 cached = READ_ONCE(stock->cached[i]); in refill_stock()
1925 if (!cached && empty_slot == -1) in refill_stock()
1927 if (memcg == READ_ONCE(stock->cached[i])) { in refill_stock()
1928 stock_pages = READ_ONCE(stock->nr_pages[i]) + nr_pages; in refill_stock()
1929 WRITE_ONCE(stock->nr_pages[i], stock_pages); in refill_stock()
1939 if (i == -1) { in refill_stock()
1943 css_get(&memcg->css); in refill_stock()
1944 WRITE_ONCE(stock->cached[i], memcg); in refill_stock()
1945 WRITE_ONCE(stock->nr_pages[i], nr_pages); in refill_stock()
1960 memcg = READ_ONCE(stock->cached[i]); in is_memcg_drain_needed()
1964 if (READ_ONCE(stock->nr_pages[i]) && in is_memcg_drain_needed()
1975 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1986 * Notify other cpus that system-wide "drain" is running in drain_all_stock()
1989 * per-cpu data. CPU up doesn't touch memcg_stock at all. in drain_all_stock()
1997 if (!test_bit(FLUSHING_CACHED_CHARGE, &memcg_st->flags) && in drain_all_stock()
2000 &memcg_st->flags)) { in drain_all_stock()
2002 drain_local_memcg_stock(&memcg_st->work); in drain_all_stock()
2004 schedule_work_on(cpu, &memcg_st->work); in drain_all_stock()
2007 if (!test_bit(FLUSHING_CACHED_CHARGE, &obj_st->flags) && in drain_all_stock()
2010 &obj_st->flags)) { in drain_all_stock()
2012 drain_local_obj_stock(&obj_st->work); in drain_all_stock()
2014 schedule_work_on(cpu, &obj_st->work); in drain_all_stock()
2039 if (page_counter_read(&memcg->memory) <= in reclaim_high()
2040 READ_ONCE(memcg->memory.high)) in reclaim_high()
2077 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2079 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2084 * reasonable delay curve compared to precision-adjusted overage, not
2089 * +-------+------------------------+
2091 * +-------+------------------------+
2113 * +-------+------------------------+
2131 overage = usage - high; in calculate_overage()
2141 overage = calculate_overage(page_counter_read(&memcg->memory), in mem_find_max_overage()
2142 READ_ONCE(memcg->memory.high)); in mem_find_max_overage()
2155 overage = calculate_overage(page_counter_read(&memcg->swap), in swap_find_max_overage()
2156 READ_ONCE(memcg->swap.high)); in swap_find_max_overage()
2193 * N-sized allocations are throttled approximately the same as one in calculate_high_delay()
2194 * 4N-sized allocation. in calculate_high_delay()
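Hedged summary of the delay calculation these fragments belong to: the penalty is roughly quadratic in the relative overage (usage - high) / high, carried with MEMCG_DELAY_PRECISION_SHIFT extra bits of precision, converted to jiffies via HZ, scaled back down by MEMCG_DELAY_SCALING_SHIFT, and finally weighted by nr_pages / MEMCG_CHARGE_BATCH so that four N-sized allocations pay approximately the same total delay as one 4N-sized allocation.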
2212 unsigned int nr_pages = current->memcg_nr_pages_over_high; in __mem_cgroup_handle_over_high()
2217 memcg = get_mem_cgroup_from_mm(current->mm); in __mem_cgroup_handle_over_high()
2218 current->memcg_nr_pages_over_high = 0; in __mem_cgroup_handle_over_high()
2276 if (nr_reclaimed || nr_retries--) { in __mem_cgroup_handle_over_high()
2287 * need to account for any ill-begotten jiffies to pay them off later. in __mem_cgroup_handle_over_high()
2294 css_put(&memcg->css); in __mem_cgroup_handle_over_high()
2321 page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge_memcg()
2322 if (page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge_memcg()
2325 page_counter_uncharge(&memcg->memsw, batch); in try_charge_memcg()
2343 if (unlikely(current->flags & PF_MEMALLOC)) in try_charge_memcg()
2383 if (nr_retries--) in try_charge_memcg()
2412 return -ENOMEM; in try_charge_memcg()
2426 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2428 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2434 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
2448 mem_high = page_counter_read(&memcg->memory) > in try_charge_memcg()
2449 READ_ONCE(memcg->memory.high); in try_charge_memcg()
2450 swap_high = page_counter_read(&memcg->swap) > in try_charge_memcg()
2451 READ_ONCE(memcg->swap.high); in try_charge_memcg()
2456 schedule_work(&memcg->high_work); in try_charge_memcg()
2468 * Target some best-effort fairness between the tasks, in try_charge_memcg()
2472 current->memcg_nr_pages_over_high += batch; in try_charge_memcg()
2485 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH && in try_charge_memcg()
2486 !(current->flags & PF_MEMALLOC) && in try_charge_memcg()
2507 * - the page lock in commit_charge()
2508 * - LRU isolation in commit_charge()
2509 * - exclusive reference in commit_charge()
2511 folio->memcg_data = (unsigned long)memcg; in commit_charge()
2525 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id]; in account_slab_nmi_safe()
2528 css_rstat_updated(&memcg->css, smp_processor_id()); in account_slab_nmi_safe()
2530 atomic_add(nr, &pn->slab_reclaimable); in account_slab_nmi_safe()
2532 atomic_add(nr, &pn->slab_unreclaimable); in account_slab_nmi_safe()
2563 * Slab objects are accounted individually, not per-page. in mem_cgroup_from_obj_folio()
2565 * slab->obj_exts. in mem_cgroup_from_obj_folio()
2577 off = obj_to_index(slab->slab_cache, slab, p); in mem_cgroup_from_obj_folio()
2587 * slab->obj_exts has not been freed yet in mem_cgroup_from_obj_folio()
2616 objcg = rcu_dereference(memcg->objcg); in __get_obj_cgroup_from_memcg()
2631 old = xchg(¤t->objcg, NULL); in current_objcg_update()
2641 if (!current->mm || (current->flags & PF_KTHREAD)) in current_objcg_update()
2670 } while (!try_cmpxchg(¤t->objcg, &old, objcg)); in current_objcg_update()
2684 memcg = current->active_memcg; in current_obj_cgroup()
2688 objcg = READ_ONCE(current->objcg); in current_obj_cgroup()
2713 objcg = rcu_dereference_check(memcg->objcg, 1); in current_obj_cgroup()
2752 css_rstat_updated(&memcg->css, smp_processor_id()); in account_kmem_nmi_safe()
2753 atomic_add(val, &memcg->kmem_stat); in account_kmem_nmi_safe()
2775 account_kmem_nmi_safe(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
2776 memcg1_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
2780 css_put(&memcg->css); in obj_cgroup_uncharge_pages()
2806 css_put(&memcg->css); in obj_cgroup_charge_pages()
2813 unsigned long memcg_data = page->memcg_data; in page_objcg()
2820 return (struct obj_cgroup *)(memcg_data - MEMCG_DATA_KMEM); in page_objcg()
2825 page->memcg_data = (unsigned long)objcg | MEMCG_DATA_KMEM; in page_set_objcg()
2867 page->memcg_data = 0; in __memcg_kmem_uncharge_page()
2881 if (stock->cached_pgdat != pgdat) { in __account_obj_stock()
2883 struct pglist_data *oldpg = stock->cached_pgdat; in __account_obj_stock()
2885 if (stock->nr_slab_reclaimable_b) { in __account_obj_stock()
2887 stock->nr_slab_reclaimable_b); in __account_obj_stock()
2888 stock->nr_slab_reclaimable_b = 0; in __account_obj_stock()
2890 if (stock->nr_slab_unreclaimable_b) { in __account_obj_stock()
2892 stock->nr_slab_unreclaimable_b); in __account_obj_stock()
2893 stock->nr_slab_unreclaimable_b = 0; in __account_obj_stock()
2895 stock->cached_pgdat = pgdat; in __account_obj_stock()
2898 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b in __account_obj_stock()
2899 : &stock->nr_slab_unreclaimable_b; in __account_obj_stock()
2930 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { in consume_obj_stock()
2931 stock->nr_bytes -= nr_bytes; in consume_obj_stock()
2945 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); in drain_obj_stock()
2950 if (stock->nr_bytes) { in drain_obj_stock()
2951 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock()
2952 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1); in drain_obj_stock()
2959 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in drain_obj_stock()
2960 memcg1_account_kmem(memcg, -nr_pages); in drain_obj_stock()
2964 css_put(&memcg->css); in drain_obj_stock()
2968 * The leftover is flushed to the centralized per-memcg value. in drain_obj_stock()
2970 * to a per-cpu stock (probably, on an other CPU), see in drain_obj_stock()
2973 * How often it's flushed is a trade-off between the memory in drain_obj_stock()
2977 atomic_add(nr_bytes, &old->nr_charged_bytes); in drain_obj_stock()
2978 stock->nr_bytes = 0; in drain_obj_stock()
2984 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
2985 if (stock->nr_slab_reclaimable_b) { in drain_obj_stock()
2986 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
2988 stock->nr_slab_reclaimable_b); in drain_obj_stock()
2989 stock->nr_slab_reclaimable_b = 0; in drain_obj_stock()
2991 if (stock->nr_slab_unreclaimable_b) { in drain_obj_stock()
2992 mod_objcg_mlstate(old, stock->cached_pgdat, in drain_obj_stock()
2994 stock->nr_slab_unreclaimable_b); in drain_obj_stock()
2995 stock->nr_slab_unreclaimable_b = 0; in drain_obj_stock()
2997 stock->cached_pgdat = NULL; in drain_obj_stock()
3000 WRITE_ONCE(stock->cached_objcg, NULL); in drain_obj_stock()
3007 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); in obj_stock_flush_required()
3033 nr_bytes = nr_bytes & (PAGE_SIZE - 1); in refill_obj_stock()
3034 atomic_add(nr_bytes, &objcg->nr_charged_bytes); in refill_obj_stock()
3039 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ in refill_obj_stock()
3042 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) in refill_obj_stock()
3043 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; in refill_obj_stock()
3044 WRITE_ONCE(stock->cached_objcg, objcg); in refill_obj_stock()
3048 stock->nr_bytes += nr_bytes; in refill_obj_stock()
3053 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) { in refill_obj_stock()
3054 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
3055 stock->nr_bytes &= (PAGE_SIZE - 1); in refill_obj_stock()
3074 * In theory, objcg->nr_charged_bytes can have enough in obj_cgroup_charge_account()
3075 * pre-charged bytes to satisfy the allocation. However, in obj_cgroup_charge_account()
3076 * flushing objcg->nr_charged_bytes requires two atomic in obj_cgroup_charge_account()
3077 * operations, and objcg->nr_charged_bytes can't be big. in obj_cgroup_charge_account()
3078 * The shared objcg->nr_charged_bytes can also become a in obj_cgroup_charge_account()
3082 * objcg->nr_charged_bytes later on when objcg changes. in obj_cgroup_charge_account()
3084 * The stock's nr_bytes may contain enough pre-charged bytes in obj_cgroup_charge_account()
3086 * on the pre-charged bytes not being changed outside of in obj_cgroup_charge_account()
3088 * pre-charged bytes as well when charging pages. To avoid a in obj_cgroup_charge_account()
3091 * to temporarily allow the pre-charged bytes to exceed the page in obj_cgroup_charge_account()
3092 * size limit. The maximum reachable value of the pre-charged in obj_cgroup_charge_account()
3093 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data in obj_cgroup_charge_account()
3097 nr_bytes = size & (PAGE_SIZE - 1); in obj_cgroup_charge_account()
3104 refill_obj_stock(objcg, nr_bytes ? PAGE_SIZE - nr_bytes : 0, in obj_cgroup_charge_account()
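Concrete reading of the charge path above (illustrative numbers): charging a 92-byte object charges one full page to the memcg, then refills the per-cpu objcg stock with the PAGE_SIZE - 92 leftover bytes (the same remainder that the obj_cgroup_release() comment near the top of this listing reasons about), so subsequent small allocations from the same objcg can be satisfied from the stock without touching the page counters.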
3126 return s->size + sizeof(struct obj_cgroup *); in obj_full_size()
3163 css_put(&memcg->css); in __memcg_slab_post_alloc_hook()
3215 refill_obj_stock(objcg, obj_size, true, -obj_size, in __memcg_slab_free_hook()
3236 obj_cgroup_get_many(objcg, nr - 1); in split_page_memcg()
3247 new_refs = (1 << (old_order - new_order)) - 1; in folio_split_memcg_refs()
3248 css_get_many(&__folio_memcg(folio)->css, new_refs); in folio_split_memcg_refs()
3263 val += total_swap_pages - get_nr_swap_pages(); in mem_cgroup_usage()
3266 val = page_counter_read(&memcg->memory); in mem_cgroup_usage()
3268 val = page_counter_read(&memcg->memsw); in mem_cgroup_usage()
3285 return -ENOMEM; in memcg_online_kmem()
3287 objcg->memcg = memcg; in memcg_online_kmem()
3288 rcu_assign_pointer(memcg->objcg, objcg); in memcg_online_kmem()
3290 memcg->orig_objcg = objcg; in memcg_online_kmem()
3294 memcg->kmemcg_id = memcg->id.id; in memcg_online_kmem()
3328 return wb_domain_init(&memcg->cgwb_domain, gfp); in memcg_wb_domain_init()
3333 wb_domain_exit(&memcg->cgwb_domain); in memcg_wb_domain_exit()
3338 wb_domain_size_changed(&memcg->cgwb_domain); in memcg_wb_domain_size_changed()
3343 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_domain()
3345 if (!memcg->css.parent) in mem_cgroup_wb_domain()
3348 return &memcg->cgwb_domain; in mem_cgroup_wb_domain()
3352 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3360 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3363 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3373 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_wb_stats()
3385 unsigned long ceiling = min(READ_ONCE(memcg->memory.max), in mem_cgroup_wb_stats()
3386 READ_ONCE(memcg->memory.high)); in mem_cgroup_wb_stats()
3387 unsigned long used = page_counter_read(&memcg->memory); in mem_cgroup_wb_stats()
3389 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used)); in mem_cgroup_wb_stats()
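Worked example for the headroom computation above (illustrative numbers): with memory.max = 1 GiB, memory.high = 512 MiB and 300 MiB in use, this level's ceiling is min(1 GiB, 512 MiB) = 512 MiB and its headroom is 212 MiB; since each ancestor is folded in with min(), *pheadroom ends up as the smallest headroom anywhere on the path toward the root.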
3398 * tracks ownership per-page while the latter per-inode. This was a
3399 * deliberate design decision because honoring per-page ownership in the
3401 * and deemed unnecessary given that write-sharing an inode across
3402 * different cgroups isn't a common use-case.
3404 * Combined with inode majority-writer ownership switching, this works well
3425 * page - a page whose memcg and writeback ownerships don't match - is
3431 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3445 int oldest = -1; in mem_cgroup_track_foreign_dirty_slowpath()
3456 frn = &memcg->cgwb_frn[i]; in mem_cgroup_track_foreign_dirty_slowpath()
3457 if (frn->bdi_id == wb->bdi->id && in mem_cgroup_track_foreign_dirty_slowpath()
3458 frn->memcg_id == wb->memcg_css->id) in mem_cgroup_track_foreign_dirty_slowpath()
3460 if (time_before64(frn->at, oldest_at) && in mem_cgroup_track_foreign_dirty_slowpath()
3461 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_track_foreign_dirty_slowpath()
3463 oldest_at = frn->at; in mem_cgroup_track_foreign_dirty_slowpath()
3469 * Re-using an existing one. Update timestamp lazily to in mem_cgroup_track_foreign_dirty_slowpath()
3471 * reasonably up-to-date and significantly shorter than in mem_cgroup_track_foreign_dirty_slowpath()
3479 if (time_before64(frn->at, now - update_intv)) in mem_cgroup_track_foreign_dirty_slowpath()
3480 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
3483 frn = &memcg->cgwb_frn[oldest]; in mem_cgroup_track_foreign_dirty_slowpath()
3484 frn->bdi_id = wb->bdi->id; in mem_cgroup_track_foreign_dirty_slowpath()
3485 frn->memcg_id = wb->memcg_css->id; in mem_cgroup_track_foreign_dirty_slowpath()
3486 frn->at = now; in mem_cgroup_track_foreign_dirty_slowpath()
3493 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); in mem_cgroup_flush_foreign()
3499 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i]; in mem_cgroup_flush_foreign()
3507 if (time_after64(frn->at, now - intv) && in mem_cgroup_flush_foreign()
3508 atomic_read(&frn->done.cnt) == 1) { in mem_cgroup_flush_foreign()
3509 frn->at = 0; in mem_cgroup_flush_foreign()
3510 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id); in mem_cgroup_flush_foreign()
3511 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, in mem_cgroup_flush_foreign()
3513 &frn->done); in mem_cgroup_flush_foreign()
3538 * Swap-out records and page cache shadow entries need to store memcg
3541 * memory-controlled cgroups to 64k.
3548 * even when there are much fewer than 64k cgroups - possibly none.
3550 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3559 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
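In other words, with the 16-bit private ID space described above, MEM_CGROUP_ID_MAX works out to 2^16 - 1 = 65535, which is where the 64k ceiling on concurrently existing memory-controlled cgroups comes from.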
3564 if (memcg->id.id > 0) { in mem_cgroup_id_remove()
3565 xa_erase(&mem_cgroup_ids, memcg->id.id); in mem_cgroup_id_remove()
3566 memcg->id.id = 0; in mem_cgroup_id_remove()
3573 refcount_add(n, &memcg->id.ref); in mem_cgroup_id_get_many()
3578 if (refcount_sub_and_test(n, &memcg->id.ref)) { in mem_cgroup_id_put_many()
3582 css_put(&memcg->css); in mem_cgroup_id_put_many()
3593 while (!refcount_inc_not_zero(&memcg->id.ref)) { in mem_cgroup_id_get_online()
3610 * mem_cgroup_from_id - look up a memcg from a memcg id
3636 memcg = ERR_PTR(-ENOENT); in mem_cgroup_get_from_ino()
3649 free_percpu(pn->lruvec_stats_percpu); in free_mem_cgroup_per_node_info()
3650 kfree(pn->lruvec_stats); in free_mem_cgroup_per_node_info()
3663 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats), in alloc_mem_cgroup_per_node_info()
3665 if (!pn->lruvec_stats) in alloc_mem_cgroup_per_node_info()
3668 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu, in alloc_mem_cgroup_per_node_info()
3670 if (!pn->lruvec_stats_percpu) in alloc_mem_cgroup_per_node_info()
3673 lruvec_init(&pn->lruvec); in alloc_mem_cgroup_per_node_info()
3674 pn->memcg = memcg; in alloc_mem_cgroup_per_node_info()
3676 memcg->nodeinfo[node] = pn; in alloc_mem_cgroup_per_node_info()
3687 obj_cgroup_put(memcg->orig_objcg); in __mem_cgroup_free()
3690 free_mem_cgroup_per_node_info(memcg->nodeinfo[node]); in __mem_cgroup_free()
3692 kfree(memcg->vmstats); in __mem_cgroup_free()
3693 free_percpu(memcg->vmstats_percpu); in __mem_cgroup_free()
3715 return ERR_PTR(-ENOMEM); in mem_cgroup_alloc()
3717 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL, in mem_cgroup_alloc()
3721 error = -ENOMEM; in mem_cgroup_alloc()
3723 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), in mem_cgroup_alloc()
3725 if (!memcg->vmstats) in mem_cgroup_alloc()
3728 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu, in mem_cgroup_alloc()
3730 if (!memcg->vmstats_percpu) in mem_cgroup_alloc()
3738 pstatc_pcpu = parent->vmstats_percpu; in mem_cgroup_alloc()
3739 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_alloc()
3740 statc->parent_pcpu = parent ? pstatc_pcpu : NULL; in mem_cgroup_alloc()
3741 statc->vmstats = memcg->vmstats; in mem_cgroup_alloc()
3751 INIT_WORK(&memcg->high_work, high_work_func); in mem_cgroup_alloc()
3752 vmpressure_init(&memcg->vmpressure); in mem_cgroup_alloc()
3753 INIT_LIST_HEAD(&memcg->memory_peaks); in mem_cgroup_alloc()
3754 INIT_LIST_HEAD(&memcg->swap_peaks); in mem_cgroup_alloc()
3755 spin_lock_init(&memcg->peaks_lock); in mem_cgroup_alloc()
3756 memcg->socket_pressure = get_jiffies_64(); in mem_cgroup_alloc()
3758 seqlock_init(&memcg->socket_pressure_seqlock); in mem_cgroup_alloc()
3761 memcg->kmemcg_id = -1; in mem_cgroup_alloc()
3762 INIT_LIST_HEAD(&memcg->objcg_list); in mem_cgroup_alloc()
3764 INIT_LIST_HEAD(&memcg->cgwb_list); in mem_cgroup_alloc()
3766 memcg->cgwb_frn[i].done = in mem_cgroup_alloc()
3770 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock); in mem_cgroup_alloc()
3771 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue); in mem_cgroup_alloc()
3772 memcg->deferred_split_queue.split_queue_len = 0; in mem_cgroup_alloc()
3795 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
3798 memcg->zswap_max = PAGE_COUNTER_MAX; in mem_cgroup_css_alloc()
3799 WRITE_ONCE(memcg->zswap_writeback, true); in mem_cgroup_css_alloc()
3801 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_alloc()
3803 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent)); in mem_cgroup_css_alloc()
3805 page_counter_init(&memcg->memory, &parent->memory, memcg_on_dfl); in mem_cgroup_css_alloc()
3806 page_counter_init(&memcg->swap, &parent->swap, false); in mem_cgroup_css_alloc()
3808 memcg->memory.track_failcnt = !memcg_on_dfl; in mem_cgroup_css_alloc()
3809 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable)); in mem_cgroup_css_alloc()
3810 page_counter_init(&memcg->kmem, &parent->kmem, false); in mem_cgroup_css_alloc()
3811 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false); in mem_cgroup_css_alloc()
3816 page_counter_init(&memcg->memory, NULL, true); in mem_cgroup_css_alloc()
3817 page_counter_init(&memcg->swap, NULL, false); in mem_cgroup_css_alloc()
3819 page_counter_init(&memcg->kmem, NULL, false); in mem_cgroup_css_alloc()
3820 page_counter_init(&memcg->tcpmem, NULL, false); in mem_cgroup_css_alloc()
3823 return &memcg->css; in mem_cgroup_css_alloc()
3832 return &memcg->css; in mem_cgroup_css_alloc()
3856 refcount_set(&memcg->id.ref, 1); in mem_cgroup_css_online()
3869 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL); in mem_cgroup_css_online()
3876 return -ENOMEM; in mem_cgroup_css_online()
3885 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_offline()
3886 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_offline()
3915 wb_wait_for_completion(&memcg->cgwb_frn[i].done); in mem_cgroup_css_free()
3926 vmpressure_cleanup(&memcg->vmpressure); in mem_cgroup_css_free()
3927 cancel_work_sync(&memcg->high_work); in mem_cgroup_css_free()
3934 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3950 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3951 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3953 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3954 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3956 page_counter_set_min(&memcg->memory, 0); in mem_cgroup_css_reset()
3957 page_counter_set_low(&memcg->memory, 0); in mem_cgroup_css_reset()
3958 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3960 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX); in mem_cgroup_css_reset()
3967 /* pointer to the non-hierarchical (CPU aggregated) counters */
3986 for (i = 0; i < ac->size; i++) { in mem_cgroup_stat_aggregate()
3989 * below us. We're in a per-cpu loop here and this is in mem_cgroup_stat_aggregate()
3992 delta = ac->pending[i]; in mem_cgroup_stat_aggregate()
3994 ac->pending[i] = 0; in mem_cgroup_stat_aggregate()
3998 v = READ_ONCE(ac->cstat[i]); in mem_cgroup_stat_aggregate()
3999 if (v != ac->cstat_prev[i]) { in mem_cgroup_stat_aggregate()
4000 delta_cpu = v - ac->cstat_prev[i]; in mem_cgroup_stat_aggregate()
4002 ac->cstat_prev[i] = v; in mem_cgroup_stat_aggregate()
4007 ac->local[i] += delta_cpu; in mem_cgroup_stat_aggregate()
4010 ac->aggregate[i] += delta; in mem_cgroup_stat_aggregate()
4011 if (ac->ppending) in mem_cgroup_stat_aggregate()
4012 ac->ppending[i] += delta; in mem_cgroup_stat_aggregate()
4023 if (atomic_read(&memcg->kmem_stat)) { in flush_nmi_stats()
4024 int kmem = atomic_xchg(&memcg->kmem_stat, 0); in flush_nmi_stats()
4027 memcg->vmstats->state[index] += kmem; in flush_nmi_stats()
4029 parent->vmstats->state_pending[index] += kmem; in flush_nmi_stats()
4033 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in flush_nmi_stats()
4034 struct lruvec_stats *lstats = pn->lruvec_stats; in flush_nmi_stats()
4038 plstats = parent->nodeinfo[nid]->lruvec_stats; in flush_nmi_stats()
4040 if (atomic_read(&pn->slab_reclaimable)) { in flush_nmi_stats()
4041 int slab = atomic_xchg(&pn->slab_reclaimable, 0); in flush_nmi_stats()
4044 lstats->state[index] += slab; in flush_nmi_stats()
4046 plstats->state_pending[index] += slab; in flush_nmi_stats()
4048 if (atomic_read(&pn->slab_unreclaimable)) { in flush_nmi_stats()
4049 int slab = atomic_xchg(&pn->slab_unreclaimable, 0); in flush_nmi_stats()
4052 lstats->state[index] += slab; in flush_nmi_stats()
4054 plstats->state_pending[index] += slab; in flush_nmi_stats()
4074 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu); in mem_cgroup_css_rstat_flush()
4077 .aggregate = memcg->vmstats->state, in mem_cgroup_css_rstat_flush()
4078 .local = memcg->vmstats->state_local, in mem_cgroup_css_rstat_flush()
4079 .pending = memcg->vmstats->state_pending, in mem_cgroup_css_rstat_flush()
4080 .ppending = parent ? parent->vmstats->state_pending : NULL, in mem_cgroup_css_rstat_flush()
4081 .cstat = statc->state, in mem_cgroup_css_rstat_flush()
4082 .cstat_prev = statc->state_prev, in mem_cgroup_css_rstat_flush()
4088 .aggregate = memcg->vmstats->events, in mem_cgroup_css_rstat_flush()
4089 .local = memcg->vmstats->events_local, in mem_cgroup_css_rstat_flush()
4090 .pending = memcg->vmstats->events_pending, in mem_cgroup_css_rstat_flush()
4091 .ppending = parent ? parent->vmstats->events_pending : NULL, in mem_cgroup_css_rstat_flush()
4092 .cstat = statc->events, in mem_cgroup_css_rstat_flush()
4093 .cstat_prev = statc->events_prev, in mem_cgroup_css_rstat_flush()
4099 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid]; in mem_cgroup_css_rstat_flush()
4100 struct lruvec_stats *lstats = pn->lruvec_stats; in mem_cgroup_css_rstat_flush()
4105 plstats = parent->nodeinfo[nid]->lruvec_stats; in mem_cgroup_css_rstat_flush()
4107 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu); in mem_cgroup_css_rstat_flush()
4110 .aggregate = lstats->state, in mem_cgroup_css_rstat_flush()
4111 .local = lstats->state_local, in mem_cgroup_css_rstat_flush()
4112 .pending = lstats->state_pending, in mem_cgroup_css_rstat_flush()
4113 .ppending = plstats ? plstats->state_pending : NULL, in mem_cgroup_css_rstat_flush()
4114 .cstat = lstatc->state, in mem_cgroup_css_rstat_flush()
4115 .cstat_prev = lstatc->state_prev, in mem_cgroup_css_rstat_flush()
4121 WRITE_ONCE(statc->stats_updates, 0); in mem_cgroup_css_rstat_flush()
4122 /* We are in a per-cpu loop here, only do the atomic write once */ in mem_cgroup_css_rstat_flush()
4123 if (atomic_read(&memcg->vmstats->stats_updates)) in mem_cgroup_css_rstat_flush()
4124 atomic_set(&memcg->vmstats->stats_updates, 0); in mem_cgroup_css_rstat_flush()
4130 * Set the update flag to cause task->objcg to be initialized lazily in mem_cgroup_fork()
4135 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG; in mem_cgroup_fork()
4140 struct obj_cgroup *objcg = task->objcg; in mem_cgroup_exit()
4152 task->objcg = NULL; in mem_cgroup_exit()
4169 if (task->mm && READ_ONCE(task->mm->owner) == task) in mem_cgroup_lru_gen_attach()
4170 lru_gen_migrate_mm(task->mm); in mem_cgroup_lru_gen_attach()
4184 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg); in mem_cgroup_kmem_attach()
4209 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE; in memory_current_read()
4212 #define OFP_PEAK_UNSET (((-1UL)))
4216 struct cgroup_of_peak *ofp = of_peak(sf->private); in peak_show()
4217 u64 fd_peak = READ_ONCE(ofp->value), peak; in peak_show()
4221 peak = pc->watermark; in peak_show()
4223 peak = max(fd_peak, READ_ONCE(pc->local_watermark)); in peak_show()
4233 return peak_show(sf, v, &memcg->memory); in memory_peak_show()
4240 ofp->value = OFP_PEAK_UNSET; in peak_open()
4249 if (ofp->value == OFP_PEAK_UNSET) { in peak_release()
4253 spin_lock(&memcg->peaks_lock); in peak_release()
4254 list_del(&ofp->list); in peak_release()
4255 spin_unlock(&memcg->peaks_lock); in peak_release()
4267 spin_lock(&memcg->peaks_lock); in peak_write()
4270 WRITE_ONCE(pc->local_watermark, usage); in peak_write()
4273 if (usage > peer_ctx->value) in peak_write()
4274 WRITE_ONCE(peer_ctx->value, usage); in peak_write()
4277 if (ofp->value == OFP_PEAK_UNSET) in peak_write()
4278 list_add(&ofp->list, watchers); in peak_write()
4280 WRITE_ONCE(ofp->value, usage); in peak_write()
4281 spin_unlock(&memcg->peaks_lock); in peak_write()
4291 return peak_write(of, buf, nbytes, off, &memcg->memory, in memory_peak_write()
4292 &memcg->memory_peaks); in memory_peak_write()
4300 READ_ONCE(mem_cgroup_from_seq(m)->memory.min)); in memory_min_show()
4315 page_counter_set_min(&memcg->memory, min); in memory_min_write()
4323 READ_ONCE(mem_cgroup_from_seq(m)->memory.low)); in memory_low_show()
4338 page_counter_set_low(&memcg->memory, low); in memory_low_write()
4346 READ_ONCE(mem_cgroup_from_seq(m)->memory.high)); in memory_high_show()
4363 page_counter_set_high(&memcg->memory, high); in memory_high_write()
4365 if (of->file->f_flags & O_NONBLOCK) in memory_high_write()
4369 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write()
4384 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
4387 if (!reclaimed && !nr_retries--) in memory_high_write()
4398 READ_ONCE(mem_cgroup_from_seq(m)->memory.max)); in memory_max_show()
4415 xchg(&memcg->memory.max, max); in memory_max_write()
4417 if (of->file->f_flags & O_NONBLOCK) in memory_max_write()
4421 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write()
4436 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
4438 nr_reclaims--; in memory_max_write()
4472 __memory_events_show(m, memcg->memory_events); in memory_events_show()
4480 __memory_events_show(m, memcg->memory_events_local); in memory_events_local_show()
4491 return -ENOMEM; in memory_stat_show()
4541 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group)); in memory_oom_group_show()
4554 return -EINVAL; in memory_oom_group_write()
4561 return -EINVAL; in memory_oom_group_write()
4563 WRITE_ONCE(memcg->oom_group, oom_group); in memory_oom_group_write()
4674 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4675 * @root: the top ancestor of the sub-tree being checked
4679 * of a top-down tree iteration, not for isolated queries.
4693 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection); in mem_cgroup_calculate_protection()
4705 css_get(&memcg->css); in charge_memcg()
4719 css_put(&memcg->css); in __mem_cgroup_charge()
4725 * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio
4743 * system-level stats via lruvec_stat_mod_folio. Return 0, and skip in mem_cgroup_charge_hugetlb()
4751 ret = -ENOMEM; in mem_cgroup_charge_hugetlb()
4759 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4783 if (!memcg || !css_tryget_online(&memcg->css)) in mem_cgroup_swapin_charge_folio()
4789 css_put(&memcg->css); in mem_cgroup_swapin_charge_folio()
4808 if (ug->nr_memory) { in uncharge_batch()
4809 memcg_uncharge(ug->memcg, ug->nr_memory); in uncharge_batch()
4810 if (ug->nr_kmem) { in uncharge_batch()
4811 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem); in uncharge_batch()
4812 memcg1_account_kmem(ug->memcg, -ug->nr_kmem); in uncharge_batch()
4814 memcg1_oom_recover(ug->memcg); in uncharge_batch()
4817 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid); in uncharge_batch()
4820 css_put(&ug->memcg->css); in uncharge_batch()
4850 if (ug->memcg != memcg) { in uncharge_folio()
4851 if (ug->memcg) { in uncharge_folio()
4855 ug->memcg = memcg; in uncharge_folio()
4856 ug->nid = folio_nid(folio); in uncharge_folio()
4859 css_get(&memcg->css); in uncharge_folio()
4865 ug->nr_memory += nr_pages; in uncharge_folio()
4866 ug->nr_kmem += nr_pages; in uncharge_folio()
4868 folio->memcg_data = 0; in uncharge_folio()
4873 ug->nr_memory += nr_pages; in uncharge_folio()
4874 ug->pgpgout++; in uncharge_folio()
4877 folio->memcg_data = 0; in uncharge_folio()
4880 css_put(&memcg->css); in uncharge_folio()
4887 /* Don't touch folio->lru of any random page, pre-check: */ in __mem_cgroup_uncharge()
4902 for (i = 0; i < folios->nr; i++) in __mem_cgroup_uncharge_folios()
4903 uncharge_folio(folios->folios[i], &ug); in __mem_cgroup_uncharge_folios()
4909 * mem_cgroup_replace_folio - Charge a folio's replacement.
4916 * Both folios must be locked, @new->mapping must be set up.
4940 /* Force-charge the new page. The old one will be freed soon */ in mem_cgroup_replace_folio()
4942 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_replace_folio()
4944 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_replace_folio()
4947 css_get(&memcg->css); in mem_cgroup_replace_folio()
4953 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4961 * Both folios must be locked, @new->mapping must be set up.
4989 /* Warning should never happen, so don't worry about refcount non-0 */ in mem_cgroup_migrate()
4991 old->memcg_data = 0; in mem_cgroup_migrate()
5014 if (css_tryget(&memcg->css)) in mem_cgroup_sk_alloc()
5015 sk->sk_memcg = memcg; in mem_cgroup_sk_alloc()
5025 css_put(&memcg->css); in mem_cgroup_sk_free()
5032 if (sk->sk_memcg == newsk->sk_memcg) in mem_cgroup_sk_inherit()
5039 css_get(&memcg->css); in mem_cgroup_sk_inherit()
5041 newsk->sk_memcg = sk->sk_memcg; in mem_cgroup_sk_inherit()
5045 * mem_cgroup_sk_charge - charge socket memory
5070 * mem_cgroup_sk_uncharge - uncharge socket memory
5083 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_sk_uncharge()
5110 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
5111 * basically everything that doesn't depend on a specific mem_cgroup structure
5121 * used for per-memcg-per-cpu caching of per-node statistics. In order in mem_cgroup_init()
5131 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
5133 INIT_WORK(&per_cpu_ptr(&obj_stock, cpu)->work, in mem_cgroup_init()
5149 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5155 * Returns 0 on success, -ENOMEM on failure.
5180 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
5184 return -ENOMEM; in __mem_cgroup_try_charge_swap()
5189 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
5198 * __mem_cgroup_uncharge_swap - uncharge swap space
5213 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
5215 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
5217 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
5231 READ_ONCE(memcg->swap.max) - in mem_cgroup_get_nr_swap_pages()
5232 page_counter_read(&memcg->swap)); in mem_cgroup_get_nr_swap_pages()
5252 unsigned long usage = page_counter_read(&memcg->swap); in mem_cgroup_swap_full()
5254 if (usage * 2 >= READ_ONCE(memcg->swap.high) || in mem_cgroup_swap_full()
5255 usage * 2 >= READ_ONCE(memcg->swap.max)) in mem_cgroup_swap_full()
5269 "Please report your usecase to linux-mm@kvack.org if you " in setup_swap_account()
5280 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; in swap_current_read()
5287 return peak_show(sf, v, &memcg->swap); in swap_peak_show()
5295 return peak_write(of, buf, nbytes, off, &memcg->swap, in swap_peak_write()
5296 &memcg->swap_peaks); in swap_peak_write()
5302 READ_ONCE(mem_cgroup_from_seq(m)->swap.high)); in swap_high_show()
5317 page_counter_set_high(&memcg->swap, high); in swap_high_write()
5325 READ_ONCE(mem_cgroup_from_seq(m)->swap.max)); in swap_max_show()
5340 xchg(&memcg->swap.max, max); in swap_max_write()
5350 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH])); in swap_events_show()
5352 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX])); in swap_events_show()
5354 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL])); in swap_events_show()
5396 * obj_cgroup_may_zswap - check if this cgroup can zswap
5401 * This doesn't check for specific headroom, and it is not atomic
5403 * once compression has occurred, and this optimistic pre-check avoids
5418 unsigned long max = READ_ONCE(memcg->zswap_max); in obj_cgroup_may_zswap()
5441 * obj_cgroup_charge_zswap - charge compression backend memory
5455 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC)); in obj_cgroup_charge_zswap()
5469 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5486 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size); in obj_cgroup_uncharge_zswap()
5487 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1); in obj_cgroup_uncharge_zswap()
5498 if (!READ_ONCE(memcg->zswap_writeback)) in mem_cgroup_zswap_writeback_enabled()
5516 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max)); in zswap_max_show()
5531 xchg(&memcg->zswap_max, max); in zswap_max_write()
5540 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback)); in zswap_writeback_show()
5555 return -EINVAL; in zswap_writeback_write()
5557 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback); in zswap_writeback_write()
5602 return memcg ? cpuset_node_allowed(memcg->css.cgroup, nid) : true; in mem_cgroup_node_allowed()