Lines matching full-text search for "charge-current-limit-mapping"
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
19 * Charge lifetime sanitation
28 #include <linux/cgroup-defs.h>
39 #include <linux/page-flags.h>
40 #include <linux/backing-dev.h>
70 #include "memcontrol-v1.h"
104 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
105 (current->flags & PF_EXITING);
113 return &memcg->vmpressure;
144 * objcg->nr_charged_bytes can't have an arbitrary byte value.
148 * 1) CPU0: objcg == stock->cached_objcg
153 * objcg->nr_charged_bytes = PAGE_SIZE - 92
155 * 92 bytes are added to stock->nr_bytes
157 * 92 bytes are added to objcg->nr_charged_bytes
162 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
163 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
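/*
 * Illustrative note (not part of the original memcontrol.c): in the
 * sequence above, the same 92-byte remainder that left through the stock
 * eventually comes back to objcg->nr_charged_bytes, so the counter only
 * ever ends up holding whole pages by release time, which is what the
 * WARN_ON_ONCE() above checks. Assuming a 4096-byte page:
 * (4096 - 92) + 92 == 4096, i.e. the low PAGE_SIZE - 1 bits are zero.
 */
static int nr_charged_bytes_is_page_aligned(unsigned long nr_bytes)
{
	const unsigned long page_size = 4096;		/* assumed PAGE_SIZE */

	return (nr_bytes & (page_size - 1)) == 0;	/* true for (4096 - 92) + 92 */
}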
170 list_del(&objcg->list);
186 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
192 INIT_LIST_HEAD(&objcg->list);
201 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
206 list_add(&objcg->list, &memcg->objcg_list);
208 list_for_each_entry(iter, &memcg->objcg_list, list)
209 WRITE_ONCE(iter->memcg, parent);
211 list_splice(&memcg->objcg_list, &parent->objcg_list);
215 percpu_ref_kill(&objcg->refcnt);
231 * mem_cgroup_css_from_folio - css of the memcg associated with a folio
248 return &memcg->css;
252 * page_cgroup_ino - return inode number of the memcg a page is charged to
273 while (memcg && !(memcg->css.flags & CSS_ONLINE))
276 ino = cgroup_ino(memcg->css.cgroup);
371 /* Non-hierarchical (CPU aggregated) state */
392 x = READ_ONCE(pn->lruvec_stats->state[i]);
415 x = READ_ONCE(pn->lruvec_stats->state_local[i]);
511 /* Non-hierarchical (CPU aggregated) page state & events */
569 return atomic64_read(&vmstats->stats_updates) >
582 cgroup_rstat_updated(memcg->css.cgroup, cpu);
583 statc = this_cpu_ptr(memcg->vmstats_percpu);
584 for (; statc; statc = statc->parent) {
585 stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
586 WRITE_ONCE(statc->stats_updates, stats_updates);
591 * If @memcg is already flush-able, increasing stats_updates is
594 if (!memcg_vmstats_needs_flush(statc->vmstats))
596 &statc->vmstats->stats_updates);
597 WRITE_ONCE(statc->stats_updates, 0);
603 bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
605 trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
614 cgroup_rstat_flush(memcg->css.cgroup);
618 * mem_cgroup_flush_stats - flush the stats of a memory cgroup subtree
648 * in latency-sensitive paths is as cheap as possible.
662 x = READ_ONCE(memcg->vmstats->state[i]);
674 * up non-zero sub-page updates to 1 page as zero page updates are ignored.
687 * __mod_memcg_state - update cgroup memory statistics
689 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
703 __this_cpu_add(memcg->vmstats_percpu->state[i], val);
718 x = READ_ONCE(memcg->vmstats->state_local[i]);
738 memcg = pn->memcg;
742 * update their counter from in-interrupt context. For these two
760 __this_cpu_add(memcg->vmstats_percpu->state[i], val);
763 __this_cpu_add(pn->lruvec_stats_percpu->state[i], val);
772 * __mod_lruvec_state - update lruvec memory statistics
779 * change of state at this level: per-node, per-cgroup, per-lruvec.
826 * when we free the slab object, we need to update the per-memcg
839 * __count_memcg_events - account VM events in a cgroup
856 __this_cpu_add(memcg->vmstats_percpu->events[i], count);
869 return READ_ONCE(memcg->vmstats->events[i]);
879 return READ_ONCE(memcg->vmstats->events_local[i]);
885 * mm_update_next_owner() may clear mm->owner to NULL
901 return current->active_memcg;
908 * Obtain a reference on mm->memcg and returns it if successful. If mm
911 * 2) current->mm->memcg, if available
935 css_get(&memcg->css);
938 mm = current->mm;
945 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
948 } while (!css_tryget(&memcg->css));
955 * get_mem_cgroup_from_current - Obtain a reference on current task's memcg.
966 memcg = mem_cgroup_from_task(current);
967 if (!css_tryget(&memcg->css)) {
976 * get_mem_cgroup_from_folio - Obtain a reference on a given folio's memcg.
987 if (!memcg || WARN_ON_ONCE(!css_tryget(&memcg->css)))
994 * mem_cgroup_iter - iterate over memory cgroup hierarchy
1000 * @root itself, or %NULL after a full round-trip.
1004 * to cancel a hierarchy walk before the round-trip is complete.
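/*
 * Illustrative sketch (not part of the original memcontrol.c): the usual
 * calling pattern for mem_cgroup_iter(), assuming the declarations in
 * <linux/memcontrol.h>. The previous return value is passed back as @prev,
 * and a walk that is abandoned early must be closed with
 * mem_cgroup_iter_break(root, memcg) so the reference held on the current
 * position is dropped. The helper name below is hypothetical.
 */
#include <linux/memcontrol.h>

static unsigned int count_subtree_memcgs_sketch(struct mem_cgroup *root)
{
	struct mem_cgroup *memcg;
	unsigned int nr = 0;

	memcg = mem_cgroup_iter(root, NULL, NULL);
	do {
		nr++;	/* @root itself is visited first */
	} while ((memcg = mem_cgroup_iter(root, memcg, NULL)));

	return nr;
}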
1031 int nid = reclaim->pgdat->node_id;
1033 iter = &root->nodeinfo[nid]->iter;
1034 gen = atomic_read(&iter->generation);
1037 * On start, join the current reclaim iteration cycle.
1041 reclaim->generation = gen;
1042 else if (reclaim->generation != gen)
1045 pos = READ_ONCE(iter->position);
1049 css = pos ? &pos->css : NULL;
1051 while ((css = css_next_descendant_pre(css, &root->css))) {
1057 if (css == &root->css || css_tryget(css))
1069 if (cmpxchg(&iter->position, pos, next) != pos) {
1070 if (css && css != &root->css)
1076 atomic_inc(&iter->generation);
1081 * the hierarchy - make sure they see at least
1092 css_put(&prev->css);
1098 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1108 css_put(&prev->css);
1119 mz = from->nodeinfo[nid];
1120 iter = &mz->iter;
1121 cmpxchg(&iter->position, dead_memcg, NULL);
1136 * When cgroup1 non-hierarchy mode is used,
1147 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1153 * descendants and calls @fn for each task. If @fn returns a non-zero
1171 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1200 * folio_lruvec_lock - Lock the lruvec for a folio.
1204 * - folio locked
1205 * - folio_test_lru false
1206 * - folio frozen (refcount of 0)
1214 spin_lock(&lruvec->lru_lock);
1221 * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1225 * - folio locked
1226 * - folio_test_lru false
1227 * - folio frozen (refcount of 0)
1236 spin_lock_irq(&lruvec->lru_lock);
1243 * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1248 * - folio locked
1249 * - folio_test_lru false
1250 * - folio frozen (refcount of 0)
1260 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1267 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1287 lru_size = &mz->lru_zone_size[zid][lru];
1305 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1315 unsigned long limit;
1317 count = page_counter_read(&memcg->memory);
1318 limit = READ_ONCE(memcg->memory.max);
1319 if (count < limit)
1320 margin = limit - count;
1323 count = page_counter_read(&memcg->memsw);
1324 limit = READ_ONCE(memcg->memsw.max);
1325 if (count < limit)
1326 margin = min(margin, limit - count);
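/*
 * Illustrative sketch (not part of the original memcontrol.c): the margin
 * computed above is "how many more pages can be charged", and with memsw
 * accounting the combined memory+swap counter can only tighten it. Worked
 * example: memory usage 900 of 1000 pages leaves 100, memsw usage 980 of
 * 1000 pages leaves 20, so the chargeable margin is 20 pages.
 */
static unsigned long margin_example(unsigned long mem_count, unsigned long mem_limit,
				    unsigned long memsw_count, unsigned long memsw_limit)
{
	unsigned long margin = 0;

	if (mem_count < mem_limit)
		margin = mem_limit - mem_count;			/* 100 in the example */

	if (memsw_count < memsw_limit) {
		unsigned long memsw_margin = memsw_limit - memsw_count;	/* 20 */

		if (memsw_margin < margin)
			margin = memsw_margin;
	} else {
		margin = 0;
	}

	return margin;						/* 20 pages */
}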
1460 * 1) generic big picture -> specifics and details
1461 * 2) reflecting userspace activity -> reflecting kernel heuristics
1463 * Current memory state:
1520 * @memcg: The memory cgroup that went over limit
1532 pr_cont_cgroup_path(memcg->css.cgroup);
1545 * @memcg: The memory cgroup that went over limit
1555 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1556 K((u64)page_counter_read(&memcg->memory)),
1557 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1559 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1560 K((u64)page_counter_read(&memcg->swap)),
1561 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1564 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1565 K((u64)page_counter_read(&memcg->memsw)),
1566 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1567 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1568 K((u64)page_counter_read(&memcg->kmem)),
1569 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1574 pr_cont_cgroup_path(memcg->css.cgroup);
1582 * Return the memory (and swap, if configured) limit for a memcg.
1586 unsigned long max = READ_ONCE(memcg->memory.max);
1590 /* Calculate swap excess capacity from memsw limit */
1591 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1597 max += min(READ_ONCE(memcg->swap.max),
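/*
 * Illustrative sketch (not part of the original memcontrol.c): the OOM
 * limit above is memory.max plus the swap the group may still use. On
 * cgroup1 that allowance is the memsw excess (memsw.max - memory.max); on
 * cgroup2 it is min(swap.max, total_swap_pages). Worked example for the
 * cgroup1 flavour with memory.max = 1000, memsw.max = 1500 and only 300
 * swap pages in the system:
 */
static unsigned long memcg_oom_limit_example(void)
{
	unsigned long memory_max = 1000;			/* pages */
	unsigned long memsw_max = 1500;				/* memory+swap limit */
	unsigned long total_swap_pages = 300;
	unsigned long swap_excess = memsw_max - memory_max;	/* 500 */
	unsigned long max = memory_max;

	/* the excess only counts as far as real swap exists */
	max += swap_excess < total_swap_pages ? swap_excess : total_swap_pages;

	return max;						/* 1000 + 300 = 1300 */
}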
1605 return page_counter_read(&memcg->memory);
1661 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1663 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1666 * by killing all belonging OOM-killable tasks.
1668 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1699 * highest-level memory cgroup with oom.group set.
1702 if (READ_ONCE(memcg->oom_group))
1710 css_get(&oom_group->css);
1720 pr_cont_cgroup_path(memcg->css.cgroup);
1749 * consume_stock: Try to consume stocked charge on this cpu.
1751 * @nr_pages: how many pages to charge.
1753 * The charges will only happen if @memcg matches the current cpu's memcg
1772 stock_pages = READ_ONCE(stock->nr_pages);
1773 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) {
1774 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages);
1788 unsigned int stock_pages = READ_ONCE(stock->nr_pages);
1789 struct mem_cgroup *old = READ_ONCE(stock->cached);
1795 page_counter_uncharge(&old->memory, stock_pages);
1797 page_counter_uncharge(&old->memsw, stock_pages);
1799 WRITE_ONCE(stock->nr_pages, 0);
1802 css_put(&old->css);
1803 WRITE_ONCE(stock->cached, NULL);
1822 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1838 if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
1840 css_get(&memcg->css);
1841 WRITE_ONCE(stock->cached, memcg);
1843 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages;
1844 WRITE_ONCE(stock->nr_pages, stock_pages);
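/*
 * Illustrative sketch (not part of the original memcontrol.c): a minimal,
 * single-threaded model of the per-CPU charge stock used by
 * consume_stock()/refill_stock() above. Each CPU caches a (memcg, nr_pages)
 * pair so small charges are served without touching the shared page
 * counters; the stock is only used when the cached memcg matches and enough
 * pages are stocked. css references, READ_ONCE/WRITE_ONCE and the drain
 * path are deliberately omitted.
 */
struct charge_stock_model {
	void *cached;			/* stands in for struct mem_cgroup * */
	unsigned int nr_pages;
};

static int model_consume_stock(struct charge_stock_model *stock, void *memcg,
			       unsigned int nr_pages)
{
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		return 1;		/* charge served from the local cache */
	}
	return 0;			/* caller falls back to page_counter_try_charge() */
}

static void model_refill_stock(struct charge_stock_model *stock, void *memcg,
			       unsigned int nr_pages)
{
	if (stock->cached != memcg) {
		/* the real code drains the old stock and takes a css ref here */
		stock->cached = memcg;
		stock->nr_pages = 0;
	}
	stock->nr_pages += nr_pages;
}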
1860 * Drains all per-CPU charge caches for given root_memcg resp. subtree
1871 * Notify other cpus that system-wide "drain" is running
1874 * per-cpu data. CPU up doesn't touch memcg_stock at all.
1884 memcg = READ_ONCE(stock->cached);
1885 if (memcg && READ_ONCE(stock->nr_pages) &&
1893 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1895 drain_local_stock(&stock->work);
1897 schedule_work_on(cpu, &stock->work);
1923 if (page_counter_read(&memcg->memory) <=
1924 READ_ONCE(memcg->memory.high))
1961 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
1963 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
1968 * reasonable delay curve compared to precision-adjusted overage, not
1970 * limit penalises misbehaviour cgroups by slowing them down exponentially. For
1973 * +-------+------------------------+
1975 * +-------+------------------------+
1997 * +-------+------------------------+
2015 overage = usage - high;
2025 overage = calculate_overage(page_counter_read(&memcg->memory),
2026 READ_ONCE(memcg->memory.high));
2039 overage = calculate_overage(page_counter_read(&memcg->swap),
2040 READ_ONCE(memcg->swap.high));
2077 * N-sized allocations are throttled approximately the same as one
2078 * 4N-sized allocation.
2081 * larger the current charge batch is than that.
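/*
 * Illustrative sketch (not part of the original memcontrol.c): the shape of
 * the memory.high throttling curve described above. The overage beyond the
 * high limit is turned into a fixed-point fraction of the limit, squared,
 * and scaled back down, so small excesses cost next to nothing while large
 * ones grow quadratically, and the result is scaled by the size of the
 * current charge batch. The shift values, HZ, batch size and 2*HZ clamp
 * below are assumptions, and overflow handling is omitted.
 */
static unsigned long penalty_jiffies_sketch(unsigned long usage, unsigned long high,
					    unsigned long nr_pages)
{
	const unsigned int precision_shift = 20;	/* assumed MEMCG_DELAY_PRECISION_SHIFT */
	const unsigned int scaling_shift = 14;		/* assumed MEMCG_DELAY_SCALING_SHIFT */
	const unsigned long long hz = 100;		/* assumed HZ */
	const unsigned long charge_batch = 64;		/* assumed MEMCG_CHARGE_BATCH */
	const unsigned long max_delay = 2 * hz;		/* assumed max delay per return */
	unsigned long long overage, penalty;

	if (usage <= high || !high)
		return 0;

	overage = ((unsigned long long)(usage - high) << precision_shift) / high;
	penalty = (overage * overage * hz) >> (precision_shift + scaling_shift);

	/* a 4N-page charge is penalised roughly like four N-page charges */
	penalty = penalty * nr_pages / charge_batch;

	return penalty < max_delay ? penalty : max_delay;
}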
2087 * Reclaims memory over the high limit. Called directly from
2096 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2104 memcg = get_mem_cgroup_from_mm(current->mm);
2105 current->memcg_nr_pages_over_high = 0;
2163 if (nr_reclaimed || nr_retries--) {
2169 * Reclaim didn't manage to push usage below the limit, slow
2174 * need to account for any ill-begotten jiffies to pay them off later.
2181 css_put(&memcg->css);
2203 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2204 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2207 page_counter_uncharge(&memcg->memsw, batch);
2223 * under the limit over triggering OOM kills in these cases.
2225 if (unlikely(current->flags & PF_MEMALLOC))
2228 if (unlikely(task_in_memcg_oom(current)))
2254 * Even though the limit is exceeded at this point, reclaim
2255 * may have been able to free some pages. Retry the charge
2259 * unlikely to succeed so close to the limit, and we fall back
2265 if (nr_retries--)
2277 * a forward progress or bypass the charge if the oom killer
2294 return -ENOMEM;
2305 * being freed very soon. Allow memory usage go over the limit
2308 page_counter_charge(&memcg->memory, nr_pages);
2310 page_counter_charge(&memcg->memsw, nr_pages);
2316 refill_stock(memcg, batch - nr_pages);
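/*
 * Illustrative note (not part of the original memcontrol.c): try_charge()
 * works in batches. For a request of @nr_pages it charges
 * batch = max(nr_pages, MEMCG_CHARGE_BATCH) against the page counters and,
 * on success, parks the surplus in the per-CPU stock via the
 * refill_stock(memcg, batch - nr_pages) call above, so subsequent small
 * charges on this CPU skip the counters. With an assumed batch size of 64
 * pages, a 1-page charge leaves 63 pages stocked:
 */
static unsigned int batched_charge_surplus(unsigned int nr_pages)
{
	const unsigned int charge_batch = 64;	/* assumed MEMCG_CHARGE_BATCH */
	unsigned int batch = nr_pages > charge_batch ? nr_pages : charge_batch;

	return batch - nr_pages;		/* what refill_stock() receives */
}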
2323 * not recorded as it most likely matches current's and won't
2324 * change in the meantime. As high limit is checked again before
2330 mem_high = page_counter_read(&memcg->memory) >
2331 READ_ONCE(memcg->memory.high);
2332 swap_high = page_counter_read(&memcg->swap) >
2333 READ_ONCE(memcg->swap.high);
2338 schedule_work(&memcg->high_work);
2350 * Target some best-effort fairness between the tasks,
2354 current->memcg_nr_pages_over_high += batch;
2355 set_notify_resume(current);
2367 if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2368 !(current->flags & PF_MEMALLOC) &&
2375 * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call.
2384 page_counter_uncharge(&memcg->memory, nr_pages);
2386 page_counter_uncharge(&memcg->memsw, nr_pages);
2395 * - the page lock
2396 * - LRU isolation
2397 * - exclusive reference
2399 folio->memcg_data = (unsigned long)memcg;
2403 * mem_cgroup_commit_charge - commit a previously successful try_charge().
2404 * @folio: folio to commit the charge to.
2409 css_get(&memcg->css);
2432 * Slab objects are accounted individually, not per-page.
2434 * slab->obj_exts.
2446 off = obj_to_index(slab->slab_cache, slab, p);
2456 * slab->obj_exts has not been freed yet
2485 objcg = rcu_dereference(memcg->objcg);
2500 old = xchg(&current->objcg, NULL);
2510 if (!current->mm || (current->flags & PF_KTHREAD))
2523 * Obtain the new objcg pointer. The current task can be
2530 memcg = mem_cgroup_from_task(current);
2539 } while (!try_cmpxchg(&current->objcg, &old, objcg));
2550 memcg = current->active_memcg;
2554 objcg = READ_ONCE(current->objcg);
2559 * to use the objcg by the current task.
2579 objcg = rcu_dereference_check(memcg->objcg, 1);
2623 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2624 memcg1_account_kmem(memcg, -nr_pages);
2627 css_put(&memcg->css);
2631 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2632 * @objcg: object cgroup to charge
2634 * @nr_pages: number of pages to charge
2653 css_put(&memcg->css);
2659 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2660 * @page: page to charge
2676 page->memcg_data = (unsigned long)objcg |
2700 folio->memcg_data = 0;
2720 if (READ_ONCE(stock->cached_objcg) != objcg) {
2723 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2724 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2725 WRITE_ONCE(stock->cached_objcg, objcg);
2726 stock->cached_pgdat = pgdat;
2727 } else if (stock->cached_pgdat != pgdat) {
2729 struct pglist_data *oldpg = stock->cached_pgdat;
2731 if (stock->nr_slab_reclaimable_b) {
2733 stock->nr_slab_reclaimable_b);
2734 stock->nr_slab_reclaimable_b = 0;
2736 if (stock->nr_slab_unreclaimable_b) {
2738 stock->nr_slab_unreclaimable_b);
2739 stock->nr_slab_unreclaimable_b = 0;
2741 stock->cached_pgdat = pgdat;
2744 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
2745 : &stock->nr_slab_unreclaimable_b;
2778 if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
2779 stock->nr_bytes -= nr_bytes;
2790 struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
2795 if (stock->nr_bytes) {
2796 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2797 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
2804 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages);
2805 memcg1_account_kmem(memcg, -nr_pages);
2808 css_put(&memcg->css);
2812 * The leftover is flushed to the centralized per-memcg value.
2814 * to a per-cpu stock (probably, on another CPU), see
2817 * How often it's flushed is a trade-off between the memory
2818 * limit enforcement accuracy and potential CPU contention,
2821 atomic_add(nr_bytes, &old->nr_charged_bytes);
2822 stock->nr_bytes = 0;
2826 * Flush the vmstat data in current stock
2828 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
2829 if (stock->nr_slab_reclaimable_b) {
2830 __mod_objcg_mlstate(old, stock->cached_pgdat,
2832 stock->nr_slab_reclaimable_b);
2833 stock->nr_slab_reclaimable_b = 0;
2835 if (stock->nr_slab_unreclaimable_b) {
2836 __mod_objcg_mlstate(old, stock->cached_pgdat,
2838 stock->nr_slab_unreclaimable_b);
2839 stock->nr_slab_unreclaimable_b = 0;
2841 stock->cached_pgdat = NULL;
2844 WRITE_ONCE(stock->cached_objcg, NULL);
2855 struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
2878 if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
2881 WRITE_ONCE(stock->cached_objcg, objcg);
2882 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
2883 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
2886 stock->nr_bytes += nr_bytes;
2888 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
2889 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
2890 stock->nr_bytes &= (PAGE_SIZE - 1);
2909 * In theory, objcg->nr_charged_bytes can have enough
2910 * pre-charged bytes to satisfy the allocation. However,
2911 * flushing objcg->nr_charged_bytes requires two atomic
2912 * operations, and objcg->nr_charged_bytes can't be big.
2913 * The shared objcg->nr_charged_bytes can also become a
2917 * objcg->nr_charged_bytes later on when objcg changes.
2919 * The stock's nr_bytes may contain enough pre-charged bytes
2921 * on the pre-charged bytes not being changed outside of
2923 * pre-charged bytes as well when charging pages. To avoid a
2924 * page uncharge right after a page charge, we set the
2926 * to temporarily allow the pre-charged bytes to exceed the page
2927 * size limit. The maximum reachable value of the pre-charged
2928 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
2932 nr_bytes = size & (PAGE_SIZE - 1);
2939 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
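/*
 * Illustrative sketch (not part of the original memcontrol.c): how an
 * object charge that is not a page multiple is split, per the comment
 * above. Whole pages are charged to the memcg, and the unused tail of the
 * last page (PAGE_SIZE - nr_bytes) is handed back to the per-CPU object
 * stock so the next sub-page allocation can consume it without charging
 * again. With an assumed 4096-byte page, a 100-byte object charges one
 * page and refills 3996 bytes.
 */
static void objcg_charge_split_example(unsigned long size,
				       unsigned long *pages_to_charge,
				       unsigned long *bytes_to_refill)
{
	const unsigned long page_size = 4096;		/* assumed PAGE_SIZE */
	unsigned long nr_bytes = size & (page_size - 1);
	unsigned long nr_pages = size / page_size;

	if (nr_bytes)
		nr_pages += 1;				/* round the charge up to whole pages */

	*pages_to_charge = nr_pages;
	*bytes_to_refill = nr_bytes ? page_size - nr_bytes : 0;
}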
2953 * to store obj_cgroup membership. Charge it too.
2955 return s->size + sizeof(struct obj_cgroup *);
2967 * The obtained objcg pointer is safe to use within the current scope,
2968 * defined by current task or set_active_memcg() pair.
2992 css_put(&memcg->css);
3035 -obj_full_size(s));
3054 folio_page(folio, i)->memcg_data = folio->memcg_data;
3057 obj_cgroup_get_many(__folio_objcg(folio), old_nr / new_nr - 1);
3059 css_get_many(&folio_memcg(folio)->css, old_nr / new_nr - 1);
3074 val += total_swap_pages - get_nr_swap_pages();
3077 val = page_counter_read(&memcg->memory);
3079 val = page_counter_read(&memcg->memsw);
3096 return -ENOMEM;
3098 objcg->memcg = memcg;
3099 rcu_assign_pointer(memcg->objcg, objcg);
3101 memcg->orig_objcg = objcg;
3105 memcg->kmemcg_id = memcg->id.id;
3139 return wb_domain_init(&memcg->cgwb_domain, gfp);
3144 wb_domain_exit(&memcg->cgwb_domain);
3149 wb_domain_size_changed(&memcg->cgwb_domain);
3154 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3156 if (!memcg->css.parent)
3159 return &memcg->cgwb_domain;
3163 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3171 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3174 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3184 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3196 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
3197 READ_ONCE(memcg->memory.high));
3198 unsigned long used = page_counter_read(&memcg->memory);
3200 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
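/*
 * Illustrative sketch (not part of the original memcontrol.c): the headroom
 * reported above is the tightest "min(max, high) - used" along the
 * ancestry, so the most constrained ancestor decides how much more the
 * writeback domain may dirty. One level of that computation, e.g. usage of
 * 800 pages with memory.max = 1200 and memory.high = 1000 gives a ceiling
 * of 1000 and a headroom contribution of 200 pages:
 */
static unsigned long wb_headroom_level(unsigned long used, unsigned long max,
				       unsigned long high)
{
	unsigned long ceiling = max < high ? max : high;

	/* clamps to zero when usage already exceeds the ceiling */
	return ceiling - (ceiling < used ? ceiling : used);
}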
3209 * tracks ownership per-page while the latter per-inode. This was a
3210 * deliberate design decision because honoring per-page ownership in the
3212 * and deemed unnecessary given that write-sharing an inode across
3213 * different cgroups isn't a common use-case.
3215 * Combined with inode majority-writer ownership switching, this works well
3236 * page - a page whose memcg and writeback ownerships don't match - is
3242 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
3256 int oldest = -1;
3267 frn = &memcg->cgwb_frn[i];
3268 if (frn->bdi_id == wb->bdi->id &&
3269 frn->memcg_id == wb->memcg_css->id)
3271 if (time_before64(frn->at, oldest_at) &&
3272 atomic_read(&frn->done.cnt) == 1) {
3274 oldest_at = frn->at;
3280 * Re-using an existing one. Update timestamp lazily to
3282 * reasonably up-to-date and significantly shorter than
3290 if (time_before64(frn->at, now - update_intv))
3291 frn->at = now;
3294 frn = &memcg->cgwb_frn[oldest];
3295 frn->bdi_id = wb->bdi->id;
3296 frn->memcg_id = wb->memcg_css->id;
3297 frn->at = now;
3304 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3310 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
3318 if (time_after64(frn->at, now - intv) &&
3319 atomic_read(&frn->done.cnt) == 1) {
3320 frn->at = 0;
3321 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
3322 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
3324 &frn->done);
3349 * Swap-out records and page cache shadow entries need to store memcg
3352 * memory-controlled cgroups to 64k.
3359 * even when there are much fewer than 64k cgroups - possibly none.
3361 * Maintain a private 16-bit ID space for memcg, and allow the ID to
3370 #define MEM_CGROUP_ID_MAX ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
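/*
 * Illustrative note (not part of the original memcontrol.c): because memcg
 * IDs live in this private 16-bit space, an ID always fits in an unsigned
 * short and can be packed next to other data in swap-out records and page
 * cache shadow entries. The packing below is a hypothetical layout that
 * only illustrates the size argument, not the kernel's actual encoding.
 */
static unsigned long pack_memcg_id_example(unsigned short memcg_id,
					   unsigned long payload)
{
	const unsigned int id_bits = 16;	/* MEM_CGROUP_ID_SHIFT above */

	return (payload << id_bits) | memcg_id;	/* id occupies the low 16 bits */
}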
3375 if (memcg->id.id > 0) {
3376 xa_erase(&mem_cgroup_ids, memcg->id.id);
3377 memcg->id.id = 0;
3384 refcount_add(n, &memcg->id.ref);
3389 if (refcount_sub_and_test(n, &memcg->id.ref)) {
3393 css_put(&memcg->css);
3403 * mem_cgroup_from_id - look up a memcg from a memcg id
3429 memcg = ERR_PTR(-ENOENT);
3445 pn->lruvec_stats = kzalloc_node(sizeof(struct lruvec_stats),
3447 if (!pn->lruvec_stats)
3450 pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
3452 if (!pn->lruvec_stats_percpu)
3455 lruvec_init(&pn->lruvec);
3456 pn->memcg = memcg;
3458 memcg->nodeinfo[node] = pn;
3461 kfree(pn->lruvec_stats);
3468 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
3473 free_percpu(pn->lruvec_stats_percpu);
3474 kfree(pn->lruvec_stats);
3482 obj_cgroup_put(memcg->orig_objcg);
3487 kfree(memcg->vmstats);
3488 free_percpu(memcg->vmstats_percpu);
3509 return ERR_PTR(-ENOMEM);
3511 error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
3515 error = -ENOMEM;
3517 memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
3519 if (!memcg->vmstats)
3522 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
3524 if (!memcg->vmstats_percpu)
3532 pstatc = per_cpu_ptr(parent->vmstats_percpu, cpu);
3533 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3534 statc->parent = parent ? pstatc : NULL;
3535 statc->vmstats = memcg->vmstats;
3545 INIT_WORK(&memcg->high_work, high_work_func);
3546 vmpressure_init(&memcg->vmpressure);
3547 INIT_LIST_HEAD(&memcg->memory_peaks);
3548 INIT_LIST_HEAD(&memcg->swap_peaks);
3549 spin_lock_init(&memcg->peaks_lock);
3550 memcg->socket_pressure = jiffies;
3552 memcg->kmemcg_id = -1;
3553 INIT_LIST_HEAD(&memcg->objcg_list);
3555 INIT_LIST_HEAD(&memcg->cgwb_list);
3557 memcg->cgwb_frn[i].done =
3561 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
3562 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
3563 memcg->deferred_split_queue.split_queue_len = 0;
3585 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3588 memcg->zswap_max = PAGE_COUNTER_MAX;
3589 WRITE_ONCE(memcg->zswap_writeback, true);
3591 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3593 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
3595 page_counter_init(&memcg->memory, &parent->memory, true);
3596 page_counter_init(&memcg->swap, &parent->swap, false);
3598 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
3599 page_counter_init(&memcg->kmem, &parent->kmem, false);
3600 page_counter_init(&memcg->tcpmem, &parent->tcpmem, false);
3605 page_counter_init(&memcg->memory, NULL, true);
3606 page_counter_init(&memcg->swap, NULL, false);
3608 page_counter_init(&memcg->kmem, NULL, false);
3609 page_counter_init(&memcg->tcpmem, NULL, false);
3612 return &memcg->css;
3621 return &memcg->css;
3645 refcount_set(&memcg->id.ref, 1);
3658 xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
3665 return -ENOMEM;
3674 page_counter_set_min(&memcg->memory, 0);
3675 page_counter_set_low(&memcg->memory, 0);
3704 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
3715 vmpressure_cleanup(&memcg->vmpressure);
3716 cancel_work_sync(&memcg->high_work);
3723 * mem_cgroup_css_reset - reset the states of a mem_cgroup
3732 * The current implementation only resets the essential configurations.
3739 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
3740 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
3742 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
3743 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
3745 page_counter_set_min(&memcg->memory, 0);
3746 page_counter_set_low(&memcg->memory, 0);
3747 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
3749 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
3756 /* pointer to the non-hierarchical (CPU aggregated) counters */
3775 for (i = 0; i < ac->size; i++) {
3778 * below us. We're in a per-cpu loop here and this is
3781 delta = ac->pending[i];
3783 ac->pending[i] = 0;
3787 v = READ_ONCE(ac->cstat[i]);
3788 if (v != ac->cstat_prev[i]) {
3789 delta_cpu = v - ac->cstat_prev[i];
3791 ac->cstat_prev[i] = v;
3796 ac->local[i] += delta_cpu;
3799 ac->aggregate[i] += delta;
3800 if (ac->ppending)
3801 ac->ppending[i] += delta;
3814 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
3817 .aggregate = memcg->vmstats->state,
3818 .local = memcg->vmstats->state_local,
3819 .pending = memcg->vmstats->state_pending,
3820 .ppending = parent ? parent->vmstats->state_pending : NULL,
3821 .cstat = statc->state,
3822 .cstat_prev = statc->state_prev,
3828 .aggregate = memcg->vmstats->events,
3829 .local = memcg->vmstats->events_local,
3830 .pending = memcg->vmstats->events_pending,
3831 .ppending = parent ? parent->vmstats->events_pending : NULL,
3832 .cstat = statc->events,
3833 .cstat_prev = statc->events_prev,
3839 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
3840 struct lruvec_stats *lstats = pn->lruvec_stats;
3845 plstats = parent->nodeinfo[nid]->lruvec_stats;
3847 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
3850 .aggregate = lstats->state,
3851 .local = lstats->state_local,
3852 .pending = lstats->state_pending,
3853 .ppending = plstats ? plstats->state_pending : NULL,
3854 .cstat = lstatc->state,
3855 .cstat_prev = lstatc->state_prev,
3861 WRITE_ONCE(statc->stats_updates, 0);
3862 /* We are in a per-cpu loop here, only do the atomic write once */
3863 if (atomic64_read(&memcg->vmstats->stats_updates))
3864 atomic64_set(&memcg->vmstats->stats_updates, 0);
3870 * Set the update flag to cause task->objcg to be initialized lazily
3872 * because it's always performed on the current task, as does
3875 task->objcg = (struct obj_cgroup *)CURRENT_OBJCG_UPDATE_FLAG;
3880 struct obj_cgroup *objcg = task->objcg;
3889 * because it's always performed on the current task, as does
3892 task->objcg = NULL;
3909 if (task->mm && READ_ONCE(task->mm->owner) == task)
3910 lru_gen_migrate_mm(task->mm);
3924 set_bit(CURRENT_OBJCG_UPDATE_BIT, (unsigned long *)&task->objcg);
3949 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
3952 #define OFP_PEAK_UNSET (((-1UL)))
3956 struct cgroup_of_peak *ofp = of_peak(sf->private);
3957 u64 fd_peak = READ_ONCE(ofp->value), peak;
3961 peak = pc->watermark;
3963 peak = max(fd_peak, READ_ONCE(pc->local_watermark));
3973 return peak_show(sf, v, &memcg->memory);
3980 ofp->value = OFP_PEAK_UNSET;
3989 if (ofp->value == OFP_PEAK_UNSET) {
3993 spin_lock(&memcg->peaks_lock);
3994 list_del(&ofp->list);
3995 spin_unlock(&memcg->peaks_lock);
4007 spin_lock(&memcg->peaks_lock);
4010 WRITE_ONCE(pc->local_watermark, usage);
4013 if (usage > peer_ctx->value)
4014 WRITE_ONCE(peer_ctx->value, usage);
4017 if (ofp->value == -1)
4018 list_add(&ofp->list, watchers);
4020 WRITE_ONCE(ofp->value, usage);
4021 spin_unlock(&memcg->peaks_lock);
4031 return peak_write(of, buf, nbytes, off, &memcg->memory,
4032 &memcg->memory_peaks);
4040 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
4055 page_counter_set_min(&memcg->memory, min);
4063 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
4078 page_counter_set_low(&memcg->memory, low);
4086 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
4103 page_counter_set_high(&memcg->memory, high);
4106 unsigned long nr_pages = page_counter_read(&memcg->memory);
4112 if (signal_pending(current))
4121 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
4124 if (!reclaimed && !nr_retries--)
4135 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
4152 xchg(&memcg->memory.max, max);
4155 unsigned long nr_pages = page_counter_read(&memcg->memory);
4160 if (signal_pending(current))
4170 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
4172 nr_reclaims--;
4205 __memory_events_show(m, memcg->memory_events);
4213 __memory_events_show(m, memcg->memory_events_local);
4224 return -ENOMEM;
4274 seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
4287 return -EINVAL;
4294 return -EINVAL;
4296 WRITE_ONCE(memcg->oom_group, oom_group);
4317 int swappiness = -1;
4327 return -EINVAL;
4337 return -EINVAL;
4339 return -EINVAL;
4342 return -EINVAL;
4349 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4;
4352 if (signal_pending(current))
4353 return -EINTR;
4366 swappiness == -1 ? NULL : &swappiness);
4368 if (!reclaimed && !nr_retries--)
4369 return -EAGAIN;
4379 .name = "current",
4470 * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
4471 * @root: the top ancestor of the sub-tree being checked
4475 * of a top-down tree iteration, not for isolated queries.
4489 page_counter_calculate_protection(&root->memory, &memcg->memory, recursive_protection);
4513 css_put(&memcg->css);
4519 * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio
4520 * @memcg: memcg to charge.
4522 * @nr_pages: number of pages to charge.
4525 * the memcg has the capacity for it. It does not commit the charge yet,
4529 * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the
4540 * but do not attempt to commit charge later (or cancel on error) either.
4545 return -EOPNOTSUPP;
4548 return -ENOMEM;
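/*
 * Illustrative sketch (not part of the original memcontrol.c): the two-step
 * charging protocol referred to above, assuming the declarations of
 * mem_cgroup_hugetlb_try_charge(), mem_cgroup_commit_charge() and
 * mem_cgroup_cancel_charge() in <linux/memcontrol.h>. The charge is
 * reserved first, the folio is bound to the memcg only once it exists, and
 * an aborted allocation cancels the uncommitted charge. The allocator
 * helper below is a hypothetical placeholder.
 */
#include <linux/memcontrol.h>

static struct folio *hypothetical_alloc_folio(gfp_t gfp, long nr_pages);

static struct folio *charge_and_commit_sketch(struct mem_cgroup *memcg,
					      gfp_t gfp, long nr_pages)
{
	struct folio *folio;
	bool charged;
	int ret;

	ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
	if (ret == -ENOMEM)
		return NULL;
	/* -EOPNOTSUPP: accounting is off, continue without commit/cancel */
	charged = !ret;

	folio = hypothetical_alloc_folio(gfp, nr_pages);
	if (!folio) {
		if (charged)
			mem_cgroup_cancel_charge(memcg, nr_pages);
		return NULL;
	}

	if (charged)
		mem_cgroup_commit_charge(folio, memcg);
	return folio;
}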
4554 * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
4555 * @folio: folio to charge.
4578 if (!memcg || !css_tryget_online(&memcg->css))
4584 css_put(&memcg->css);
4589 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
4608 * so this is a non-issue here. Memory and swap charge lifetimes
4609 * correspond 1:1 to page and swap slot lifetimes: we charge the
4616 * memory+swap charge, drop the swap entry duplicate.
4637 if (ug->nr_memory) {
4638 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
4640 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
4641 if (ug->nr_kmem) {
4642 mod_memcg_state(ug->memcg, MEMCG_KMEM, -ug->nr_kmem);
4643 memcg1_account_kmem(ug->memcg, -ug->nr_kmem);
4645 memcg1_oom_recover(ug->memcg);
4648 memcg1_uncharge_batch(ug->memcg, ug->pgpgout, ug->nr_memory, ug->nid);
4651 css_put(&ug->memcg->css);
4681 if (ug->memcg != memcg) {
4682 if (ug->memcg) {
4686 ug->memcg = memcg;
4687 ug->nid = folio_nid(folio);
4690 css_get(&memcg->css);
4696 ug->nr_memory += nr_pages;
4697 ug->nr_kmem += nr_pages;
4699 folio->memcg_data = 0;
4704 ug->nr_memory += nr_pages;
4705 ug->pgpgout++;
4708 folio->memcg_data = 0;
4711 css_put(&memcg->css);
4718 /* Don't touch folio->lru of any random page, pre-check: */
4733 for (i = 0; i < folios->nr; i++)
4734 uncharge_folio(folios->folios[i], &ug);
4740 * mem_cgroup_replace_folio - Charge a folio's replacement.
4744 * Charge @new as a replacement folio for @old. @old will
4747 * Both folios must be locked, @new->mapping must be set up.
4771 /* Force-charge the new page. The old one will be freed soon */
4773 page_counter_charge(&memcg->memory, nr_pages);
4775 page_counter_charge(&memcg->memsw, nr_pages);
4778 css_get(&memcg->css);
4784 * mem_cgroup_migrate - Transfer the memcg data from the old to the new folio.
4792 * Both folios must be locked, @new->mapping must be set up.
4817 /* Transfer the charge and the css ref */
4820 /* Warning should never happen, so don't worry about refcount non-0 */
4822 old->memcg_data = 0;
4840 memcg = mem_cgroup_from_task(current);
4845 if (css_tryget(&memcg->css))
4846 sk->sk_memcg = memcg;
4853 if (sk->sk_memcg)
4854 css_put(&sk->sk_memcg->css);
4858 * mem_cgroup_charge_skmem - charge socket memory
4859 * @memcg: memcg to charge
4860 * @nr_pages: number of pages to charge
4863 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
4864 * @memcg's configured limit, %false if it doesn't.
4881 * mem_cgroup_uncharge_skmem - uncharge socket memory
4892 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
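/*
 * Illustrative sketch (not part of the original memcontrol.c): typical
 * pairing of the socket memory hooks documented above, assuming the
 * declarations of mem_cgroup_charge_skmem() and mem_cgroup_uncharge_skmem()
 * in <linux/memcontrol.h>. Buffer space is charged before it is committed
 * to a socket and the same number of pages is uncharged when it is
 * released.
 */
#include <linux/memcontrol.h>

static bool sk_buffer_account_sketch(struct mem_cgroup *memcg,
				     unsigned int nr_pages, gfp_t gfp_mask)
{
	if (!mem_cgroup_charge_skmem(memcg, nr_pages, gfp_mask))
		return false;			/* over the configured limit */

	/* ... commit and later release the buffer space ... */

	mem_cgroup_uncharge_skmem(memcg, nr_pages);
	return true;
}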
4919 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
4929 * used for per-memcg-per-cpu caching of per-node statistics. In order
4939 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
4949 while (!refcount_inc_not_zero(&memcg->id.ref)) {
4966 * mem_cgroup_swapout - transfer a memsw charge to swap
4967 * @folio: folio whose memsw charge to transfer
4968 * @entry: swap entry to move the charge to
4970 * Transfer the memsw charge of @folio to @entry.
4995 * have an ID allocated to it anymore, charge the closest online
4996 * ancestor for the swap instead and transfer the memory+swap charge.
5002 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
5009 folio->memcg_data = 0;
5012 page_counter_uncharge(&memcg->memory, nr_entries);
5016 page_counter_charge(&swap_memcg->memsw, nr_entries);
5017 page_counter_uncharge(&memcg->memsw, nr_entries);
5021 css_put(&memcg->css);
5025 * __mem_cgroup_try_charge_swap - try charging swap space for a folio
5027 * @entry: swap entry to charge
5029 * Try to charge @folio's memcg for the swap space at @entry.
5031 * Returns 0 on success, -ENOMEM on failure.
5057 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
5061 return -ENOMEM;
5066 mem_cgroup_id_get_many(memcg, nr_pages - 1);
5075 * __mem_cgroup_uncharge_swap - uncharge swap space
5090 page_counter_uncharge(&memcg->memsw, nr_pages);
5092 page_counter_uncharge(&memcg->swap, nr_pages);
5094 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
5108 READ_ONCE(memcg->swap.max) -
5109 page_counter_read(&memcg->swap));
5129 unsigned long usage = page_counter_read(&memcg->swap);
5131 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
5132 usage * 2 >= READ_ONCE(memcg->swap.max))
5146 "Please report your usecase to linux-mm@kvack.org if you "
5157 return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
5164 return peak_show(sf, v, &memcg->swap);
5172 return peak_write(of, buf, nbytes, off, &memcg->swap,
5173 &memcg->swap_peaks);
5179 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
5194 page_counter_set_high(&memcg->swap, high);
5202 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
5217 xchg(&memcg->swap.max, max);
5227 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
5229 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
5231 atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
5238 .name = "swap.current",
5273 * obj_cgroup_may_zswap - check if this cgroup can zswap
5276 * Check if the hierarchical zswap limit has been reached.
5280 * once compression has occurred, and this optimistic pre-check avoids
5295 unsigned long max = READ_ONCE(memcg->zswap_max);
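/*
 * Illustrative sketch (not part of the original memcontrol.c): the
 * hierarchical check above walks from the charging cgroup towards the root
 * and refuses zswap as soon as any ancestor's zswap usage has reached its
 * zswap_max. A minimal model of that walk over a simple parent chain:
 */
struct zswap_limit_node {
	struct zswap_limit_node *parent;
	unsigned long zswap_usage;	/* bytes currently held in zswap */
	unsigned long zswap_max;	/* per-cgroup limit, ~0UL if unlimited */
};

static int may_zswap_model(const struct zswap_limit_node *node)
{
	for (; node; node = node->parent) {
		if (node->zswap_usage >= node->zswap_max)
			return 0;	/* some ancestor is at its limit */
	}
	return 1;
}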
5318 * obj_cgroup_charge_zswap - charge compression backend memory
5322 * This forces the charge after obj_cgroup_may_zswap() allowed
5332 VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
5346 * obj_cgroup_uncharge_zswap - uncharge compression backend memory
5363 mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
5364 mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
5375 if (!READ_ONCE(memcg->zswap_writeback))
5393 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
5408 xchg(&memcg->zswap_max, max);
5417 seq_printf(m, "%d\n", READ_ONCE(memcg->zswap_writeback));
5432 return -EINVAL;
5434 WRITE_ONCE(memcg->zswap_writeback, zswap_writeback);
5440 .name = "zswap.current",