Lines Matching +full:atomic +full:- +full:threshold +full:- +full:us
1 // SPDX-License-Identifier: GPL-2.0-or-later
7 #include <linux/backing-dev.h>
17 #include "memcontrol-v1.h"
20 * Cgroups above their limits are maintained in an RB-Tree, independent of
113 struct rb_node **p = &mctz->rb_root.rb_node; in __mem_cgroup_insert_exceeded()
118 if (mz->on_tree) in __mem_cgroup_insert_exceeded()
121 mz->usage_in_excess = new_usage_in_excess; in __mem_cgroup_insert_exceeded()
122 if (!mz->usage_in_excess) in __mem_cgroup_insert_exceeded()
128 if (mz->usage_in_excess < mz_node->usage_in_excess) { in __mem_cgroup_insert_exceeded()
129 p = &(*p)->rb_left; in __mem_cgroup_insert_exceeded()
132 p = &(*p)->rb_right; in __mem_cgroup_insert_exceeded()
137 mctz->rb_rightmost = &mz->tree_node; in __mem_cgroup_insert_exceeded()
139 rb_link_node(&mz->tree_node, parent, p); in __mem_cgroup_insert_exceeded()
140 rb_insert_color(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_insert_exceeded()
141 mz->on_tree = true; in __mem_cgroup_insert_exceeded()
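The insertion rule above keys each per-node memcg entry by its usage_in_excess, sends equal keys to the right, and remembers the rightmost node so the worst offender can be found in O(1). A minimal userspace sketch of that rule, using a plain unbalanced binary search tree in place of the kernel's rb_node API (names here are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct node {
    unsigned long usage_in_excess;
    struct node *left, *right;
};

static struct node *rightmost;           /* analogue of mctz->rb_rightmost */

static void insert(struct node **root, struct node *nz)
{
    struct node **p = root;
    int rightmost_path = 1;              /* true while we only descend right */

    while (*p) {
        struct node *parent = *p;

        if (nz->usage_in_excess < parent->usage_in_excess) {
            p = &parent->left;
            rightmost_path = 0;
        } else {
            p = &parent->right;          /* equal keys go right, as above */
        }
    }
    if (rightmost_path)
        rightmost = nz;                  /* largest excess seen so far */
    *p = nz;
}

int main(void)
{
    struct node *root = NULL;
    unsigned long excess[] = { 40, 10, 70, 70, 5 };

    for (size_t i = 0; i < sizeof(excess) / sizeof(excess[0]); i++) {
        struct node *nz = calloc(1, sizeof(*nz));
        nz->usage_in_excess = excess[i];
        insert(&root, nz);
    }
    printf("rightmost excess: %lu\n", rightmost->usage_in_excess); /* 70 */
    return 0;
}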
147 if (!mz->on_tree) in __mem_cgroup_remove_exceeded()
150 if (&mz->tree_node == mctz->rb_rightmost) in __mem_cgroup_remove_exceeded()
151 mctz->rb_rightmost = rb_prev(&mz->tree_node); in __mem_cgroup_remove_exceeded()
153 rb_erase(&mz->tree_node, &mctz->rb_root); in __mem_cgroup_remove_exceeded()
154 mz->on_tree = false; in __mem_cgroup_remove_exceeded()
162 spin_lock_irqsave(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
164 spin_unlock_irqrestore(&mctz->lock, flags); in mem_cgroup_remove_exceeded()
169 unsigned long nr_pages = page_counter_read(&memcg->memory); in soft_limit_excess()
170 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
174 excess = nr_pages - soft_limit; in soft_limit_excess()
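soft_limit_excess() boils down to a clamped subtraction: usage at or below the soft limit yields zero, anything above yields the number of excess pages. A standalone restatement (not the kernel function itself):

/* Pages charged above the soft limit, or 0 if the limit is not exceeded. */
static unsigned long soft_limit_excess_pages(unsigned long nr_pages,
                                             unsigned long soft_limit)
{
    return nr_pages > soft_limit ? nr_pages - soft_limit : 0;
}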
199 mz = memcg->nodeinfo[nid]; in memcg1_update_tree()
202 * We have to update the tree if mz is on RB-tree or in memcg1_update_tree()
205 if (excess || mz->on_tree) { in memcg1_update_tree()
208 spin_lock_irqsave(&mctz->lock, flags); in memcg1_update_tree()
209 /* if on-tree, remove it */ in memcg1_update_tree()
210 if (mz->on_tree) in memcg1_update_tree()
213 * Insert again. mz->usage_in_excess will be updated. in memcg1_update_tree()
217 spin_unlock_irqrestore(&mctz->lock, flags); in memcg1_update_tree()
229 mz = memcg->nodeinfo[nid]; in memcg1_remove_from_trees()
243 if (!mctz->rb_rightmost) in __mem_cgroup_largest_soft_limit_node()
246 mz = rb_entry(mctz->rb_rightmost, in __mem_cgroup_largest_soft_limit_node()
254 if (!soft_limit_excess(mz->memcg) || in __mem_cgroup_largest_soft_limit_node()
255 !css_tryget(&mz->memcg->css)) in __mem_cgroup_largest_soft_limit_node()
266 spin_lock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
268 spin_unlock_irq(&mctz->lock); in mem_cgroup_largest_soft_limit_node()
339 mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id]; in memcg1_soft_limit_reclaim()
346 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root)) in memcg1_soft_limit_reclaim()
362 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat, in memcg1_soft_limit_reclaim()
365 spin_lock_irq(&mctz->lock); in memcg1_soft_limit_reclaim()
375 excess = soft_limit_excess(mz->memcg); in memcg1_soft_limit_reclaim()
386 spin_unlock_irq(&mctz->lock); in memcg1_soft_limit_reclaim()
387 css_put(&mz->memcg->css); in memcg1_soft_limit_reclaim()
400 css_put(&next_mz->memcg->css); in memcg1_soft_limit_reclaim()
415 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_move_charge_write()
419 return -EINVAL; in mem_cgroup_move_charge_write()
426 return -ENOSYS; in mem_cgroup_move_charge_write()
438 t = rcu_dereference(memcg->thresholds.primary); in __mem_cgroup_threshold()
440 t = rcu_dereference(memcg->memsw_thresholds.primary); in __mem_cgroup_threshold()
448 * current_threshold points to threshold just below or equal to usage. in __mem_cgroup_threshold()
449 * If it's not true, a threshold was crossed after last in __mem_cgroup_threshold()
452 i = t->current_threshold; in __mem_cgroup_threshold()
456 * current_threshold and check if a threshold is crossed. in __mem_cgroup_threshold()
460 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--) in __mem_cgroup_threshold()
461 eventfd_signal(t->entries[i].eventfd); in __mem_cgroup_threshold()
468 * current_threshold+1 and check if a threshold is crossed. in __mem_cgroup_threshold()
472 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++) in __mem_cgroup_threshold()
473 eventfd_signal(t->entries[i].eventfd); in __mem_cgroup_threshold()
476 t->current_threshold = i - 1; in __mem_cgroup_threshold()
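The two loops walk a sorted threshold array outward from the cached current_threshold index, signalling every eventfd whose threshold was crossed by the usage change, then re-cache the index of the last threshold at or below the new usage. A self-contained userspace model of that scan (eventfd_signal() replaced by a counter):

#include <stdio.h>

struct entry { unsigned long threshold; int fired; };

static void check_thresholds(struct entry *e, int size, int *current_threshold,
                             unsigned long usage)
{
    int i = *current_threshold;

    /* usage dropped: walk left, firing entries now above usage */
    for (; i >= 0 && e[i].threshold > usage; i--)
        e[i].fired++;                     /* stands in for eventfd_signal() */

    /* usage grew: continue right from where we stopped, firing entries
     * now at or below usage */
    for (i++; i < size && e[i].threshold <= usage; i++)
        e[i].fired++;

    /* cache the index of the last threshold at or below usage (or -1) */
    *current_threshold = i - 1;
}

int main(void)
{
    struct entry e[] = { {100, 0}, {200, 0}, {300, 0} };
    int cur = -1;

    check_thresholds(e, 3, &cur, 250);    /* crosses 100 and 200 going up */
    printf("current_threshold=%d fired=%d,%d,%d\n",
           cur, e[0].fired, e[1].fired, e[2].fired);
    return 0;
}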
492 /* Cgroup1: threshold notifications & softlimit tree updates */
518 nr_pages = -nr_pages; /* for event */ in memcg1_charge_statistics()
521 __this_cpu_add(memcg->events_percpu->nr_page_events, nr_pages); in memcg1_charge_statistics()
532 val = __this_cpu_read(memcg->events_percpu->nr_page_events); in memcg1_event_ratelimit()
533 next = __this_cpu_read(memcg->events_percpu->targets[target]); in memcg1_event_ratelimit()
535 if ((long)(next - val) < 0) { in memcg1_event_ratelimit()
546 __this_cpu_write(memcg->events_percpu->targets[target], next); in memcg1_event_ratelimit()
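The ratelimit test compares the free-running per-cpu page-event counter against a cached per-target value with a signed subtraction, so the check keeps working when the unsigned counter wraps. A one-function sketch of that comparison (standalone, not the kernel helper):

#include <stdbool.h>

/* True once val has gone past next, even across an unsigned wrap. */
static bool event_due(unsigned long val, unsigned long next)
{
    return (long)(next - val) < 0;
}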
561 /* threshold event is triggered in finer grain than soft limit */ in memcg1_check_events()
585 * memcg1_swapout - transfer a memsw charge to swap
620 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); in memcg1_swapout()
626 folio->memcg_data = 0; in memcg1_swapout()
629 page_counter_uncharge(&memcg->memory, nr_entries); in memcg1_swapout()
633 page_counter_charge(&swap_memcg->memsw, nr_entries); in memcg1_swapout()
634 page_counter_uncharge(&memcg->memsw, nr_entries); in memcg1_swapout()
639 * i_pages lock which is taken with interrupts-off. It is in memcg1_swapout()
641 * only synchronisation we have for updating the per-CPU variables. in memcg1_swapout()
645 memcg1_charge_statistics(memcg, -folio_nr_pages(folio)); in memcg1_swapout()
649 css_put(&memcg->css); in memcg1_swapout()
653 * memcg1_swapin - uncharge swap slot
672 * so this is a non-issue here. Memory and swap charge lifetimes in memcg1_swapin()
693 __this_cpu_add(memcg->events_percpu->nr_page_events, nr_memory); in memcg1_uncharge_batch()
703 if (_a->threshold > _b->threshold) in compare_thresholds()
706 if (_a->threshold < _b->threshold) in compare_thresholds()
707 return -1; in compare_thresholds()
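The comparator returns an explicit -1/0/1 instead of subtracting the two thresholds, which could overflow the int return value for large unsigned thresholds. A userspace equivalent suitable for qsort() (the struct name is illustrative, not the kernel's):

#include <stdlib.h>

struct threshold_entry { unsigned long threshold; };

static int compare_thresholds(const void *a, const void *b)
{
    const struct threshold_entry *ta = a, *tb = b;

    if (ta->threshold > tb->threshold)
        return 1;
    if (ta->threshold < tb->threshold)
        return -1;
    return 0;
}

/* usage: qsort(entries, nr, sizeof(*entries), compare_thresholds); */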
718 list_for_each_entry(ev, &memcg->oom_notify, list) in mem_cgroup_oom_notify_cb()
719 eventfd_signal(ev->eventfd); in mem_cgroup_oom_notify_cb()
738 unsigned long threshold; in __mem_cgroup_usage_register_event() local
742 ret = page_counter_memparse(args, "-1", &threshold); in __mem_cgroup_usage_register_event()
746 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
749 thresholds = &memcg->thresholds; in __mem_cgroup_usage_register_event()
752 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_register_event()
757 /* Check if a threshold crossed before adding a new one */ in __mem_cgroup_usage_register_event()
758 if (thresholds->primary) in __mem_cgroup_usage_register_event()
761 size = thresholds->primary ? thresholds->primary->size + 1 : 1; in __mem_cgroup_usage_register_event()
766 ret = -ENOMEM; in __mem_cgroup_usage_register_event()
769 new->size = size; in __mem_cgroup_usage_register_event()
772 if (thresholds->primary) in __mem_cgroup_usage_register_event()
773 memcpy(new->entries, thresholds->primary->entries, in __mem_cgroup_usage_register_event()
774 flex_array_size(new, entries, size - 1)); in __mem_cgroup_usage_register_event()
776 /* Add new threshold */ in __mem_cgroup_usage_register_event()
777 new->entries[size - 1].eventfd = eventfd; in __mem_cgroup_usage_register_event()
778 new->entries[size - 1].threshold = threshold; in __mem_cgroup_usage_register_event()
780 /* Sort thresholds. Registering of new threshold isn't time-critical */ in __mem_cgroup_usage_register_event()
781 sort(new->entries, size, sizeof(*new->entries), in __mem_cgroup_usage_register_event()
784 /* Find current threshold */ in __mem_cgroup_usage_register_event()
785 new->current_threshold = -1; in __mem_cgroup_usage_register_event()
787 if (new->entries[i].threshold <= usage) { in __mem_cgroup_usage_register_event()
789 * new->current_threshold will not be used until in __mem_cgroup_usage_register_event()
793 ++new->current_threshold; in __mem_cgroup_usage_register_event()
799 kfree(thresholds->spare); in __mem_cgroup_usage_register_event()
800 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_register_event()
802 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_register_event()
808 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_register_event()
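After the new entry is appended and the array is sorted, current_threshold is primed to the index of the last threshold at or below the current usage (or -1 if none), so later scans only walk the entries actually crossed. A compact restatement of that search over a sorted array (standalone helper, not the kernel code):

static int find_current_threshold(const unsigned long *thresholds, int size,
                                  unsigned long usage)
{
    int i, cur = -1;

    /* thresholds[] is sorted ascending; remember the last one <= usage */
    for (i = 0; i < size && thresholds[i] <= usage; i++)
        cur = i;
    return cur;
}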
833 mutex_lock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
836 thresholds = &memcg->thresholds; in __mem_cgroup_usage_unregister_event()
839 thresholds = &memcg->memsw_thresholds; in __mem_cgroup_usage_unregister_event()
844 if (!thresholds->primary) in __mem_cgroup_usage_unregister_event()
847 /* Check if a threshold crossed before removing */ in __mem_cgroup_usage_unregister_event()
850 /* Calculate new number of thresholds */ in __mem_cgroup_usage_unregister_event()
852 for (i = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
853 if (thresholds->primary->entries[i].eventfd != eventfd) in __mem_cgroup_usage_unregister_event()
859 new = thresholds->spare; in __mem_cgroup_usage_unregister_event()
872 new->size = size; in __mem_cgroup_usage_unregister_event()
874 /* Copy thresholds and find current threshold */ in __mem_cgroup_usage_unregister_event()
875 new->current_threshold = -1; in __mem_cgroup_usage_unregister_event()
876 for (i = 0, j = 0; i < thresholds->primary->size; i++) { in __mem_cgroup_usage_unregister_event()
877 if (thresholds->primary->entries[i].eventfd == eventfd) in __mem_cgroup_usage_unregister_event()
880 new->entries[j] = thresholds->primary->entries[i]; in __mem_cgroup_usage_unregister_event()
881 if (new->entries[j].threshold <= usage) { in __mem_cgroup_usage_unregister_event()
883 * new->current_threshold will not be used in __mem_cgroup_usage_unregister_event()
887 ++new->current_threshold; in __mem_cgroup_usage_unregister_event()
894 thresholds->spare = thresholds->primary; in __mem_cgroup_usage_unregister_event()
896 rcu_assign_pointer(thresholds->primary, new); in __mem_cgroup_usage_unregister_event()
903 kfree(thresholds->spare); in __mem_cgroup_usage_unregister_event()
904 thresholds->spare = NULL; in __mem_cgroup_usage_unregister_event()
907 mutex_unlock(&memcg->thresholds_lock); in __mem_cgroup_usage_unregister_event()
929 return -ENOMEM; in mem_cgroup_oom_register_event()
933 event->eventfd = eventfd; in mem_cgroup_oom_register_event()
934 list_add(&event->list, &memcg->oom_notify); in mem_cgroup_oom_register_event()
937 if (memcg->under_oom) in mem_cgroup_oom_register_event()
951 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) { in mem_cgroup_oom_unregister_event()
952 if (ev->eventfd == eventfd) { in mem_cgroup_oom_unregister_event()
953 list_del(&ev->list); in mem_cgroup_oom_unregister_event()
966 * This is way over-engineered. It tries to support fully configurable
983 struct mem_cgroup *memcg = event->memcg; in memcg_event_remove()
985 remove_wait_queue(event->wqh, &event->wait); in memcg_event_remove()
987 event->unregister_event(memcg, event->eventfd); in memcg_event_remove()
990 eventfd_signal(event->eventfd); in memcg_event_remove()
992 eventfd_ctx_put(event->eventfd); in memcg_event_remove()
994 css_put(&memcg->css); in memcg_event_remove()
1000 * Called with wqh->lock held and interrupts disabled.
1007 struct mem_cgroup *memcg = event->memcg; in memcg_event_wake()
1014 * for us. in memcg_event_wake()
1017 * side will require wqh->lock via remove_wait_queue(), in memcg_event_wake()
1020 spin_lock(&memcg->event_list_lock); in memcg_event_wake()
1021 if (!list_empty(&event->list)) { in memcg_event_wake()
1022 list_del_init(&event->list); in memcg_event_wake()
1024 * We are in atomic context, but memcg_event_remove() in memcg_event_wake()
1027 schedule_work(&event->remove); in memcg_event_wake()
1029 spin_unlock(&memcg->event_list_lock); in memcg_event_wake()
1041 event->wqh = wqh; in memcg_event_ptable_queue_proc()
1042 add_wait_queue(wqh, &event->wait); in memcg_event_ptable_queue_proc()
1067 return -EOPNOTSUPP; in memcg_write_event_control()
1073 return -EINVAL; in memcg_write_event_control()
1082 return -EINVAL; in memcg_write_event_control()
1086 return -EBADF; in memcg_write_event_control()
1092 return -ENOMEM; in memcg_write_event_control()
1094 event->memcg = memcg; in memcg_write_event_control()
1095 INIT_LIST_HEAD(&event->list); in memcg_write_event_control()
1096 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc); in memcg_write_event_control()
1097 init_waitqueue_func_entry(&event->wait, memcg_event_wake); in memcg_write_event_control()
1098 INIT_WORK(&event->remove, memcg_event_remove); in memcg_write_event_control()
1100 event->eventfd = eventfd_ctx_fileget(fd_file(efile)); in memcg_write_event_control()
1101 if (IS_ERR(event->eventfd)) { in memcg_write_event_control()
1102 ret = PTR_ERR(event->eventfd); in memcg_write_event_control()
1107 ret = -EBADF; in memcg_write_event_control()
1121 cdentry = fd_file(cfile)->f_path.dentry; in memcg_write_event_control()
1122 if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) { in memcg_write_event_control()
1123 ret = -EINVAL; in memcg_write_event_control()
1135 name = cdentry->d_name.name; in memcg_write_event_control()
1138 event->register_event = mem_cgroup_usage_register_event; in memcg_write_event_control()
1139 event->unregister_event = mem_cgroup_usage_unregister_event; in memcg_write_event_control()
1142 "Please report your usecase to linux-mm-@kvack.org" in memcg_write_event_control()
1144 event->register_event = mem_cgroup_oom_register_event; in memcg_write_event_control()
1145 event->unregister_event = mem_cgroup_oom_unregister_event; in memcg_write_event_control()
1148 "Please report your usecase to linux-mm-@kvack.org " in memcg_write_event_control()
1150 event->register_event = vmpressure_register_event; in memcg_write_event_control()
1151 event->unregister_event = vmpressure_unregister_event; in memcg_write_event_control()
1153 event->register_event = memsw_cgroup_usage_register_event; in memcg_write_event_control()
1154 event->unregister_event = memsw_cgroup_usage_unregister_event; in memcg_write_event_control()
1156 ret = -EINVAL; in memcg_write_event_control()
1165 cfile_css = css_tryget_online_from_dir(cdentry->d_parent, in memcg_write_event_control()
1167 ret = -EINVAL; in memcg_write_event_control()
1173 ret = event->register_event(memcg, event->eventfd, buf); in memcg_write_event_control()
1177 vfs_poll(fd_file(efile), &event->pt); in memcg_write_event_control()
1179 spin_lock_irq(&memcg->event_list_lock); in memcg_write_event_control()
1180 list_add(&event->list, &memcg->event_list); in memcg_write_event_control()
1181 spin_unlock_irq(&memcg->event_list_lock); in memcg_write_event_control()
1187 eventfd_ctx_put(event->eventfd); in memcg_write_event_control()
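memcg_write_event_control() implements the cgroup-v1 side of the documented eventfd notification interface: userspace opens an eventfd and a target control file such as memory.usage_in_bytes, then writes "<event_fd> <target_fd> <args>" into cgroup.event_control. A userspace sketch of registering a usage threshold this way (the cgroup path and the 64 MiB threshold are examples, assuming a v1 memory controller mounted under /sys/fs/cgroup/memory):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    const char *grp = "/sys/fs/cgroup/memory/mygroup";   /* example group */
    char path[256], cmd[128];
    uint64_t ticks;

    int efd = eventfd(0, 0);
    snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", grp);
    int ufd = open(path, O_RDONLY);
    snprintf(path, sizeof(path), "%s/cgroup.event_control", grp);
    int cfd = open(path, O_WRONLY);
    if (efd < 0 || ufd < 0 || cfd < 0) {
        perror("open");
        return 1;
    }

    /* "<event_fd> <target_fd> <args>": here a 64 MiB usage threshold */
    snprintf(cmd, sizeof(cmd), "%d %d %llu", efd, ufd, 64ULL << 20);
    if (write(cfd, cmd, strlen(cmd)) < 0) {
        perror("cgroup.event_control");
        return 1;
    }

    /* blocks until the threshold is crossed and the eventfd is signalled */
    if (read(efd, &ticks, sizeof(ticks)) == sizeof(ticks))
        printf("threshold crossed %llu time(s)\n", (unsigned long long)ticks);
    return 0;
}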
1195 INIT_LIST_HEAD(&memcg->oom_notify); in memcg1_memcg_init()
1196 mutex_init(&memcg->thresholds_lock); in memcg1_memcg_init()
1197 INIT_LIST_HEAD(&memcg->event_list); in memcg1_memcg_init()
1198 spin_lock_init(&memcg->event_list_lock); in memcg1_memcg_init()
1210 spin_lock_irq(&memcg->event_list_lock); in memcg1_css_offline()
1211 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) { in memcg1_css_offline()
1212 list_del_init(&event->list); in memcg1_css_offline()
1213 schedule_work(&event->remove); in memcg1_css_offline()
1215 spin_unlock_irq(&memcg->event_list_lock); in memcg1_css_offline()
1219 * Check whether the OOM-Killer is already running under our hierarchy.
1229 if (iter->oom_lock) { in mem_cgroup_oom_trylock()
1238 iter->oom_lock = true; in mem_cgroup_oom_trylock()
1251 iter->oom_lock = false; in mem_cgroup_oom_trylock()
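mem_cgroup_oom_trylock() walks the whole hierarchy and only succeeds if no group in it already holds the OOM lock; on failure it rolls back the locks it has taken so far. A flattened userspace model of that all-or-nothing locking over an array of groups (the real code iterates the css hierarchy instead):

#include <stdbool.h>
#include <stddef.h>

struct group { bool oom_lock; };

static bool oom_trylock(struct group *iter, size_t n)
{
    size_t i, taken = 0;

    for (i = 0; i < n; i++) {
        if (iter[i].oom_lock)
            break;                /* someone in the subtree already holds it */
        iter[i].oom_lock = true;
        taken++;
    }
    if (i == n)
        return true;              /* locked the whole subtree */

    while (taken--)               /* roll back the partial locking */
        iter[taken].oom_lock = false;
    return false;
}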
1268 iter->oom_lock = false; in mem_cgroup_oom_unlock()
1278 iter->under_oom++; in mem_cgroup_mark_under_oom()
1292 if (iter->under_oom > 0) in mem_cgroup_unmark_under_oom()
1293 iter->under_oom--; in mem_cgroup_unmark_under_oom()
1312 oom_wait_memcg = oom_wait_info->memcg; in memcg_oom_wake_function()
1323 * For the following lockless ->under_oom test, the only required in memcg1_oom_recover()
1330 if (memcg && memcg->under_oom) in memcg1_oom_recover()
1335 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1353 struct mem_cgroup *memcg = current->memcg_in_oom; in mem_cgroup_oom_synchronize()
1385 current->memcg_in_oom = NULL; in mem_cgroup_oom_synchronize()
1386 css_put(&memcg->css); in mem_cgroup_oom_synchronize()
1403 * On the other hand, in-kernel OOM killer allows for an async victim in memcg1_oom_prepare()
1411 if (READ_ONCE(memcg->oom_kill_disable)) { in memcg1_oom_prepare()
1412 if (current->in_user_fault) { in memcg1_oom_prepare()
1413 css_get(&memcg->css); in memcg1_oom_prepare()
1414 current->memcg_in_oom = memcg; in memcg1_oom_prepare()
1446 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory; in mem_cgroup_resize_max()
1450 ret = -EINTR; in mem_cgroup_resize_max()
1459 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) : in mem_cgroup_resize_max()
1460 max <= memcg->memsw.max; in mem_cgroup_resize_max()
1463 ret = -EINVAL; in mem_cgroup_resize_max()
1466 if (max > counter->max) in mem_cgroup_resize_max()
1482 ret = -EBUSY; in mem_cgroup_resize_max()
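mem_cgroup_resize_max() refuses any new limit that would let the plain memory limit exceed the memory+swap limit; which comparison applies depends on which of the two counters is being written. A one-function restatement of that invariant check (standalone, not the kernel code):

#include <stdbool.h>

static bool limits_invariant(bool writing_memsw, unsigned long new_max,
                             unsigned long memory_max, unsigned long memsw_max)
{
    /* memsw must stay >= memory; memory must stay <= memsw */
    return writing_memsw ? new_max >= memory_max : new_max <= memsw_max;
}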
1502 /* we call try-to-free pages to make this cgroup empty */ in mem_cgroup_force_empty()
1508 while (nr_retries && page_counter_read(&memcg->memory)) { in mem_cgroup_force_empty()
1510 return -EINTR; in mem_cgroup_force_empty()
1514 nr_retries--; in mem_cgroup_force_empty()
1527 return -EINVAL; in mem_cgroup_force_empty_write()
1543 pr_warn_once("Non-hierarchical mode is deprecated. " in mem_cgroup_hierarchy_write()
1544 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_hierarchy_write()
1547 return -EINVAL; in mem_cgroup_hierarchy_write()
1556 switch (MEMFILE_TYPE(cft->private)) { in mem_cgroup_read_u64()
1558 counter = &memcg->memory; in mem_cgroup_read_u64()
1561 counter = &memcg->memsw; in mem_cgroup_read_u64()
1564 counter = &memcg->kmem; in mem_cgroup_read_u64()
1567 counter = &memcg->tcpmem; in mem_cgroup_read_u64()
1573 switch (MEMFILE_ATTR(cft->private)) { in mem_cgroup_read_u64()
1575 if (counter == &memcg->memory) in mem_cgroup_read_u64()
1577 if (counter == &memcg->memsw) in mem_cgroup_read_u64()
1581 return (u64)counter->max * PAGE_SIZE; in mem_cgroup_read_u64()
1583 return (u64)counter->watermark * PAGE_SIZE; in mem_cgroup_read_u64()
1585 return counter->failcnt; in mem_cgroup_read_u64()
1587 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE; in mem_cgroup_read_u64()
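The MEMFILE_TYPE()/MEMFILE_ATTR() switches above decode the cftype ->private value; assuming the usual memcg packing, the counter type lives in the high 16 bits and the attribute in the low 16 bits:

/* Assumed packing of cftype->private used by the memcg v1 control files. */
#define MEMFILE_PRIVATE(type, attr)  (((type) << 16) | (attr))
#define MEMFILE_TYPE(val)            (((val) >> 16) & 0xffff)
#define MEMFILE_ATTR(val)            ((val) & 0xffff)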
1600 return -EINVAL; in mem_cgroup_dummy_seq_show()
1609 ret = page_counter_set_max(&memcg->tcpmem, max); in memcg_update_tcp_max()
1613 if (!memcg->tcpmem_active) { in memcg_update_tcp_max()
1631 memcg->tcpmem_active = true; in memcg_update_tcp_max()
1650 ret = page_counter_memparse(buf, "-1", &nr_pages); in mem_cgroup_write()
1654 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_write()
1657 ret = -EINVAL; in mem_cgroup_write()
1660 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_write()
1670 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
1676 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
1684 ret = -EOPNOTSUPP; in mem_cgroup_write()
1687 "Please report your usecase to linux-mm@kvack.org if you " in mem_cgroup_write()
1689 WRITE_ONCE(memcg->soft_limit, nr_pages); in mem_cgroup_write()
1703 switch (MEMFILE_TYPE(of_cft(of)->private)) { in mem_cgroup_reset()
1705 counter = &memcg->memory; in mem_cgroup_reset()
1708 counter = &memcg->memsw; in mem_cgroup_reset()
1711 counter = &memcg->kmem; in mem_cgroup_reset()
1714 counter = &memcg->tcpmem; in mem_cgroup_reset()
1720 switch (MEMFILE_ATTR(of_cft(of)->private)) { in mem_cgroup_reset()
1725 counter->failcnt = 0; in mem_cgroup_reset()
1738 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
1798 seq_printf(m, "%s=%lu", stat->name, in memcg_numa_stat_show()
1799 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
1804 stat->lru_mask, false)); in memcg_numa_stat_show()
1810 seq_printf(m, "hierarchical_%s=%lu", stat->name, in memcg_numa_stat_show()
1811 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask, in memcg_numa_stat_show()
1816 stat->lru_mask, true)); in memcg_numa_stat_show()
1897 memory = min(memory, READ_ONCE(mi->memory.max)); in memcg1_stat_format()
1898 memsw = min(memsw, READ_ONCE(mi->memsw.max)); in memcg1_stat_format()
1931 mz = memcg->nodeinfo[pgdat->node_id]; in memcg1_stat_format()
1933 anon_cost += mz->lruvec.anon_cost; in memcg1_stat_format()
1934 file_cost += mz->lruvec.file_cost; in memcg1_stat_format()
1956 return -EINVAL; in mem_cgroup_swappiness_write()
1961 WRITE_ONCE(memcg->swappiness, val); in mem_cgroup_swappiness_write()
1972 seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable)); in mem_cgroup_oom_control_read()
1973 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); in mem_cgroup_oom_control_read()
1975 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); in mem_cgroup_oom_control_read()
1985 "Please report your usecase to linux-mm-@kvack.org if you " in mem_cgroup_oom_control_write()
1990 return -EINVAL; in mem_cgroup_oom_control_write()
1992 WRITE_ONCE(memcg->oom_kill_disable, val); in mem_cgroup_oom_control_write()
2169 page_counter_charge(&memcg->kmem, nr_pages); in memcg1_account_kmem()
2171 page_counter_uncharge(&memcg->kmem, -nr_pages); in memcg1_account_kmem()
2180 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) { in memcg1_charge_skmem()
2181 memcg->tcpmem_pressure = 0; in memcg1_charge_skmem()
2184 memcg->tcpmem_pressure = 1; in memcg1_charge_skmem()
2186 page_counter_charge(&memcg->tcpmem, nr_pages); in memcg1_charge_skmem()
2194 memcg->events_percpu = alloc_percpu_gfp(struct memcg1_events_percpu, in memcg1_alloc_events()
2196 return !!memcg->events_percpu; in memcg1_alloc_events()
2201 free_percpu(memcg->events_percpu); in memcg1_free_events()
2213 rtpn->rb_root = RB_ROOT; in memcg1_init()
2214 rtpn->rb_rightmost = NULL; in memcg1_init()
2215 spin_lock_init(&rtpn->lock); in memcg1_init()