memcontrol.c: 29ef680ae7c21110af8e6416d84d8a72fc147b14 (old) → 84c07d11aa619c6d24c682f469b10f344f0c02aa (new); removed lines are prefixed with -, added lines with +
/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *

--- 237 unchanged lines hidden ---

        return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The main reason for not using the cgroup id for this: it works better in
 * sparse environments, where we have a lot of memcgs but only a few are
 * kmem-limited. If, for instance, we had 200 memcgs and only the 200th were
 * kmem-limited, indexing by cgroup id would still require a 200-entry array.
 *
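The only substantive change in this hunk, and in every hunk that follows, is the preprocessor guard: this kmem-accounting code used to be compiled whenever the slab allocator was anything but SLOB, and is now gated by a dedicated CONFIG_MEMCG_KMEM symbol. The Kconfig side is not part of this excerpt, but the intent appears to be that the new symbol is enabled exactly when MEMCG is on and SLOB is off, making the code change mechanical. A hypothetical sketch of that equivalence in preprocessor terms:

/*
 * Illustration only: CONFIG_MEMCG_KMEM is really set by Kconfig, not by
 * a #define, but it is meant to select the same builds as the old test.
 */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
#define CONFIG_MEMCG_KMEM 1
#endif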

--- 37 unchanged lines hidden ---

 * conditional to this static branch, we'll have to allow modules that do
 * kmem_cache_alloc and the like to see this symbol as well
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

struct workqueue_struct *memcg_kmem_cache_wq;

-#endif /* !CONFIG_SLOB */
+#endif /* CONFIG_MEMCG_KMEM */
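memcg_kmem_enabled_key above is a static key: while it is false, branches that test it are patched out of the instruction stream, so the kmem-accounting hooks cost nothing on configurations that never enable a kmem limit. The comment explains the EXPORT_SYMBOL: inlined slab fast paths in modules must be able to see the key. A minimal sketch of the consumer side, assuming a wrapper along the lines of the kernel's memcg_kmem_enabled() helper (which lives in a header, not in this excerpt):

#include <linux/jump_label.h>

/* Hypothetical wrapper: reads as a plain branch, but compiles to a nop
 * until the key is switched on. */
static inline bool memcg_kmem_enabled_sketch(void)
{
        return static_branch_unlikely(&memcg_kmem_enabled_key);
}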

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned. The returned css remains associated with @page
 * until it is released.
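As a usage note, one in-tree consumer of this helper is the cgroup writeback code, which uses the returned css to find the writeback domain matching a dirtied page. A hypothetical caller might look like:

/* Hypothetical caller: look up the cgroup that owns @page. */
static struct cgroup_subsys_state *page_owner_css_sketch(struct page *page)
{
        /*
         * No extra reference is taken here; per the comment above, the
         * association holds until the css is released.
         */
        return mem_cgroup_css_from_page(page);
}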

--- 1893 unchanged lines hidden ---

 * have the page locked
 */
        page->mem_cgroup = memcg;

        if (lrucare)
                unlock_page_lru(page, isolated);
}

-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
static int memcg_alloc_cache_id(void)
{
        int id, size;
        int err;

        id = ida_simple_get(&memcg_cache_ida,
                            0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
        if (id < 0)

--- 248 unchanged lines hidden ---
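memcg_alloc_cache_id() above draws ids from an IDA, which always hands back the lowest free integer in the requested range; that is what keeps the per-cache memcg_caches arrays dense, matching the sparse-environments rationale from the comment earlier in this diff. A self-contained sketch of the same pattern, with hypothetical names and an arbitrary bound:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Returns the lowest free id in [0, 64), or a negative errno. */
static int example_alloc_id(void)
{
        return ida_simple_get(&example_ida, 0, 64, GFP_KERNEL);
}

static void example_free_id(int id)
{
        ida_simple_remove(&example_ida, id);
}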

        page->mem_cgroup = NULL;

        /* slab pages do not have PageKmemcg flag set */
        if (PageKmemcg(page))
                __ClearPageKmemcg(page);

        css_put_many(&memcg->css, nr_pages);
}
-#endif /* !CONFIG_SLOB */
+#endif /* CONFIG_MEMCG_KMEM */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", mark them. We're under
 * zone_lru_lock, and migration entries are set up in all page mappings.
 */
void mem_cgroup_split_huge_fixup(struct page *head)

--- 378 unchanged lines hidden ---

                return counter->failcnt;
        case RES_SOFT_LIMIT:
                return (u64)memcg->soft_limit * PAGE_SIZE;
        default:
                BUG();
        }
}

-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
static int memcg_online_kmem(struct mem_cgroup *memcg)
{
        int memcg_id;

        if (cgroup_memory_nokmem)
                return 0;

        BUG_ON(memcg->kmemcg_id >= 0);

--- 83 unchanged lines hidden ---

        return 0;
}
static void memcg_offline_kmem(struct mem_cgroup *memcg)
{
}
static void memcg_free_kmem(struct mem_cgroup *memcg)
{
}
-#endif /* !CONFIG_SLOB */
+#endif /* CONFIG_MEMCG_KMEM */
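The empty memcg_offline_kmem()/memcg_free_kmem() bodies just above are the compiled-out side of the guard (the matching #else falls inside the hidden lines): callers invoke them unconditionally, and the no-op stubs keep every call site free of #ifdef clutter. The generic pattern, as a sketch with hypothetical names:

/* Hypothetical feature with a config-dependent implementation. */
#ifdef CONFIG_FEATURE_X
int feature_x_online(void);             /* real version defined elsewhere */
#else
static inline int feature_x_online(void)
{
        return 0;                       /* nothing to do when compiled out */
}
#endif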

static int memcg_update_kmem_max(struct mem_cgroup *memcg,
                                 unsigned long max)
{
        int ret;

        mutex_lock(&memcg_max_mutex);
        ret = page_counter_set_max(&memcg->kmem, max);

--- 1287 unchanged lines hidden ---
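memcg_update_kmem_max() above is truncated by the viewer, but the visible pattern is a global mutex (memcg_max_mutex) serializing all limit writers so concurrent updates cannot interleave. A self-contained sketch of that pattern, with hypothetical names (page_counter_set_max() is the real API the file calls):

#include <linux/mutex.h>
#include <linux/page_counter.h>

static DEFINE_MUTEX(example_max_mutex);

/* Serialize max updates so two writers cannot race on the counter. */
static int example_update_max(struct page_counter *counter, unsigned long max)
{
        int ret;

        mutex_lock(&example_max_mutex);
        ret = page_counter_set_max(counter, max);
        mutex_unlock(&example_max_mutex);
        return ret;
}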

        memcg->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&memcg->oom_notify);
        mutex_init(&memcg->thresholds_lock);
        spin_lock_init(&memcg->move_lock);
        vmpressure_init(&memcg->vmpressure);
        INIT_LIST_HEAD(&memcg->event_list);
        spin_lock_init(&memcg->event_list_lock);
        memcg->socket_pressure = jiffies;
-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
        memcg->kmemcg_id = -1;
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
#endif
        idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
fail:

--- 1823 unchanged lines hidden ---

 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
        int cpu, node;

-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
        /*
         * Kmem cache creation is mostly done with the slab_mutex held,
         * so use a workqueue with limited concurrency to avoid stalling
         * all worker threads in case lots of cgroups are created and
         * destroyed simultaneously.
         */
        memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
        BUG_ON(!memcg_kmem_cache_wq);
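The third argument to alloc_workqueue() is max_active; passing 1 caps how many work items from this queue run concurrently (per CPU, for a bound queue like this one), which is the limited concurrency the comment asks for. A minimal sketch of the same construction, with hypothetical names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
        /* work that holds a heavily contended lock, e.g. slab_mutex */
}
static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
        /* name, flags = 0, max_active = 1 */
        example_wq = alloc_workqueue("example_wq", 0, 1);
        if (!example_wq)
                return -ENOMEM;
        queue_work(example_wq, &example_work);
        return 0;
}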

--- 353 unchanged lines hidden ---