Lines matching refs:nr_pages
Each entry gives the source line number, the matching line, and the enclosing function, with the symbol's role (argument, local, or member) where the tool reports one.

133 				      unsigned int nr_pages);
139 unsigned int nr_pages; in obj_cgroup_release() local
164 nr_pages = nr_bytes >> PAGE_SHIFT; in obj_cgroup_release()
166 if (nr_pages) in obj_cgroup_release()
167 obj_cgroup_uncharge_pages(objcg, nr_pages); in obj_cgroup_release()
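
The obj_cgroup_release() entries above (lines 139-167) convert a leftover byte count into whole pages before uncharging. A minimal user-space sketch of that conversion, assuming a 4 KiB page size (the PAGE_SHIFT value and the printout are illustrative, not the kernel's code):

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
        unsigned long nr_bytes = 3 * PAGE_SIZE + 512;
        unsigned int nr_pages = nr_bytes >> PAGE_SHIFT;  /* cf. line 164 */

        if (nr_pages)                                    /* cf. lines 166-167 */
                printf("would uncharge %u whole page(s); %lu byte(s) are sub-page\n",
                       nr_pages, nr_bytes & (PAGE_SIZE - 1));
        return 0;
}
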
1282 int zid, int nr_pages) in mem_cgroup_update_lru_size() argument
1294 if (nr_pages < 0) in mem_cgroup_update_lru_size()
1295 *lru_size += nr_pages; in mem_cgroup_update_lru_size()
1300 __func__, lruvec, lru, nr_pages, size)) { in mem_cgroup_update_lru_size()
1305 if (nr_pages > 0) in mem_cgroup_update_lru_size()
1306 *lru_size += nr_pages; in mem_cgroup_update_lru_size()
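
mem_cgroup_update_lru_size() takes a signed nr_pages delta and, as the ordering of lines 1294-1306 shows, applies a negative delta before the size sanity check and a positive delta after it. A simplified single-counter sketch of that ordering; the warning path below is only a stand-in for the kernel's check:

#include <stdio.h>

/* Apply a signed page delta to an LRU size counter: negative deltas land
 * before the sanity check so an underflow is visible there, positive
 * deltas land after it. */
static void update_lru_size(long *lru_size, long nr_pages)
{
        if (nr_pages < 0)
                *lru_size += nr_pages;

        if (*lru_size < 0) {            /* stand-in for the kernel's warning */
                fprintf(stderr, "lru_size went negative: %ld\n", *lru_size);
                *lru_size = 0;
        }

        if (nr_pages > 0)
                *lru_size += nr_pages;
}

int main(void)
{
        long size = 4;

        update_lru_size(&size, -6);     /* exercises the underflow path */
        update_lru_size(&size, 3);
        printf("final size: %ld\n", size);
        return 0;
}
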
1744 unsigned int nr_pages; member
1776 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in consume_stock() argument
1783 if (nr_pages > MEMCG_CHARGE_BATCH) in consume_stock()
1789 stock_pages = READ_ONCE(stock->nr_pages); in consume_stock()
1790 if (memcg == READ_ONCE(stock->cached) && stock_pages >= nr_pages) { in consume_stock()
1791 WRITE_ONCE(stock->nr_pages, stock_pages - nr_pages); in consume_stock()
1805 unsigned int stock_pages = READ_ONCE(stock->nr_pages); in drain_stock()
1816 WRITE_ONCE(stock->nr_pages, 0); in drain_stock()
1849 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in __refill_stock() argument
1860 stock_pages = READ_ONCE(stock->nr_pages) + nr_pages; in __refill_stock()
1861 WRITE_ONCE(stock->nr_pages, stock_pages); in __refill_stock()
1867 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) in refill_stock() argument
1872 __refill_stock(memcg, nr_pages); in refill_stock()
1902 if (memcg && READ_ONCE(stock->nr_pages) && in drain_all_stock()
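
Lines 1744-1902 cover the per-CPU charge stock: a cached batch of pre-charged pages for one memcg. consume_stock() refuses requests larger than MEMCG_CHARGE_BATCH or for a memcg other than the cached one, and refill_stock() puts pages back. A simplified single-threaded model of that cache is below; the batch value, the struct layout, and the drain-on-switch behaviour are assumptions for the sketch, and the READ_ONCE()/WRITE_ONCE() annotations of the real code are omitted:

#include <stdbool.h>
#include <stdio.h>

#define MEMCG_CHARGE_BATCH 64U          /* assumed batch size for the sketch */

struct memcg;                           /* opaque stand-in for struct mem_cgroup */

/* Single-threaded model of one per-CPU stock slot. */
struct stock {
        struct memcg *cached;
        unsigned int nr_pages;
};

static bool consume_stock(struct stock *stock, struct memcg *memcg,
                          unsigned int nr_pages)
{
        if (nr_pages > MEMCG_CHARGE_BATCH)
                return false;           /* oversized requests bypass the cache */
        if (stock->cached != memcg || stock->nr_pages < nr_pages)
                return false;
        stock->nr_pages -= nr_pages;
        return true;
}

static void refill_stock(struct stock *stock, struct memcg *memcg,
                         unsigned int nr_pages)
{
        if (stock->cached != memcg) {   /* assumed: switching drops the old cache */
                stock->cached = memcg;
                stock->nr_pages = 0;
        }
        stock->nr_pages += nr_pages;
}

int main(void)
{
        struct memcg *m = (struct memcg *)1;
        struct stock s = { 0 };

        refill_stock(&s, m, 32);
        printf("consume 8: %d, %u left\n", consume_stock(&s, m, 8), s.nr_pages);
        printf("consume 100: %d\n", consume_stock(&s, m, 100));
        return 0;
}

The point of the cache is that a charge of up to one batch can often be satisfied locally, without touching the shared page counters (see line 2225 below).
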
1941 unsigned int nr_pages, in reclaim_high() argument
1956 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages, in reclaim_high()
2081 unsigned int nr_pages, in calculate_high_delay() argument
2109 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH; in calculate_high_delay()
2122 unsigned int nr_pages = current->memcg_nr_pages_over_high; in mem_cgroup_handle_over_high() local
2127 if (likely(!nr_pages)) in mem_cgroup_handle_over_high()
2155 in_retry ? SWAP_CLUSTER_MAX : nr_pages, in mem_cgroup_handle_over_high()
2162 penalty_jiffies = calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
2165 penalty_jiffies += calculate_high_delay(memcg, nr_pages, in mem_cgroup_handle_over_high()
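
In the over-high throttling path, calculate_high_delay() scales a base penalty by how many pages were charged past the high limit relative to the charge batch (line 2109), and mem_cgroup_handle_over_high() adds up two such penalties (lines 2162-2165). A sketch of just that scaling, with an assumed MEMCG_CHARGE_BATCH value:

#include <stdio.h>

#define MEMCG_CHARGE_BATCH 64U          /* assumed value for the sketch */

/* Scale a base penalty (in jiffies) by the pages charged over the high
 * limit, relative to the charge batch, modelled on line 2109. */
static unsigned long scaled_penalty(unsigned long penalty_jiffies,
                                    unsigned int nr_pages)
{
        return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
}

int main(void)
{
        printf("penalty 100, 64 pages over  -> %lu jiffies\n",
               scaled_penalty(100, 64));
        printf("penalty 100, 512 pages over -> %lu jiffies\n",
               scaled_penalty(100, 512));
        return 0;
}
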
2211 unsigned int nr_pages) in try_charge_memcg() argument
2213 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages); in try_charge_memcg()
2225 if (consume_stock(memcg, nr_pages)) in try_charge_memcg()
2240 if (batch > nr_pages) { in try_charge_memcg()
2241 batch = nr_pages; in try_charge_memcg()
2264 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, in try_charge_memcg()
2268 if (mem_cgroup_margin(mem_over_limit) >= nr_pages) in try_charge_memcg()
2288 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER)) in try_charge_memcg()
2307 get_order(nr_pages * PAGE_SIZE))) { in try_charge_memcg()
2334 page_counter_charge(&memcg->memory, nr_pages); in try_charge_memcg()
2336 page_counter_charge(&memcg->memsw, nr_pages); in try_charge_memcg()
2341 if (batch > nr_pages) in try_charge_memcg()
2342 refill_stock(memcg, batch - nr_pages); in try_charge_memcg()
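
try_charge_memcg() rounds the request up to a batch of at least MEMCG_CHARGE_BATCH pages (line 2213), tries the per-CPU stock first (line 2225), and after a successful charge hands the surplus back to the stock (lines 2341-2342). A sketch of that batching arithmetic alone, again with an assumed batch size:

#include <stdio.h>

#define MEMCG_CHARGE_BATCH 64U          /* assumed value for the sketch */

/* How many surplus pages would go back into the per-CPU stock after
 * charging nr_pages with batched accounting. */
static unsigned int surplus_after_charge(unsigned int nr_pages)
{
        unsigned int batch = nr_pages > MEMCG_CHARGE_BATCH ?
                             nr_pages : MEMCG_CHARGE_BATCH;

        return batch - nr_pages;        /* 0 when the request fills the batch */
}

int main(void)
{
        printf("charge 1 page    -> refill %u\n", surplus_after_charge(1));
        printf("charge 512 pages -> refill %u\n", surplus_after_charge(512));
        return 0;
}
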
2616 unsigned int nr_pages) in obj_cgroup_uncharge_pages() argument
2622 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in obj_cgroup_uncharge_pages()
2623 memcg1_account_kmem(memcg, -nr_pages); in obj_cgroup_uncharge_pages()
2624 refill_stock(memcg, nr_pages); in obj_cgroup_uncharge_pages()
2638 unsigned int nr_pages) in obj_cgroup_charge_pages() argument
2645 ret = try_charge_memcg(memcg, gfp, nr_pages); in obj_cgroup_charge_pages()
2649 mod_memcg_state(memcg, MEMCG_KMEM, nr_pages); in obj_cgroup_charge_pages()
2650 memcg1_account_kmem(memcg, nr_pages); in obj_cgroup_charge_pages()
2692 unsigned int nr_pages = 1 << order; in __memcg_kmem_uncharge_page() local
2698 obj_cgroup_uncharge_pages(objcg, nr_pages); in __memcg_kmem_uncharge_page()
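
The kmem page path converts between an allocation order and a page count: __memcg_kmem_uncharge_page() uses 1 << order (line 2692), while the OOM check in try_charge_memcg() goes the other way with get_order(nr_pages * PAGE_SIZE) (line 2307). A sketch of that round trip; get_order() below is a simplified stand-in for the kernel helper, and the page size is assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for get_order(): smallest order whose page run covers size bytes. */
static unsigned int get_order(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned int order = 3;
        unsigned int nr_pages = 1U << order;    /* cf. line 2692 */

        printf("order %u -> %u pages -> order %u\n", order, nr_pages,
               get_order((unsigned long)nr_pages * PAGE_SIZE));
        return 0;
}
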
2795 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT; in drain_obj_stock() local
2798 if (nr_pages) { in drain_obj_stock()
2803 mod_memcg_state(memcg, MEMCG_KMEM, -nr_pages); in drain_obj_stock()
2804 memcg1_account_kmem(memcg, -nr_pages); in drain_obj_stock()
2805 __refill_stock(memcg, nr_pages); in drain_obj_stock()
2872 unsigned int nr_pages = 0; in refill_obj_stock() local
2888 nr_pages = stock->nr_bytes >> PAGE_SHIFT; in refill_obj_stock()
2895 if (nr_pages) in refill_obj_stock()
2896 obj_cgroup_uncharge_pages(objcg, nr_pages); in refill_obj_stock()
2901 unsigned int nr_pages, nr_bytes; in obj_cgroup_charge() local
2930 nr_pages = size >> PAGE_SHIFT; in obj_cgroup_charge()
2934 nr_pages += 1; in obj_cgroup_charge()
2936 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages); in obj_cgroup_charge()
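
obj_cgroup_charge() accounts in bytes but charges the page counters in whole pages, rounding up whenever the requested size leaves a sub-page remainder (lines 2930-2936). A sketch of that rounding, with an assumed 4 KiB page size:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Round a byte-sized charge up to whole pages, modelled on lines 2930-2934. */
static unsigned int charged_pages(unsigned long size)
{
        unsigned int nr_pages = size >> PAGE_SHIFT;

        if (size & (PAGE_SIZE - 1))     /* partial page left over */
                nr_pages += 1;
        return nr_pages;
}

int main(void)
{
        printf("charge of %lu bytes -> %u page(s)\n",
               3 * PAGE_SIZE + 100, charged_pages(3 * PAGE_SIZE + 100));
        printf("charge of %lu bytes -> %u page(s)\n",
               2 * PAGE_SIZE, charged_pages(2 * PAGE_SIZE));
        return 0;
}

The drain_obj_stock() and refill_obj_stock() entries above show the inverse direction: whole pages that have accumulated in the byte-level cache are recovered with nr_bytes >> PAGE_SHIFT and handed back in page units (lines 2795-2805, 2888-2896).
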
4105 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_high_write() local
4108 if (nr_pages <= high) in memory_high_write()
4120 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high, in memory_high_write()
4154 unsigned long nr_pages = page_counter_read(&memcg->memory); in memory_max_write() local
4156 if (nr_pages <= max) in memory_max_write()
4169 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max, in memory_max_write()
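
memory_high_write() and memory_max_write() both read the current usage and ask reclaim only for the overage, nr_pages - high (or nr_pages - max), skipping reclaim entirely when usage is already within the new limit (lines 4105-4169). A sketch of that target calculation:

#include <stdio.h>

/* Pages the high/max writers would ask reclaim for: nothing when usage
 * is already at or below the new limit, otherwise just the overage. */
static unsigned long reclaim_target(unsigned long usage, unsigned long limit)
{
        return usage <= limit ? 0 : usage - limit;
}

int main(void)
{
        printf("usage 1000, new limit 600  -> reclaim %lu page(s)\n",
               reclaim_target(1000, 600));
        printf("usage 1000, new limit 2048 -> reclaim %lu page(s)\n",
               reclaim_target(1000, 2048));
        return 0;
}
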
4599 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) in mem_cgroup_swapin_uncharge_swap() argument
4619 mem_cgroup_uncharge_swap(entry, nr_pages); in mem_cgroup_swapin_uncharge_swap()
4657 long nr_pages; in uncharge_folio() local
4694 nr_pages = folio_nr_pages(folio); in uncharge_folio()
4697 ug->nr_memory += nr_pages; in uncharge_folio()
4698 ug->nr_kmem += nr_pages; in uncharge_folio()
4705 ug->nr_memory += nr_pages; in uncharge_folio()
4753 long nr_pages = folio_nr_pages(new); in mem_cgroup_replace_folio() local
4758 VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new); in mem_cgroup_replace_folio()
4774 page_counter_charge(&memcg->memory, nr_pages); in mem_cgroup_replace_folio()
4776 page_counter_charge(&memcg->memsw, nr_pages); in mem_cgroup_replace_folio()
4867 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, in mem_cgroup_charge_skmem() argument
4871 return memcg1_charge_skmem(memcg, nr_pages, gfp_mask); in mem_cgroup_charge_skmem()
4873 if (try_charge(memcg, gfp_mask, nr_pages) == 0) { in mem_cgroup_charge_skmem()
4874 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages); in mem_cgroup_charge_skmem()
4886 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) in mem_cgroup_uncharge_skmem() argument
4889 memcg1_uncharge_skmem(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
4893 mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); in mem_cgroup_uncharge_skmem()
4895 refill_stock(memcg, nr_pages); in mem_cgroup_uncharge_skmem()
5034 unsigned int nr_pages = folio_nr_pages(folio); in __mem_cgroup_try_charge_swap() local
5055 !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) { in __mem_cgroup_try_charge_swap()
5063 if (nr_pages > 1) in __mem_cgroup_try_charge_swap()
5064 mem_cgroup_id_get_many(memcg, nr_pages - 1); in __mem_cgroup_try_charge_swap()
5065 mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); in __mem_cgroup_try_charge_swap()
5077 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) in __mem_cgroup_uncharge_swap() argument
5082 id = swap_cgroup_clear(entry, nr_pages); in __mem_cgroup_uncharge_swap()
5088 page_counter_uncharge(&memcg->memsw, nr_pages); in __mem_cgroup_uncharge_swap()
5090 page_counter_uncharge(&memcg->swap, nr_pages); in __mem_cgroup_uncharge_swap()
5092 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages); in __mem_cgroup_uncharge_swap()
5093 mem_cgroup_id_put_many(memcg, nr_pages); in __mem_cgroup_uncharge_swap()
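
Finally, the swap accounting entries charge all nr_pages entries of a folio in one page_counter_try_charge() call and take nr_pages - 1 additional memcg id references at charge time (lines 5063-5064), while uncharge drops nr_pages references (line 5093). A toy model of that pairing, under the assumption that one reference is already held when the charge is made:

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int nr_pages = 8;      /* e.g. a multi-page folio going to swap */
        long refs = 1;                  /* assumed: one reference held at charge time */

        if (nr_pages > 1)               /* cf. mem_cgroup_id_get_many(), line 5064 */
                refs += nr_pages - 1;
        assert(refs == (long)nr_pages); /* one reference per swapped-out page */

        refs -= nr_pages;               /* cf. mem_cgroup_id_put_many(), line 5093 */
        printf("references remaining: %ld\n", refs);
        return 0;
}
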