Lines matching refs: gfp_mask (all hits below are in mm/page_alloc.c; the leading number is the source line, followed by the enclosing function)

3212 				unsigned int alloc_flags, gfp_t gfp_mask)  in zone_watermark_fast()  argument
3291 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3299 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); in alloc_flags_nofragment()
3323 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma() argument
3327 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags_cma()
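----
The check above keys off the migratetype derived from the caller's gfp_mask: only __GFP_MOVABLE allocations are eligible for CMA pageblocks. A minimal sketch of that mapping, assuming a kernel context (names from include/linux/gfp.h and include/linux/mmzone.h):

        #include <linux/bug.h>
        #include <linux/gfp.h>

        static void migratetype_demo(void)
        {
                /* kernel data is unmovable: never placed in CMA pageblocks */
                WARN_ON(gfp_migratetype(GFP_KERNEL) != MIGRATE_UNMOVABLE);
                /* user pages are movable: gfp_to_alloc_flags_cma() may add CMA eligibility */
                WARN_ON(gfp_migratetype(GFP_HIGHUSER_MOVABLE) != MIGRATE_MOVABLE);
        }
----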
3338 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3361 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3423 gfp_mask)) in get_page_from_freelist()
3432 gfp_mask)) { in get_page_from_freelist()
3455 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3475 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
3477 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3511 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
3520 if (!(gfp_mask & __GFP_NOMEMALLOC)) in warn_alloc_show_mem()
3524 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) in warn_alloc_show_mem()
3527 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); in warn_alloc_show_mem()
3530 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
3536 if ((gfp_mask & __GFP_NOWARN) || in warn_alloc()
3538 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) in warn_alloc()
3545 current->comm, &vaf, gfp_mask, &gfp_mask, in warn_alloc()
3552 warn_alloc_show_mem(gfp_mask, nodemask); in warn_alloc()
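----
warn_alloc() is the allocation-failure splat, and the first check above shows that __GFP_NOWARN suppresses it. A hedged sketch of a caller that expects failure and handles NULL quietly:

        #include <linux/gfp.h>

        static struct page *try_order10(void)
        {
                /* order-10 often fails on a fragmented system; handle NULL, no splat */
                return alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 10);
        }
----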
3556 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
3562 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3569 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3575 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
3582 .gfp_mask = gfp_mask, in __alloc_pages_may_oom()
3606 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
3626 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) in __alloc_pages_may_oom()
3645 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { in __alloc_pages_may_oom()
3652 if (gfp_mask & __GFP_NOFAIL) in __alloc_pages_may_oom()
3653 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
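----
Per the (__GFP_RETRY_MAYFAIL | __GFP_THISNODE) bail-out above, such allocations never invoke the OOM killer, so the caller needs its own fallback. A sketch under that assumption (either result can be released with kvfree()):

        #include <linux/slab.h>
        #include <linux/vmalloc.h>

        static void *table_alloc(size_t size)
        {
                /* try hard for physically contiguous memory, but never OOM-kill */
                void *p = kmalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);

                if (!p)
                        p = vmalloc(size);      /* caller-side fallback */
                return p;
        }
----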
3670 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3685 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3702 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
3706 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3795 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3835 static bool __need_reclaim(gfp_t gfp_mask) in __need_reclaim() argument
3838 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) in __need_reclaim()
3845 if (gfp_mask & __GFP_NOLOCKDEP) in __need_reclaim()
3861 void fs_reclaim_acquire(gfp_t gfp_mask) in fs_reclaim_acquire() argument
3863 gfp_mask = current_gfp_context(gfp_mask); in fs_reclaim_acquire()
3865 if (__need_reclaim(gfp_mask)) { in fs_reclaim_acquire()
3866 if (gfp_mask & __GFP_FS) in fs_reclaim_acquire()
3878 void fs_reclaim_release(gfp_t gfp_mask) in fs_reclaim_release() argument
3880 gfp_mask = current_gfp_context(gfp_mask); in fs_reclaim_release()
3882 if (__need_reclaim(gfp_mask)) { in fs_reclaim_release()
3883 if (gfp_mask & __GFP_FS) in fs_reclaim_release()
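----
fs_reclaim_acquire()/release() are the lockdep half of the story; the current_gfp_context() calls above are why a memalloc_nofs_save() scope turns a plain GFP_KERNEL into an effective GFP_NOFS. A minimal sketch:

        #include <linux/sched/mm.h>
        #include <linux/slab.h>

        static void *fs_transaction_alloc(size_t size)
        {
                unsigned int nofs = memalloc_nofs_save();
                /* __GFP_FS is masked off by current_gfp_context() in this scope */
                void *p = kmalloc(size, GFP_KERNEL);

                memalloc_nofs_restore(nofs);
                return p;
        }
----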
3916 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
3926 fs_reclaim_acquire(gfp_mask); in __perform_reclaim()
3929 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
3933 fs_reclaim_release(gfp_mask); in __perform_reclaim()
3942 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
3951 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
3956 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
3975 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
3988 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
3995 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) in gfp_to_alloc_flags() argument
4014 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); in gfp_to_alloc_flags()
4016 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { in gfp_to_alloc_flags()
4021 if (!(gfp_mask & __GFP_NOMEMALLOC)) { in gfp_to_alloc_flags()
4038 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); in gfp_to_alloc_flags()
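----
The (__GFP_HIGH | __GFP_KSWAPD_RECLAIM) extraction at the top of gfp_to_alloc_flags() is what separates the common non-sleeping masks: GFP_ATOMIC carries both bits (reserve access plus a kswapd kick), GFP_NOWAIT only the kswapd bit. An illustrative pair of callers:

        #include <linux/slab.h>

        static void *irq_ctx_alloc(size_t size)
        {
                /* no __GFP_DIRECT_RECLAIM: never sleeps; __GFP_HIGH grants reserve access */
                return kmalloc(size, GFP_ATOMIC);
        }

        static void *opportunistic_alloc(size_t size)
        {
                /* kswapd kick only, no reserve access: fails sooner than GFP_ATOMIC */
                return kmalloc(size, GFP_NOWAIT);
        }
----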
4062 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) in __gfp_pfmemalloc_flags() argument
4064 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) in __gfp_pfmemalloc_flags()
4066 if (gfp_mask & __GFP_MEMALLOC) in __gfp_pfmemalloc_flags()
4080 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed() argument
4082 return !!__gfp_pfmemalloc_flags(gfp_mask); in gfp_pfmemalloc_allowed()
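----
A hypothetical sketch of the rare intended user of __GFP_MEMALLOC, checked in __gfp_pfmemalloc_flags() above: a path whose allocation itself makes progress toward freeing memory, e.g. transmitting swap-out I/O over the network:

        #include <linux/slab.h>

        static void *swapout_tx_alloc(size_t size)
        {
                /*
                 * May dip below all watermarks; legitimate only because
                 * completing this I/O frees more memory than it consumes.
                 */
                return kmalloc(size, GFP_ATOMIC | __GFP_MEMALLOC);
        }
----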
4096 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4133 !__cpuset_zone_allowed(zone, gfp_mask)) in should_reclaim_retry()
4206 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4209 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; in __alloc_pages_slowpath()
4210 bool can_compact = gfp_compaction_allowed(gfp_mask); in __alloc_pages_slowpath()
4211 bool nofail = gfp_mask & __GFP_NOFAIL; in __alloc_pages_slowpath()
4256 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); in __alloc_pages_slowpath()
4274 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { in __alloc_pages_slowpath()
4283 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4289 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4305 && !gfp_pfmemalloc_allowed(gfp_mask)) { in __alloc_pages_slowpath()
4306 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4317 if (costly_order && (gfp_mask & __GFP_NORETRY)) { in __alloc_pages_slowpath()
4351 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4353 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); in __alloc_pages_slowpath()
4355 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | in __alloc_pages_slowpath()
4370 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4383 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4389 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4395 if (gfp_mask & __GFP_NORETRY) in __alloc_pages_slowpath()
4403 !(gfp_mask & __GFP_RETRY_MAYFAIL))) in __alloc_pages_slowpath()
4406 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4432 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4439 (gfp_mask & __GFP_NOMEMALLOC))) in __alloc_pages_slowpath()
4477 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); in __alloc_pages_slowpath()
4485 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
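----
Taken together, the slowpath above is steered almost entirely by bits in gfp_mask: __GFP_NORETRY bails after one reclaim/compaction pass, __GFP_RETRY_MAYFAIL retries but forgoes the OOM killer, and __GFP_NOFAIL loops until success. A hedged caller-side sketch of the opportunistic high-order pattern:

        #include <linux/gfp.h>
        #include <linux/mm.h>

        static void *ring_alloc(unsigned int order)
        {
                struct page *page;

                /* one pass through reclaim/compaction, then give up quietly */
                page = alloc_pages(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, order);
                if (!page)
                        /* illustrative fallback: settle for a single page */
                        page = alloc_pages(GFP_KERNEL, 0);
                return page ? page_address(page) : NULL;
        }
----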
4491 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4496 ac->highest_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
4497 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
4499 ac->migratetype = gfp_migratetype(gfp_mask); in prepare_alloc_pages()
4513 might_alloc(gfp_mask); in prepare_alloc_pages()
4515 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
4518 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); in prepare_alloc_pages()
4521 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in prepare_alloc_pages()
4795 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) in get_free_pages_noprof() argument
4799 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); in get_free_pages_noprof()
4806 unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) in get_zeroed_page_noprof() argument
4808 return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); in get_zeroed_page_noprof()
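----
Note that get_free_pages_noprof() strips __GFP_HIGHMEM (line 4799 above) because these helpers return a kernel virtual address, which a highmem page may not have. Typical use via the normal wrappers:

        #include <linux/errno.h>
        #include <linux/gfp.h>

        static int demo_zeroed_page(void)
        {
                unsigned long addr = get_zeroed_page(GFP_KERNEL);

                if (!addr)
                        return -ENOMEM;
                /* ... use the zeroed page ... */
                free_page(addr);
                return 0;
        }
----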
4894 void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) in alloc_pages_exact_noprof() argument
4899 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) in alloc_pages_exact_noprof()
4900 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); in alloc_pages_exact_noprof()
4902 addr = get_free_pages_noprof(gfp_mask, order); in alloc_pages_exact_noprof()
4919 void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid_noprof() argument
4924 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) in alloc_pages_exact_nid_noprof()
4925 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); in alloc_pages_exact_nid_noprof()
4927 p = alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_exact_nid_noprof()
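----
alloc_pages_exact() masks off __GFP_COMP (lines 4899-4900 above) because it frees the unused tail pages of the rounded-up order individually, which a compound page would not allow. Caller sketch:

        #include <linux/gfp.h>

        static void *demo_exact(size_t size)
        {
                /* exactly 'size' usable bytes; the tail of the order is given back */
                void *p = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);

                /* release later with free_pages_exact(p, size) */
                return p;
        }
----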
6274 .gfp_mask = cc->gfp_mask, in __alloc_contig_migrate_range()
6332 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) in __alloc_contig_migrate_range()
6344 static void split_free_pages(struct list_head *list, gfp_t gfp_mask) in split_free_pages() argument
6355 post_alloc_hook(page, order, gfp_mask); in split_free_pages()
6370 static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) in __alloc_contig_verify_gfp_mask() argument
6381 gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | in __alloc_contig_verify_gfp_mask()
6388 if (gfp_mask & ~(reclaim_mask | action_mask)) in __alloc_contig_verify_gfp_mask()
6399 *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | in __alloc_contig_verify_gfp_mask()
6428 unsigned migratetype, gfp_t gfp_mask) in alloc_contig_range_noprof() argument
6444 gfp_mask = current_gfp_context(gfp_mask); in alloc_contig_range_noprof()
6445 if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) in alloc_contig_range_noprof()
6531 if (!(gfp_mask & __GFP_COMP)) { in alloc_contig_range_noprof()
6532 split_free_pages(cc.freepages, gfp_mask); in alloc_contig_range_noprof()
6544 prep_new_page(head, order, gfp_mask, 0); in alloc_contig_range_noprof()
6558 unsigned long nr_pages, gfp_t gfp_mask) in __alloc_contig_pages() argument
6563 gfp_mask); in __alloc_contig_pages()
6620 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages_noprof() argument
6628 zonelist = node_zonelist(nid, gfp_mask); in alloc_contig_pages_noprof()
6630 gfp_zone(gfp_mask), nodemask) { in alloc_contig_pages_noprof()
6645 gfp_mask); in alloc_contig_pages_noprof()
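----
A caller-side sketch of the contiguous allocator whose internals appear above, assuming CONFIG_CONTIG_ALLOC is enabled; the helper name here is illustrative:

        #include <linux/gfp.h>
        #include <linux/topology.h>

        static struct page *grab_contig(unsigned long nr_pages)
        {
                struct page *page;

                page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_NOWARN,
                                          numa_node_id(), NULL);
                /* release with free_contig_range(page_to_pfn(page), nr_pages) */
                return page;
        }
----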