/linux/include/linux/
gfp.h
    185:  * We get the zone list from the current node and the gfp_mask.
    245: static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
    247:         gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
    255:         pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
    264: __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
    267:         warn_if_node_offline(nid, gfp_mask);
    269:         return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
    290: static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
    296:         return __alloc_pages_node_noprof(nid, gfp_mask, order);
    309: static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
    Definitions taking a gfp_mask argument:
    214: warn_if_node_offline(int this_node, gfp_t gfp_mask)
    233: __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
    272: alloc_pages(gfp_t gfp_mask, unsigned int order)
    288: alloc_page(gfp_mask)                          (macro)
    304: __get_free_page(gfp_mask)                     (macro)
    307: __get_dma_pages(gfp_mask, order)              (macro)
    320: page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align)
    328: page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask)
    368: gfp_compaction_allowed(gfp_t gfp_mask)
    (further matches elided)

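A minimal sketch of how callers use the allocator entry points listed above; the demo_* function name is illustrative, everything else is the stock gfp.h API:

    #include <linux/gfp.h>

    static int demo_page_alloc(void)
    {
            struct page *page;
            unsigned long addr;

            /* One zeroed page; GFP_KERNEL allows direct reclaim, so may sleep. */
            page = alloc_page(GFP_KERNEL | __GFP_ZERO);
            if (!page)
                    return -ENOMEM;
            __free_page(page);

            /* Order-2 block (four contiguous pages) as a kernel virtual address. */
            addr = __get_free_pages(GFP_KERNEL, 2);
            if (!addr)
                    return -ENOMEM;
            free_pages(addr, 2);
            return 0;
    }
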
mempool.h
     15: typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
     43:                 gfp_t gfp_mask, int node_id);
     55:                 gfp_t gfp_mask, int nid);
     66: extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc;
     78: void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
     90: void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
    100: void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data);
    117: void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);

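A sketch (not from the listing) of a pool guaranteeing at least four 256-byte buffers, built on the mempool_kmalloc()/mempool_kfree() pair declared above via the kmalloc-pool wrapper; demo_* names are illustrative:

    #include <linux/mempool.h>

    static mempool_t *demo_pool;

    static int demo_pool_setup(void)
    {
            demo_pool = mempool_create_kmalloc_pool(4, 256);
            return demo_pool ? 0 : -ENOMEM;
    }

    static void demo_pool_use(void)
    {
            /* With __GFP_DIRECT_RECLAIM set (GFP_NOIO here), mempool_alloc()
             * waits for a returned element rather than failing. */
            void *buf = mempool_alloc(demo_pool, GFP_NOIO);

            /* ... use buf ... */
            mempool_free(buf, demo_pool);
    }

    static void demo_pool_teardown(void)
    {
            mempool_destroy(demo_pool);
    }
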
cpuset.h
     85: extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
     87: static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
     89:         return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
     92: static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
     95:         return __cpuset_zone_allowed(z, gfp_mask);
    221: static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)   (stub variant)
    226: static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)     (stub variant)

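A sketch of how an allocator loop might consult these hooks before taking pages from a zone, modelled on the __cpuset_zone_allowed() check visible in the page_alloc.c results below; demo_* is a hypothetical name, not a kernel function:

    #include <linux/cpuset.h>
    #include <linux/mmzone.h>

    static struct zone *demo_first_allowed_zone(gfp_t gfp_mask)
    {
            struct zone *zone;

            for_each_zone(zone)
                    if (populated_zone(zone) &&
                        cpuset_zone_allowed(zone, gfp_mask))
                            return zone;
            return NULL;
    }
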
blk-crypto.h
     84:                 gfp_t gfp_mask);
    115: int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
    128:                 gfp_t gfp_mask)                          (in bio_crypt_clone())
    131:         return __bio_crypt_clone(dst, src, gfp_mask);

page_owner.h
    13:                 unsigned short order, gfp_t gfp_mask);
    29:                 unsigned short order, gfp_t gfp_mask)    (in set_page_owner())
    32:         __set_page_owner(page, order, gfp_mask);
    61:                 unsigned short order, gfp_t gfp_mask)    (in set_page_owner(), stub variant)

fault-inject.h
    94: bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
    97: bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
    99: static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)   (stub variant)

/linux/block/
blk-lib.c
     39:                 sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)   (in blk_alloc_discard_bio())
     47:         bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
     64:                 sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)   (in __blkdev_issue_discard())
     69:                 gfp_mask)))
     86:                 sector_t nr_sects, gfp_t gfp_mask)                      (in blkdev_issue_discard())
     93:         ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
    107:                 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,     (in __blkdev_issue_write_zeroes())
    125:         bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
    154:                 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,     (in __blkdev_issue_zero_pages())
    166:                 REQ_OP_WRITE, gfp_mask);
    (further matches elided)

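A sketch of calling the synchronous helper above to discard a 1 MiB range; demo_* is illustrative, and sector/nr_sects are in 512-byte units as elsewhere in the block layer:

    #include <linux/blkdev.h>
    #include <linux/sizes.h>

    static int demo_discard(struct block_device *bdev)
    {
            sector_t start = 0;
            sector_t nr = SZ_1M >> SECTOR_SHIFT;

            /* GFP_KERNEL: may sleep; issues the discard and waits for it. */
            return blkdev_issue_discard(bdev, start, nr, GFP_KERNEL);
    }
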
blk-map.c
     22:                 gfp_t gfp_mask)                          (in bio_alloc_map_data())
     29:         bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
    132:                 struct iov_iter *iter, gfp_t gfp_mask)   (in bio_copy_user_iov())
    142:         bmd = bio_alloc_map_data(iter, gfp_mask);
    157:         bio = bio_kmalloc(nr_pages, gfp_mask);
    185:                 page = alloc_page(GFP_NOIO | gfp_mask);
    254:                 unsigned int nr_vecs, gfp_t gfp_mask)    (in blk_rq_map_bio_alloc())
    259:         bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
    264:         bio = bio_kmalloc(nr_vecs, gfp_mask);
    273:                 gfp_t gfp_mask)                          (in bio_map_user_iov())
    (further matches elided)

blk-crypto.c
     92:                 const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)   (in bio_crypt_set_ctx())
    100:         WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));
    102:         bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    116: int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
    118:         dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
    304:                 gfp_t gfp_mask)                                              (in __blk_crypto_rq_bio_prep())
    307:         rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

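A sketch of attaching an inline-encryption context to a bio. The WARN_ON_ONCE() at line 100 above documents the contract this example follows: gfp_mask must allow direct reclaim, because the mempool can only wait for an element when the caller can sleep. Key setup is assumed done elsewhere; demo_* is illustrative:

    #include <linux/blk-crypto.h>

    static void demo_set_ctx(struct bio *bio,
                             const struct blk_crypto_key *key, u64 lblk_num)
    {
            u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };

            /* GFP_NOIO includes __GFP_DIRECT_RECLAIM, satisfying the WARN. */
            bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
    }
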
/linux/mm/
mempool.c
    197:                 gfp_t gfp_mask, int node_id)             (in mempool_init_node())
    207:                         gfp_mask, node_id);
    217:         element = pool->alloc(gfp_mask, pool->pool_data);
    259:  * @gfp_mask: memory allocation flags
    272:                 gfp_t gfp_mask, int node_id)             (in mempool_create())
    276:         pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);   (in mempool_create_node())
    281:                         gfp_mask, node_id)) {
    374:  * @gfp_mask: the usual allocation bitmask.
    384: void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
    391:         VM_WARN_ON_ONCE(gfp_mask …                       (truncated in the search output)
    Definitions taking a gfp_mask argument:
    278: mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id)
    390: mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
    561: mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
    580: mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
    593: mempool_kvmalloc(gfp_t gfp_mask, void *pool_data)
    610: mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
    (further matches elided)

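Alongside the heap-allocated pools above, mempool.c also backs the embedded-pool variant; a sketch pairing mempool_init_kmalloc_pool() with mempool_exit(), demo_* names illustrative:

    #include <linux/mempool.h>

    static mempool_t demo_pool;    /* embedded, not kzalloc'd */

    static int demo_init(void)
    {
            /* At least two 128-byte elements pre-allocated. */
            return mempool_init_kmalloc_pool(&demo_pool, 2, 128);
    }

    static void demo_use(void)
    {
            /* Sleepable mask: mempool_alloc() waits instead of failing. */
            void *p = mempool_alloc(&demo_pool, GFP_NOIO);

            mempool_free(p, &demo_pool);
    }

    static void demo_fini(void)
    {
            mempool_exit(&demo_pool);
    }
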
page_alloc.c
    3011: noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    3013:         return __should_fail_alloc_page(gfp_mask, order);
    3135:                 unsigned int alloc_flags, gfp_t gfp_mask)
    3214: alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
    3222:         alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
    3245: /* Must be called after current_gfp_context() which can change gfp_mask */
    3246: static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
    3250:         if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
    3261: get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
    3284:                 !__cpuset_zone_allowed(zone, gfp_mask)
    Definitions taking a gfp_mask argument:
    2926: should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    3050: zone_watermark_fast(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx, unsigned int alloc_flags, gfp_t gfp_mask)
    3129: alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
    3161: gfp_to_alloc_flags_cma(gfp_t gfp_mask, unsigned int alloc_flags)
    3176: get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, const struct alloc_context *ac)
    3355: warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
    3374: warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
    3400: __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac)
    3420: __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac, unsigned long *did_some_progress)
    3515: __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)
    3640: __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, enum compact_priority prio, enum compact_result *compact_result)   (second definition)
    3680: __need_reclaim(gfp_t gfp_mask)
    3706: fs_reclaim_acquire(gfp_t gfp_mask)
    3723: fs_reclaim_release(gfp_t gfp_mask)
    3761: __perform_reclaim(gfp_t gfp_mask, unsigned int order, const struct alloc_context *ac)
    3787: __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, unsigned int alloc_flags, const struct alloc_context *ac, unsigned long *did_some_progress)
    3820: wake_all_kswapds(unsigned int order, gfp_t gfp_mask, const struct alloc_context *ac)
    3840: gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
    3907: __gfp_pfmemalloc_flags(gfp_t gfp_mask)
    3925: gfp_pfmemalloc_allowed(gfp_t gfp_mask)
    3941: should_reclaim_retry(gfp_t gfp_mask, unsigned order, struct alloc_context *ac, int alloc_flags, bool did_some_progress, int *no_progress_loops)
    4046: __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, struct alloc_context *ac)
    4324: prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags)
    4618: __get_free_pages(gfp_t gfp_mask, unsigned int order)
    4629: get_zeroed_page(gfp_t gfp_mask)
    4690: __page_frag_cache_refill(struct page_frag_cache *nc, gfp_t gfp_mask)
    4730: __page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, gfp_t gfp_mask, unsigned int align_mask)
    4849: alloc_pages_exact(size_t size, gfp_t gfp_mask)
    4874: alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
    6340: alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask)
    6466: __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask)
    6526: alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask)
    (further matches elided)

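Of the public entry points above, alloc_pages_exact() (line 4849) is worth a sketch: it hands back an exact byte count rather than a power-of-two order, freeing the tail of the final block, and pairs with free_pages_exact(); demo_* is illustrative:

    #include <linux/gfp.h>

    static void *demo_exact_alloc(size_t bytes)
    {
            /* Physically contiguous, zeroed, exactly 'bytes' usable. */
            void *p = alloc_pages_exact(bytes, GFP_KERNEL | __GFP_ZERO);

            return p;    /* caller releases with free_pages_exact(p, bytes) */
    }
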
page_owner.c
     27:         gfp_t gfp_mask;                                  (struct member)
    166:                 gfp_t gfp_mask)                          (in add_stack_record_to_list())
    172:         stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));
    194: static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,
    214:         add_stack_record_to_list(stack_record, gfp_mask);
    235:                 gfp_t gfp_mask,                          (in dec_stack_record_count())
    246:         page_owner->gfp_mask = gfp_mask;                 (in __update_page_owner_handle())
    314:                 gfp_t gfp_mask)
    320:         handle = save_stack(gfp_mask);                   (in __set_page_owner())
    Definitions taking a gfp_mask argument:
    199: inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, int nr_base_pages)
    240: __update_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm)
    319: __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask)
    603: gfp_t gfp_mask;                                         (local in __dump_page_owner())
    (further matches elided)

fail_page_alloc.c
    24: bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
    30:         if (gfp_mask & __GFP_NOFAIL)
    32:         if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
    35:                 (gfp_mask & __GFP_DIRECT_RECLAIM))
    39:         if (gfp_mask & __GFP_NOWARN)

swap_state.c
    429: struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    470:         folio = (struct folio *)alloc_pages_mpol(gfp_mask, 0,
    514:         if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
    518:         if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
    552: struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    562:         folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
    635:  * @gfp_mask: memory allocation flags
    650: struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
    680:                 gfp_mask, mpol, ilx, &page_allocated, false);
    697:         folio = __read_swap_cache_async(entry, gfp_mask, mpol, …   (truncated in the search output)
    Definitions taking a gfp_mask argument:
    815: swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
    894: swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf)
    (further matches elided)

/linux/fs/nfs/blocklayout/
dev.c
    231:                 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
    236:                 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)   (in bl_parse_simple())
    242:         dev = bl_resolve_deviceid(server, v, gfp_mask);
    327:                 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)   (in bl_parse_scsi())
    385:                 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)   (in bl_parse_slice())
    390:         ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);
    401:                 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)   (in bl_parse_concat())
    408:                 sizeof(struct pnfs_block_dev), gfp_mask);
    414:                 volumes, v->concat.volumes[i], gfp_mask);
    430:                 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)   (in bl_parse_stripe())
    (further matches elided)

/linux/fs/btrfs/
ulist.h
    49: struct ulist *ulist_alloc(gfp_t gfp_mask);
    51: int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
    53:                 u64 *old_aux, gfp_t gfp_mask);
    58:                 void **old_aux, gfp_t gfp_mask)          (in ulist_add_merge_ptr())
    62:         int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
    66:         return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);

ulist.c
     96: struct ulist *ulist_alloc(gfp_t gfp_mask)
     98:         struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
    192: int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
    194:         return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
    198:                 u64 *old_aux, gfp_t gfp_mask)            (in ulist_add_merge())
    209:         node = kmalloc(sizeof(*node), gfp_mask);

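A sketch of typical use of the ulist API above, as btrfs backref walkers do; GFP_KERNEL is illustrative (callers on filesystem paths often pass GFP_NOFS), and demo_* is a hypothetical name:

    #include <linux/printk.h>
    #include "ulist.h"

    static int demo_ulist(void)
    {
            struct ulist *ul;
            struct ulist_iterator uiter;
            struct ulist_node *node;
            int ret;

            ul = ulist_alloc(GFP_KERNEL);
            if (!ul)
                    return -ENOMEM;

            /* Returns 1 if added, 0 if val was already present, <0 on error. */
            ret = ulist_add(ul, 4096, 0, GFP_KERNEL);
            if (ret < 0)
                    goto out;

            ULIST_ITER_INIT(&uiter);
            while ((node = ulist_next(ul, &uiter)))
                    pr_info("val=%llu aux=%llu\n", node->val, node->aux);
            ret = 0;
    out:
            ulist_free(ul);
            return ret;
    }
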
/linux/include/linux/sched/
mm.h
    289: extern void fs_reclaim_acquire(gfp_t gfp_mask);
    290: extern void fs_reclaim_release(gfp_t gfp_mask);
    294: static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
    295: static inline void fs_reclaim_release(gfp_t gfp_mask) { }
    326:  * @gfp_mask: gfp_t flags that would be used to allocate
    332: static inline void might_alloc(gfp_t gfp_mask)
    334:         fs_reclaim_acquire(gfp_mask);
    335:         fs_reclaim_release(gfp_mask);
    337:         might_sleep_if(gfpflags_allow_blocking(gfp_mask));
    Definitions taking a gfp_mask argument:
    272: fs_reclaim_acquire(gfp_t gfp_mask)
    273: fs_reclaim_release(gfp_t gfp_mask)
    310: might_alloc(gfp_t gfp_mask)

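A sketch of annotating a caching allocation helper with might_alloc(), so lockdep's fs_reclaim tracking and might_sleep_if() fire even on the fast path where no real allocation happens; struct demo_cache and the demo_* name are hypothetical (and unlocked, for brevity):

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    struct demo_cache {
            void *cached;
            size_t obj_size;
    };

    static void *demo_cache_get(struct demo_cache *c, gfp_t gfp)
    {
            might_alloc(gfp);    /* validate the gfp context up front */

            if (c->cached) {     /* fast path: reuse, no allocation */
                    void *p = c->cached;

                    c->cached = NULL;
                    return p;
            }
            return kmalloc(c->obj_size, gfp);
    }
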
/linux/net/sunrpc/auth_gss/
gss_krb5_keys.c
    152:                 const struct xdr_netobj *in_constant, gfp_t gfp_mask)   (in krb5_DK())
    174:         inblockdata = kmalloc(blocksize, gfp_mask);
    178:         outblockdata = kmalloc(blocksize, gfp_mask);
    271:                 gfp_t gfp_mask)                                         (in krb5_derive_key_v2())
    277:         inblock.data = kmalloc(inblock.len, gfp_mask);
    281:         ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
    372:                 gfp_t gfp_mask)                                         (in krb5_kdf_feedback_cmac())
    401:         step.data = kzalloc(step.len, gfp_mask);
    406:         DR.data = kmalloc(DR.len, gfp_mask);
    504:                 gfp_t gfp_mask)                                         (in krb5_kdf_hmac_sha2())
    (further matches elided)

gss_krb5_mech.c
    297: gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)
    306:         keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask);
    313:                 KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
    329:                 KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
    346:                 KEY_USAGE_SEED_CHECKSUM, gfp_mask))
    354:                 KEY_USAGE_SEED_CHECKSUM, gfp_mask))
    363:                 KEY_USAGE_SEED_INTEGRITY, gfp_mask))
    371:                 KEY_USAGE_SEED_INTEGRITY, gfp_mask))
    396:                 gfp_t gfp_mask)                          (in gss_import_v2_context())
    447:                 gss_kerberos_mech.gm_oid.len, gfp_mask);
    (further matches elided)

gss_krb5_internal.h
     40:                 gfp_t gfp_mask);
    110:                 gfp_t gfp_mask);
    116:                 gfp_t gfp_mask);
    122:                 gfp_t gfp_mask);
    141:                 u32 usage, u8 seed, gfp_t gfp_mask)      (in krb5_derive_key())
    153:         return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask);

/linux/drivers/net/ethernet/mellanox/mlx4/
icm.c
     99:                 gfp_t gfp_mask, int node)                (in mlx4_alloc_icm_pages())
    103:         page = alloc_pages_node(node, gfp_mask, order);
    105:                 page = alloc_pages(gfp_mask, order);
    115:                 int order, gfp_t gfp_mask)               (in mlx4_alloc_icm_coherent())
    118:                 &buf->dma_addr, gfp_mask);
    133:                 gfp_t gfp_mask, int coherent)            (in mlx4_alloc_icm())
    142:         BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
    145:                 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
    149:                 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
    162:                 gfp_mask & ~(__GFP_HIGHMEM | …           (truncated in the search output)
    (further matches elided)

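A sketch of the NUMA-with-fallback pattern visible at lines 103-105 above: prefer the device's home node, then fall back to any node; demo_* is illustrative:

    #include <linux/gfp.h>

    static struct page *demo_alloc_near(int node, gfp_t gfp_mask, int order)
    {
            /* First try the requested node (node may be NUMA_NO_NODE). */
            struct page *page = alloc_pages_node(node, gfp_mask, order);

            if (!page)
                    page = alloc_pages(gfp_mask, order);    /* any node */
            return page;
    }
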
/linux/lib/
generic-radix-tree.c
     80: static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
     82:         return kzalloc(GENRADIX_NODE_SIZE, gfp_mask);
     95:                 gfp_t gfp_mask)                          (in __genradix_ptr_alloc())
    112:         new_node = genradix_alloc_node(gfp_mask);
    135:         new_node = genradix_alloc_node(gfp_mask);
    277:                 gfp_t gfp_mask)                          (in __genradix_prealloc())
    282:         if (!__genradix_ptr_alloc(radix, offset, gfp_mask))

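A sketch of the typed public wrappers (declared in include/linux/generic-radix-tree.h) around the allocation helpers above; struct foo and demo_* are illustrative:

    #include <linux/generic-radix-tree.h>
    #include <linux/types.h>

    struct foo {
            u64 seq;
    };

    static int demo_genradix(void)
    {
            GENRADIX(struct foo) foos;
            struct foo *p;

            genradix_init(&foos);

            /* Allocates any missing interior nodes with the given mask. */
            p = genradix_ptr_alloc(&foos, 123, GFP_KERNEL);
            if (!p) {
                    genradix_free(&foos);
                    return -ENOMEM;
            }
            p->seq = 1;

            genradix_free(&foos);
            return 0;
    }
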
/linux/drivers/connector/
connector.c
     62:                 gfp_t gfp_mask, netlink_filter_fn filter,        (in cn_netlink_send_mult())
     97:         skb = nlmsg_new(size, gfp_mask);
    115:                 gfp_mask, filter,
    118:                 !gfpflags_allow_blocking(gfp_mask));
    124:                 gfp_t gfp_mask)                                  (in cn_netlink_send())
    126:         return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,

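A sketch of broadcasting a message through cn_netlink_send(); the cb_id idx/val pair is illustrative (real users register an id with cn_add_callback() first), and demo_* is hypothetical:

    #include <linux/connector.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static int demo_cn_send(const void *data, u16 len, u32 group)
    {
            struct cn_msg *msg;
            int ret;

            msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
            if (!msg)
                    return -ENOMEM;

            msg->id.idx = 0x10;    /* illustrative idx/val pair */
            msg->id.val = 0x1;
            msg->len = len;
            memcpy(msg->data, data, len);

            /* gfp_mask matters here: atomic contexts pass GFP_ATOMIC. */
            ret = cn_netlink_send(msg, 0, group, GFP_KERNEL);
            kfree(msg);
            return ret;
    }
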
/linux/kernel/power/
snapshot.c
    191: static void *get_image_page(gfp_t gfp_mask, int safe_needed)
    195:         res = (void *)get_zeroed_page(gfp_mask);
    201:         res = (void *)get_zeroed_page(gfp_mask);
    210: static void *__get_safe_page(gfp_t gfp_mask)
    219:         return get_image_page(gfp_mask, PG_SAFE);
    222: unsigned long get_safe_page(gfp_t gfp_mask)
    224:         return (unsigned long)__get_safe_page(gfp_mask);
    227: static struct page *alloc_image_page(gfp_t gfp_mask)
    231:         page = alloc_page(gfp_mask);
    297:         gfp_t gfp_mask;          /* mask for allocating pages */   (struct member)
    (further matches elided)

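A sketch of the single-page pattern behind get_image_page() above: get_zeroed_page() returns a kernel virtual address rather than a struct page, released with free_page(); demo_* is illustrative:

    #include <linux/gfp.h>

    static int demo_scratch_page(gfp_t gfp_mask)
    {
            unsigned long addr = get_zeroed_page(gfp_mask);

            if (!addr)
                    return -ENOMEM;
            /* ... stage data in the page ... */
            free_page(addr);
            return 0;
    }
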