/linux/mm/
mempool.c
    197  gfp_t gfp_mask, int node_id) in mempool_init_node() argument
    207  gfp_mask, node_id); in mempool_init_node()
    217  element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
    259  * @gfp_mask: memory allocation flags
    272  gfp_t gfp_mask, int node_id) in mempool_create()
    276  pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id); in mempool_create_node()
    281  gfp_mask, node_id)) { in mempool_create_node()
    374  * @gfp_mask: the usual allocation bitmask.
    384  void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask)
    391  VM_WARN_ON_ONCE(gfp_mask in mempool_alloc()
    278  mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id) mempool_create_node() argument
    390  mempool_alloc(mempool_t *pool, gfp_t gfp_mask) mempool_alloc() argument
    561  mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) mempool_alloc_slab() argument
    580  mempool_kmalloc(gfp_t gfp_mask, void *pool_data) mempool_kmalloc() argument
    593  mempool_kvmalloc(gfp_t gfp_mask, void *pool_data) mempool_kvmalloc() argument
    610  mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) mempool_alloc_pages() argument
    [all …]
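The mempool.c hits above cover both sides of the reserved-element API: pool construction (mempool_create_node) and allocation (mempool_alloc) take a gfp_mask, and the stock element helpers (mempool_alloc_slab, mempool_kmalloc, mempool_alloc_pages) receive it as their first argument. As a hedged, caller-side sketch (not taken from mempool.c; the cache name, element type, and minimum count are illustrative), a slab-backed pool is typically set up and used like this:

#include <linux/mempool.h>
#include <linux/slab.h>

struct my_io_ctx {
	int tag;
};

static struct kmem_cache *my_ctx_cache;
static mempool_t *my_ctx_pool;

static int my_pool_setup(void)
{
	my_ctx_cache = kmem_cache_create("my_io_ctx",
					 sizeof(struct my_io_ctx), 0, 0, NULL);
	if (!my_ctx_cache)
		return -ENOMEM;

	/* Keep at least 16 elements in reserve for memory-pressure paths. */
	my_ctx_pool = mempool_create(16, mempool_alloc_slab,
				     mempool_free_slab, my_ctx_cache);
	if (!my_ctx_pool) {
		kmem_cache_destroy(my_ctx_cache);
		return -ENOMEM;
	}
	return 0;
}

static struct my_io_ctx *my_ctx_get(void)
{
	/* GFP_NOIO: may sleep, but must not recurse into the I/O path. */
	return mempool_alloc(my_ctx_pool, GFP_NOIO);
}

static void my_ctx_put(struct my_io_ctx *ctx)
{
	mempool_free(ctx, my_ctx_pool);
}

mempool_alloc() only falls back to the reserved elements when the backing slab allocation fails, so the gfp_mask mainly controls how hard (and where) that backing attempt may reclaim.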
page_owner.c
    27   gfp_t gfp_mask; member
    166  gfp_t gfp_mask) in add_stack_record_to_list() argument
    172  stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask)); in add_stack_record_to_list()
    194  static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, in add_stack_record_to_list()
    214  add_stack_record_to_list(stack_record, gfp_mask); in inc_stack_record_count()
    235  gfp_t gfp_mask, in dec_stack_record_count()
    246  page_owner->gfp_mask = gfp_mask; in __update_page_owner_handle()
    314  gfp_t gfp_mask) in __reset_page_owner()
    320  handle = save_stack(gfp_mask); in __set_page_owner()
    199  inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask, int nr_base_pages) inc_stack_record_count() argument
    240  __update_page_owner_handle(struct page_ext *page_ext, depot_stack_handle_t handle, unsigned short order, gfp_t gfp_mask, short last_migrate_reason, u64 ts_nsec, pid_t pid, pid_t tgid, char *comm) __update_page_owner_handle() argument
    319  __set_page_owner(struct page *page, unsigned short order, gfp_t gfp_mask) __set_page_owner() argument
    603  gfp_t gfp_mask; __dump_page_owner() local
    [all …]
page_alloc.c
    3211  unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast() argument
    3290  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
    3298  alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); in alloc_flags_nofragment()
    3322  static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma() argument
    3326  if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags_cma()
    3337  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
    3360  !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
    3422  gfp_mask)) in get_page_from_freelist()
    3431  gfp_mask)) { in get_page_from_freelist()
    3454  ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
    [all …]
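The page_alloc.c hits are the allocator's internal fast path (watermark checks, cpuset filtering, node reclaim); the gfp_mask they inspect is whatever callers pass to alloc_pages() and friends. A minimal caller-side sketch (helper names are illustrative, not from page_alloc.c):

#include <linux/gfp.h>
#include <linux/mm.h>

static void *grab_buffer_pages(unsigned int order)
{
	struct page *page;

	/*
	 * GFP_KERNEL implies __GFP_DIRECT_RECLAIM and __GFP_KSWAPD_RECLAIM;
	 * __GFP_NOWARN only suppresses the allocation-failure warning.
	 */
	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, order);
	if (!page)
		return NULL;

	return page_address(page);	/* direct-mapped lowmem page */
}

static void drop_buffer_pages(void *addr, unsigned int order)
{
	__free_pages(virt_to_page(addr), order);
}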
fail_page_alloc.c
    26   bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
    32   if (gfp_mask & __GFP_NOFAIL) in should_fail_alloc_page()
    34   if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in should_fail_alloc_page()
    37   (gfp_mask & __GFP_DIRECT_RECLAIM)) in should_fail_alloc_page()
    41   if (gfp_mask & __GFP_NOWARN) in should_fail_alloc_page()
swap_state.c
    432  struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument
    477  new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id()); in __read_swap_cache_async()
    518  if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry)) in __read_swap_cache_async()
    522  if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) in __read_swap_cache_async()
    558  struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
    568  folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, in read_swap_cache_async()
    641  * @gfp_mask: memory allocation flags
    656  struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead() argument
    686  gfp_mask, mpol, ilx, &page_allocated, false); in swap_cluster_readahead()
    703  folio = __read_swap_cache_async(entry, gfp_mask, mpo in swap_cluster_readahead()
    799  swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) swap_vma_readahead() argument
    872  swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_fault *vmf) swapin_readahead() argument
    [all …]
memcontrol-v1.h
    10   int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
    13   static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, in try_charge() argument
    19   return try_charge_memcg(memcg, gfp_mask, nr_pages); in try_charge()
    111  gfp_t gfp_mask);
    148  gfp_t gfp_mask) { return true; } in memcg1_charge_skmem() argument
swap.h
    68   struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
    133  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead() argument
    138  static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument
    177  gfp_t gfp_mask, void **shadowp) in add_to_swap_cache() argument
/linux/block/
blk-lib.c
    39   sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) in blk_alloc_discard_bio() argument
    47   bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask); in blk_alloc_discard_bio()
    64   sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) in __blkdev_issue_discard() argument
    69   gfp_mask))) in __blkdev_issue_discard()
    86   sector_t nr_sects, gfp_t gfp_mask) in blkdev_issue_discard() argument
    93   ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); in blkdev_issue_discard()
    122  sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_write_zeroes() argument
    134  bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); in __blkdev_issue_write_zeroes()
    196  sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_zero_pages() argument
    203  bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); in __blkdev_issue_zero_pages()
    [all …]
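blkdev_issue_discard() as prototyped above takes the caller's gfp_mask for its internal bio allocations. A hedged usage sketch (device, offset and length are placeholders; sectors are 512-byte units):

#include <linux/blkdev.h>
#include <linux/sizes.h>

static int discard_first_megabyte(struct block_device *bdev)
{
	sector_t start = 0;
	sector_t nr_sects = SZ_1M >> SECTOR_SHIFT;

	/* Process context: may sleep while the discard bios complete. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}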
blk-crypto.c
    92   const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask) in bio_crypt_set_ctx() argument
    100  WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM)); in bio_crypt_set_ctx()
    102  bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in bio_crypt_set_ctx()
    116  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in __bio_crypt_clone() argument
    118  dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __bio_crypt_clone()
    304  gfp_t gfp_mask) in __blk_crypto_rq_bio_prep() argument
    307  rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask); in __blk_crypto_rq_bio_prep()
/linux/fs/nfs/blocklayout/
dev.c
    289  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
    294  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
    300  dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple()
    385  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
    439  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
    444  ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
    455  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
    462  sizeof(struct pnfs_block_dev), gfp_mask); in bl_parse_concat()
    468  volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat()
    484  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
    [all …]
/linux/fs/btrfs/
ulist.h
    50   struct ulist *ulist_alloc(gfp_t gfp_mask);
    53   int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
    55   u64 *old_aux, gfp_t gfp_mask);
    60   void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument
    64   int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr()
    68   return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
ulist.c
    99   struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument
    101  struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc()
    111  void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask) in ulist_prealloc() argument
    114  ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask); in ulist_prealloc()
    201  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument
    203  return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add()
    207  u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument
    223  node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
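The btrfs ulist shown above is a small set of u64 values with per-entry aux data; every growth path takes a gfp_mask. A sketch of typical use (the value is illustrative; GFP_NOFS is the usual choice because these paths run with filesystem locks held):

#include <linux/gfp.h>
#include "ulist.h"

static int collect_root_example(void)
{
	struct ulist *roots;
	int ret;

	roots = ulist_alloc(GFP_NOFS);
	if (!roots)
		return -ENOMEM;

	/* val is a root id here, aux an opaque per-entry cookie. */
	ret = ulist_add(roots, 5, 0, GFP_NOFS);
	if (ret < 0)
		goto out;
	ret = 0;	/* ulist_add() returns 1 if added, 0 if already present */
out:
	ulist_free(roots);
	return ret;
}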
/linux/include/linux/
cpuset.h
    85   extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);
    87   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
    89   return cpuset_node_allowed(zone_to_nid(z), gfp_mask); in __cpuset_zone_allowed()
    92   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
    95   return __cpuset_zone_allowed(z, gfp_mask); in cpuset_zone_allowed()
    224  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
    229  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
blk-crypto.h
    84   gfp_t gfp_mask);
    115  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
    128  gfp_t gfp_mask) in bio_crypt_clone() argument
    131  return __bio_crypt_clone(dst, src, gfp_mask); in bio_crypt_clone()
page_owner.h
    13   unsigned short order, gfp_t gfp_mask);
    29   unsigned short order, gfp_t gfp_mask) in set_page_owner() argument
    32   __set_page_owner(page, order, gfp_mask); in set_page_owner()
    61   unsigned short order, gfp_t gfp_mask) in set_page_owner() argument
/linux/include/linux/sched/
mm.h
    273  extern void fs_reclaim_acquire(gfp_t gfp_mask);
    274  extern void fs_reclaim_release(gfp_t gfp_mask);
    278  static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire() argument
    279  static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release() argument
    316  static inline void might_alloc(gfp_t gfp_mask) in might_alloc() argument
    318  fs_reclaim_acquire(gfp_mask); in might_alloc()
    319  fs_reclaim_release(gfp_mask); in might_alloc()
    321  might_sleep_if(gfpflags_allow_blocking(gfp_mask)); in might_alloc()
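might_alloc() from sched/mm.h above is an annotation helper: it runs the fs_reclaim lockdep acquire/release and might_sleep_if() checks for a given gfp_mask even on paths that end up not allocating. An illustrative use in a custom allocation helper (cache, freelist and names are placeholders):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *obj_cache_get(struct kmem_cache *cache, void **freelist,
			   gfp_t gfp_mask)
{
	void *obj;

	/* Warn now if the calling context can't satisfy what gfp_mask allows. */
	might_alloc(gfp_mask);

	obj = *freelist;		/* fast path: reuse a cached object */
	if (obj) {
		*freelist = NULL;
		return obj;
	}
	return kmem_cache_alloc(cache, gfp_mask);
}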
/linux/net/sunrpc/auth_gss/
gss_krb5_keys.c
    152  const struct xdr_netobj *in_constant, gfp_t gfp_mask) in krb5_DK() argument
    174  inblockdata = kmalloc(blocksize, gfp_mask); in krb5_DK()
    178  outblockdata = kmalloc(blocksize, gfp_mask); in krb5_DK()
    271  gfp_t gfp_mask) in krb5_derive_key_v2() argument
    277  inblock.data = kmalloc(inblock.len, gfp_mask); in krb5_derive_key_v2()
    281  ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask); in krb5_derive_key_v2()
    372  gfp_t gfp_mask) in krb5_kdf_feedback_cmac() argument
    401  step.data = kzalloc(step.len, gfp_mask); in krb5_kdf_feedback_cmac()
    406  DR.data = kmalloc(DR.len, gfp_mask); in krb5_kdf_feedback_cmac()
    504  gfp_t gfp_mask) in krb5_kdf_hmac_sha2()
    [all …]
gss_krb5_mech.c
    297  gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask) in gss_krb5_import_ctx_v2() argument
    306  keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask); in gss_krb5_import_ctx_v2()
    313  KEY_USAGE_SEED_ENCRYPTION, gfp_mask)) in gss_krb5_import_ctx_v2()
    329  KEY_USAGE_SEED_ENCRYPTION, gfp_mask)) in gss_krb5_import_ctx_v2()
    346  KEY_USAGE_SEED_CHECKSUM, gfp_mask)) in gss_krb5_import_ctx_v2()
    354  KEY_USAGE_SEED_CHECKSUM, gfp_mask)) in gss_krb5_import_ctx_v2()
    363  KEY_USAGE_SEED_INTEGRITY, gfp_mask)) in gss_krb5_import_ctx_v2()
    371  KEY_USAGE_SEED_INTEGRITY, gfp_mask)) in gss_krb5_import_ctx_v2()
    396  gfp_t gfp_mask) in gss_import_v2_context() argument
    447  gss_kerberos_mech.gm_oid.len, gfp_mask); in gss_import_v2_context()
    [all …]
gss_krb5_internal.h
    40   gfp_t gfp_mask);
    110  gfp_t gfp_mask);
    116  gfp_t gfp_mask);
    122  gfp_t gfp_mask);
    141  u32 usage, u8 seed, gfp_t gfp_mask) in krb5_derive_key() argument
    153  return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask); in krb5_derive_key()
/linux/drivers/net/ethernet/mellanox/mlx4/
icm.c
    99   gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument
    103  page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages()
    105  page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages()
    115  int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument
    118  &buf->dma_addr, gfp_mask); in mlx4_alloc_icm_coherent()
    133  gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument
    142  BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm()
    145  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm()
    149  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm()
    162  gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm()
    [all …]
/linux/drivers/connector/
connector.c
    62   gfp_t gfp_mask, netlink_filter_fn filter, in cn_netlink_send_mult() argument
    97   skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult()
    115  gfp_mask, filter, in cn_netlink_send_mult()
    118  !gfpflags_allow_blocking(gfp_mask)); in cn_netlink_send_mult()
    124  gfp_t gfp_mask) in cn_netlink_send() argument
    126  return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask, in cn_netlink_send()
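cn_netlink_send() above forwards the gfp_mask to nlmsg_new() when building the skb. A hedged example of broadcasting a message to a connector group (the idx/val pair and payload are made up):

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int my_cn_notify(u32 idx, u32 val, const void *data, u16 len)
{
	struct cn_msg *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->id.idx = idx;
	msg->id.val = val;
	msg->len = len;
	memcpy(msg->data, data, len);

	/* portid 0 = multicast to the group; GFP_KERNEL may block in nlmsg_new(). */
	ret = cn_netlink_send(msg, 0, idx, GFP_KERNEL);
	kfree(msg);
	return ret;
}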
/linux/kernel/power/
snapshot.c
    191  static void *get_image_page(gfp_t gfp_mask, int safe_needed) in get_image_page() argument
    195  res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
    201  res = (void *)get_zeroed_page(gfp_mask); in get_image_page()
    210  static void *__get_safe_page(gfp_t gfp_mask) in __get_safe_page() argument
    219  return get_image_page(gfp_mask, PG_SAFE); in __get_safe_page()
    222  unsigned long get_safe_page(gfp_t gfp_mask) in get_safe_page() argument
    224  return (unsigned long)__get_safe_page(gfp_mask); in get_safe_page()
    227  static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page() argument
    231  page = alloc_page(gfp_mask); in alloc_image_page()
    297  gfp_t gfp_mask; /* mask for allocating pages */ member
    [all …]
/linux/lib/
generic-radix-tree.c
    24   gfp_t gfp_mask) in __genradix_ptr_alloc() argument
    44   new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
    69   new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
    211  gfp_t gfp_mask) in __genradix_prealloc() argument
    216  if (!__genradix_ptr_alloc(radix, offset, NULL, gfp_mask)) in __genradix_prealloc()
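__genradix_ptr_alloc() above is the slow path behind the genradix_ptr_alloc() macro in include/linux/generic-radix-tree.h, which grows the tree with the caller's gfp_mask. A sketch under those assumptions (the element type and index are illustrative):

#include <linux/generic-radix-tree.h>
#include <linux/types.h>

struct stats_slot {
	u64 hits;
};

static GENRADIX(struct stats_slot) stats;	/* zero-initialised == empty tree */

static int bump_hit(size_t idx)
{
	struct stats_slot *s;

	/* Allocates intermediate nodes with GFP_KERNEL if idx is new. */
	s = genradix_ptr_alloc(&stats, idx, GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	s->hits++;
	return 0;
}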
radix-tree.c
    233  radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, in radix_tree_node_alloc() argument
    245  if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) { in radix_tree_node_alloc()
    254  gfp_mask | __GFP_NOWARN); in radix_tree_node_alloc()
    276  ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); in radix_tree_node_alloc()
    322  static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr) in __radix_tree_preload() argument
    332  gfp_mask &= ~__GFP_ACCOUNT; in __radix_tree_preload()
    338  node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); in __radix_tree_preload()
    365  int radix_tree_preload(gfp_t gfp_mask) in radix_tree_preload() argument
    368  WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask)); in radix_tree_preload()
    369  return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); in radix_tree_preload()
    [all …]
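radix_tree_preload() above exists so that node allocations can happen with a sleeping gfp_mask before the caller takes a spinlock; the insertion itself then never needs to allocate. The classic pattern (the tree, lock and names below are placeholders, not from radix-tree.c):

#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(obj_tree, GFP_ATOMIC);	/* gfp used on non-preloaded paths */
static DEFINE_SPINLOCK(obj_lock);

static int obj_store(unsigned long index, void *item)
{
	int ret;

	ret = radix_tree_preload(GFP_KERNEL);	/* may sleep; disables preemption on success */
	if (ret)
		return ret;

	spin_lock(&obj_lock);
	ret = radix_tree_insert(&obj_tree, index, item);
	spin_unlock(&obj_lock);

	radix_tree_preload_end();		/* re-enables preemption */
	return ret;
}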
/linux/include/trace/events/
compaction.h
    169  gfp_t gfp_mask,
    172  TP_ARGS(order, gfp_mask, prio),
    176  __field(unsigned long, gfp_mask)
    182  __entry->gfp_mask = (__force unsigned long)gfp_mask;
    188  show_gfp_flags(__entry->gfp_mask),