/linux/include/linux/
gfp.h
    261  static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)  [in warn_if_node_offline()]
    263  gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);  [in warn_if_node_offline()]
    271  pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);  [in warn_if_node_offline()]
    280  __alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)  [in __alloc_pages_node_noprof()]
    283  warn_if_node_offline(nid, gfp_mask);  [in __alloc_pages_node_noprof()]
    285  return __alloc_pages_noprof(gfp_mask, order, nid, NULL);  [in __alloc_pages_node_noprof()]
    306  static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,  [in alloc_pages_node_noprof()]
    312  return __alloc_pages_node_noprof(nid, gfp_mask, order);  [in alloc_pages_node_noprof()]
    325  static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)  [in alloc_pages_noprof()]
    327  return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);  [in alloc_pages_noprof()]
    [all …]
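For orientation, a minimal sketch of how these node-aware helpers are typically called through their public (non-_noprof) wrappers; the function example_alloc_near_node() and its fallback policy are hypothetical, not part of gfp.h:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate an order-N block near a given NUMA node.
 * GFP_KERNEL may sleep; __GFP_NOWARN suppresses the allocation-failure
 * warning so the caller can fall back gracefully.
 */
static struct page *example_alloc_near_node(int nid, unsigned int order)
{
        gfp_t gfp_mask = GFP_KERNEL | __GFP_NOWARN;
        struct page *page;

        if (nid == NUMA_NO_NODE)
                nid = numa_node_id();

        page = alloc_pages_node(nid, gfp_mask, order);
        if (!page)
                /* Fall back to any node. */
                page = alloc_pages(gfp_mask, order);

        return page;
}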
mempool.h
    15   typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
    43   void *pool_data, gfp_t gfp_mask, int node_id);
    54   void *pool_data, gfp_t gfp_mask, int nid);
    65   void *mempool_alloc_noprof(struct mempool *pool, gfp_t gfp_mask) __malloc;
    83   void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
    95   void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
    109  void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
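As a usage illustration (a hedged sketch, not taken from the header itself), a slab-backed mempool is usually created with mempool_create_slab_pool() and drawn from with a caller-chosen gfp_mask; struct example_req, example_cache and example_pool below are illustrative names only:

#include <linux/mempool.h>
#include <linux/slab.h>

struct example_req {
        int id;
};

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int example_pool_init(void)
{
        example_cache = KMEM_CACHE(example_req, 0);
        if (!example_cache)
                return -ENOMEM;

        /* Guarantee at least 4 preallocated elements for forward progress. */
        example_pool = mempool_create_slab_pool(4, example_cache);
        if (!example_pool) {
                kmem_cache_destroy(example_cache);
                return -ENOMEM;
        }
        return 0;
}

static struct example_req *example_req_alloc(void)
{
        /* GFP_NOIO: may sleep, but must not recurse into the I/O path. */
        return mempool_alloc(example_pool, GFP_NOIO);
}

static void example_req_free(struct example_req *req)
{
        mempool_free(req, example_pool);
}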
cpuset.h
    86   extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);
    88   static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  [in __cpuset_zone_allowed()]
    90   return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);  [in __cpuset_zone_allowed()]
    93   static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  [in cpuset_zone_allowed()]
    96   return __cpuset_zone_allowed(z, gfp_mask);  [in cpuset_zone_allowed()]
    234  static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  [in __cpuset_zone_allowed()]
    239  static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)  [in cpuset_zone_allowed()]
blk-crypto.h
    137  gfp_t gfp_mask);
    174  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
    187  gfp_t gfp_mask)  [in bio_crypt_clone()]
    190  return __bio_crypt_clone(dst, src, gfp_mask);  [in bio_crypt_clone()]
vmalloc.h
    171  extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
    175  unsigned long start, unsigned long end, gfp_t gfp_mask,
    180  void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
    184  void *vmalloc_huge_node_noprof(unsigned long size, gfp_t gfp_mask, int node) __alloc_size(1);
    187  static inline void *vmalloc_huge(unsigned long size, gfp_t gfp_mask)  [in vmalloc_huge()]
    189  return vmalloc_huge_node(size, gfp_mask, NUMA_NO_NODE);  [in vmalloc_huge()]
    335  unsigned int memalloc_apply_gfp_scope(gfp_t gfp_mask);
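A brief, hedged example (hypothetical caller, not from the header) of picking a gfp_mask for a large virtually contiguous buffer via the public __vmalloc() wrapper, which, unlike plain vmalloc(), accepts a caller-supplied mask:

#include <linux/vmalloc.h>

/* Hypothetical: allocate a large, zeroed, virtually contiguous scratch buffer. */
static void *example_alloc_scratch(size_t size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
}

static void example_free_scratch(void *buf)
{
        vfree(buf);
}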
/linux/mm/
mempool.c
    235  void *pool_data, gfp_t gfp_mask, int node_id)  [in mempool_init_node()]
    248  gfp_mask, node_id);  [in mempool_init_node()]
    259  element = pool->alloc(gfp_mask, pool->pool_data);  [in mempool_init_node()]
    315  void *pool_data, gfp_t gfp_mask, int node_id)  [in mempool_create_node_noprof()]
    319  pool = kmalloc_node_noprof(sizeof(*pool), gfp_mask | __GFP_ZERO, node_id);  [in mempool_create_node_noprof()]
    324  gfp_mask, node_id)) {  [in mempool_create_node_noprof()]
    415  gfp_t gfp_mask)  [in mempool_alloc_from_pool()]
    443  if (gfp_mask & __GFP_DIRECT_RECLAIM) {  [in mempool_alloc_from_pool()]
    472  static inline gfp_t mempool_adjust_gfp(gfp_t *gfp_mask)  [in mempool_adjust_gfp()]
    474  *gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;  [in mempool_adjust_gfp()]
    [all …]
page_alloc.c
    3642  unsigned int alloc_flags, gfp_t gfp_mask)  [in zone_watermark_fast()]
    3709  alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)  [in alloc_flags_nofragment()]
    3717  alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);  [in alloc_flags_nofragment()]
    3746  static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,  [in gfp_to_alloc_flags_cma()]
    3750  if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)  [in gfp_to_alloc_flags_cma()]
    3761  get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,  [in get_page_from_freelist()]
    3786  !__cpuset_zone_allowed(zone, gfp_mask))  [in get_page_from_freelist()]
    3861  gfp_mask))  [in get_page_from_freelist()]
    3870  gfp_mask)) {  [in get_page_from_freelist()]
    3893  ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);  [in get_page_from_freelist()]
    [all …]
page_owner.c
    27   gfp_t gfp_mask;  [member]
    175  gfp_t gfp_mask)  [in add_stack_record_to_list()]
    180  if (!gfpflags_allow_spinning(gfp_mask))  [in add_stack_record_to_list()]
    184  stack = kmalloc(sizeof(*stack), gfp_nested_mask(gfp_mask));  [in add_stack_record_to_list()]
    206  static void inc_stack_record_count(depot_stack_handle_t handle, gfp_t gfp_mask,  [in inc_stack_record_count()]
    226  add_stack_record_to_list(stack_record, gfp_mask);  [in inc_stack_record_count()]
    247  gfp_t gfp_mask,  [in __update_page_owner_handle()]
    260  page_owner->gfp_mask = gfp_mask;  [in __update_page_owner_handle()]
    336  gfp_t gfp_mask)  [in __set_page_owner()]
    341  handle = save_stack(gfp_mask);  [in __set_page_owner()]
    [all …]
fail_page_alloc.c
    26  bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)  [in should_fail_alloc_page()]
    32  if (gfp_mask & __GFP_NOFAIL)  [in should_fail_alloc_page()]
    34  if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))  [in should_fail_alloc_page()]
    37  (gfp_mask & __GFP_DIRECT_RECLAIM))  [in should_fail_alloc_page()]
    41  if (gfp_mask & __GFP_NOWARN)  [in should_fail_alloc_page()]
swap_state.c
    405  struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  [in __read_swap_cache_async()]
    440  new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());  [in __read_swap_cache_async()]
    481  if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))  [in __read_swap_cache_async()]
    513  struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,  [in read_swap_cache_async()]
    528  folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,  [in read_swap_cache_async()]
    618  struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,  [in swap_cluster_readahead()]
    648  gfp_mask, mpol, ilx, &page_allocated, false);  [in swap_cluster_readahead()]
    665  folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,  [in swap_cluster_readahead()]
    726  static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,  [in swap_vma_readahead()]
    770  folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,  [in swap_vma_readahead()]
    [all …]
vmalloc.c
    682   gfp_t gfp_mask)  [in vmap_pages_range_noflush()]
    685   page_shift, gfp_mask);  [in vmap_pages_range_noflush()]
    694   gfp_t gfp_mask)  [in __vmap_pages_range()]
    698   err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift, gfp_mask);  [in __vmap_pages_range()]
    1889  preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)  [in preload_this_cpu_lock()]
    1903  va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);  [in preload_this_cpu_lock()]
    2027  int node, gfp_t gfp_mask,  [in alloc_vmap_area()]
    2046  gfp_mask = gfp_mask & GFP_RECLAIM_MASK;  [in alloc_vmap_area()]
    2047  allow_block = gfpflags_allow_blocking(gfp_mask);  [in alloc_vmap_area()]
    2060  va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);  [in alloc_vmap_area()]
    [all …]
readahead.c
    185  gfp_t gfp_mask, unsigned int order)  [in ractl_alloc_folio()]
    189  folio = filemap_alloc_folio(gfp_mask, order, NULL);  [in ractl_alloc_folio()]
    215  gfp_t gfp_mask = readahead_gfp_mask(mapping);  [in page_cache_ra_unbounded()]
    275  folio = ractl_alloc_folio(ractl, gfp_mask,  [in page_cache_ra_unbounded()]
    280  ret = filemap_add_folio(mapping, folio, index + i, gfp_mask);  [in page_cache_ra_unbounded()]
    769  gfp_t gfp_mask = readahead_gfp_mask(mapping);  [in readahead_expand()]
    788  folio = ractl_alloc_folio(ractl, gfp_mask, min_order);  [in readahead_expand()]
    793  if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {  [in readahead_expand()]
    817  folio = ractl_alloc_folio(ractl, gfp_mask, min_order);  [in readahead_expand()]
    822  if (filemap_add_folio(mapping, folio, index, gfp_mask) < 0) {  [in readahead_expand()]
/linux/block/
blk-map.c
    22   gfp_t gfp_mask)  [in bio_alloc_map_data()]
    29   bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);  [in bio_alloc_map_data()]
    46   unsigned int nr_vecs, gfp_t gfp_mask)  [in blk_rq_map_bio_alloc()]
    51   bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,  [in blk_rq_map_bio_alloc()]
    151  struct iov_iter *iter, gfp_t gfp_mask)  [in bio_copy_user_iov()]
    161  bmd = bio_alloc_map_data(iter, gfp_mask);  [in bio_copy_user_iov()]
    176  bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);  [in bio_copy_user_iov()]
    203  page = alloc_page(GFP_NOIO | gfp_mask);  [in bio_copy_user_iov()]
    261  gfp_t gfp_mask)  [in bio_map_user_iov()]
    270  bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);  [in bio_map_user_iov()]
    [all …]
blk-lib.c
    39   sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)  [in blk_alloc_discard_bio()]
    47   bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);  [in blk_alloc_discard_bio()]
    64   sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)  [in __blkdev_issue_discard()]
    69   gfp_mask)))  [in __blkdev_issue_discard()]
    86   sector_t nr_sects, gfp_t gfp_mask)  [in blkdev_issue_discard()]
    93   __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);  [in blkdev_issue_discard()]
    122  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  [in __blkdev_issue_write_zeroes()]
    134  bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);  [in __blkdev_issue_write_zeroes()]
    196  sector_t sector, sector_t nr_sects, gfp_t gfp_mask,  [in __blkdev_issue_zero_pages()]
    209  bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);  [in __blkdev_issue_zero_pages()]
    [all …]
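As a hedged illustration of the public entry point shown above (the caller example_discard_range() is hypothetical): discarding may sleep for bio allocation and completion, so GFP_KERNEL is typical from process context, while GFP_NOFS or GFP_NOIO are used when the caller already holds filesystem or I/O resources:

#include <linux/blkdev.h>

/* Hypothetical: discard an extent of a block device from process context. */
static int example_discard_range(struct block_device *bdev,
                                 sector_t start, sector_t nr_sects)
{
        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}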
/linux/fs/nfs/blocklayout/
dev.c
    290  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
    295  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  [in bl_parse_simple()]
    301  dev = bl_resolve_deviceid(server, v, gfp_mask);  [in bl_parse_simple()]
    386  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  [in bl_parse_scsi()]
    440  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  [in bl_parse_slice()]
    445  ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask);  [in bl_parse_slice()]
    456  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  [in bl_parse_concat()]
    463  sizeof(struct pnfs_block_dev), gfp_mask);  [in bl_parse_concat()]
    469  volumes, v->concat.volumes[i], gfp_mask);  [in bl_parse_concat()]
    485  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  [in bl_parse_stripe()]
    [all …]
/linux/fs/btrfs/
ulist.h
    50  struct ulist *ulist_alloc(gfp_t gfp_mask);
    53  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
    55  u64 *old_aux, gfp_t gfp_mask);
    60  void **old_aux, gfp_t gfp_mask)  [in ulist_add_merge_ptr()]
    64  int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);  [in ulist_add_merge_ptr()]
    68  return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);  [in ulist_add_merge_ptr()]
ulist.c
    99   struct ulist *ulist_alloc(gfp_t gfp_mask)  [in ulist_alloc()]
    101  struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);  [in ulist_alloc()]
    111  void ulist_prealloc(struct ulist *ulist, gfp_t gfp_mask)  [in ulist_prealloc()]
    114  ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);  [in ulist_prealloc()]
    200  int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)  [in ulist_add()]
    202  return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);  [in ulist_add()]
    206  u64 *old_aux, gfp_t gfp_mask)  [in ulist_add_merge()]
    222  node = kmalloc(sizeof(*node), gfp_mask);  [in ulist_add_merge()]
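A hedged sketch of typical ulist use inside btrfs (the caller example_collect() and its input array are illustrative only); since these paths usually run under filesystem locks, GFP_NOFS is the customary mask:

#include <linux/printk.h>
#include "ulist.h"

/* Hypothetical: collect a small set of unique bytenr values. */
static int example_collect(u64 *bytenrs, int count)
{
        struct ulist *seen;
        struct ulist_iterator uiter;
        struct ulist_node *node;
        int i, ret = 0;

        seen = ulist_alloc(GFP_NOFS);
        if (!seen)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                /* Duplicate values are collapsed; aux is unused here. */
                ret = ulist_add(seen, bytenrs[i], 0, GFP_NOFS);
                if (ret < 0)
                        goto out;
        }

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(seen, &uiter)))
                pr_debug("bytenr %llu\n", node->val);
        ret = 0;
out:
        ulist_free(seen);
        return ret;
}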
/linux/include/linux/sched/
mm.h
    272  extern void fs_reclaim_acquire(gfp_t gfp_mask);
    273  extern void fs_reclaim_release(gfp_t gfp_mask);
    277  static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }  [in fs_reclaim_acquire()]
    278  static inline void fs_reclaim_release(gfp_t gfp_mask) { }  [in fs_reclaim_release()]
    315  static inline void might_alloc(gfp_t gfp_mask)  [in might_alloc()]
    317  fs_reclaim_acquire(gfp_mask);  [in might_alloc()]
    318  fs_reclaim_release(gfp_mask);  [in might_alloc()]
    323  might_sleep_if(gfpflags_allow_blocking(gfp_mask));  [in might_alloc()]
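A hedged example (hypothetical allocator wrapper, example_get_obj()) of annotating an allocation path with might_alloc(), so the lockdep and might_sleep checks derived from the gfp_mask fire even when the fast path never actually allocates:

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical cache-then-allocate helper: the gfp-dependent checks run
 * unconditionally via might_alloc(), even when the cached object is hit.
 */
static void *example_get_obj(void **cached, size_t size, gfp_t gfp_mask)
{
        void *obj;

        might_alloc(gfp_mask);

        obj = *cached;
        if (obj) {
                *cached = NULL;
                return obj;
        }
        return kmalloc(size, gfp_mask);
}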
/linux/net/sunrpc/auth_gss/
gss_krb5_keys.c
    152  const struct xdr_netobj *in_constant, gfp_t gfp_mask)  [in krb5_DK()]
    174  inblockdata = kmalloc(blocksize, gfp_mask);  [in krb5_DK()]
    178  outblockdata = kmalloc(blocksize, gfp_mask);  [in krb5_DK()]
    271  gfp_t gfp_mask)  [in krb5_derive_key_v2()]
    277  inblock.data = kmalloc(inblock.len, gfp_mask);  [in krb5_derive_key_v2()]
    281  ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);  [in krb5_derive_key_v2()]
    372  gfp_t gfp_mask)  [in krb5_kdf_feedback_cmac()]
    401  step.data = kzalloc(step.len, gfp_mask);  [in krb5_kdf_feedback_cmac()]
    406  DR.data = kmalloc(DR.len, gfp_mask);  [in krb5_kdf_feedback_cmac()]
    504  gfp_t gfp_mask)  [in krb5_kdf_hmac_sha2()]
    [all …]
gss_krb5_mech.c
    297  gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)  [in gss_krb5_import_ctx_v2()]
    306  keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask);  [in gss_krb5_import_ctx_v2()]
    313  KEY_USAGE_SEED_ENCRYPTION, gfp_mask))  [in gss_krb5_import_ctx_v2()]
    329  KEY_USAGE_SEED_ENCRYPTION, gfp_mask))  [in gss_krb5_import_ctx_v2()]
    346  KEY_USAGE_SEED_CHECKSUM, gfp_mask))  [in gss_krb5_import_ctx_v2()]
    354  KEY_USAGE_SEED_CHECKSUM, gfp_mask))  [in gss_krb5_import_ctx_v2()]
    363  KEY_USAGE_SEED_INTEGRITY, gfp_mask))  [in gss_krb5_import_ctx_v2()]
    371  KEY_USAGE_SEED_INTEGRITY, gfp_mask))  [in gss_krb5_import_ctx_v2()]
    396  gfp_t gfp_mask)  [in gss_import_v2_context()]
    447  gss_kerberos_mech.gm_oid.len, gfp_mask);  [in gss_import_v2_context()]
    [all …]
/linux/drivers/net/ethernet/mellanox/mlx4/
icm.c
    99   gfp_t gfp_mask, int node)  [in mlx4_alloc_icm_pages()]
    103  page = alloc_pages_node(node, gfp_mask, order);  [in mlx4_alloc_icm_pages()]
    105  page = alloc_pages(gfp_mask, order);  [in mlx4_alloc_icm_pages()]
    115  int order, gfp_t gfp_mask)  [in mlx4_alloc_icm_coherent()]
    118  &buf->dma_addr, gfp_mask);  [in mlx4_alloc_icm_coherent()]
    133  gfp_t gfp_mask, int coherent)  [in mlx4_alloc_icm()]
    142  BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));  [in mlx4_alloc_icm()]
    145  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),  [in mlx4_alloc_icm()]
    149  gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));  [in mlx4_alloc_icm()]
    162  gfp_mask & ~(__GFP_HIGHMEM |  [in mlx4_alloc_icm()]
    [all …]
/linux/mm/kasan/
shadow.c
    339  static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask)  [in ___alloc_pages_bulk()]
    345  nr_populated = alloc_pages_bulk(gfp_mask, nr_pages, pages);  [in ___alloc_pages_bulk()]
    357  static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)  [in __kasan_populate_vmalloc_do()]
    364  data.pages = (struct page **)__get_free_page(gfp_mask | __GFP_ZERO);  [in __kasan_populate_vmalloc_do()]
    370  ret = ___alloc_pages_bulk(data.pages, nr_pages, gfp_mask);  [in __kasan_populate_vmalloc_do()]
    380  flags = memalloc_apply_gfp_scope(gfp_mask);  [in __kasan_populate_vmalloc_do()]
    398  int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)  [in __kasan_populate_vmalloc()]
    424  ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);  [in __kasan_populate_vmalloc()]
    656  int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)  [in kasan_alloc_module_shadow()]
    689  kmemleak_vmalloc(vm, size, gfp_mask);  [in kasan_alloc_module_shadow()]
/linux/drivers/connector/
connector.c
    62   gfp_t gfp_mask, netlink_filter_fn filter,  [in cn_netlink_send_mult()]
    97   skb = nlmsg_new(size, gfp_mask);  [in cn_netlink_send_mult()]
    115  gfp_mask, filter,  [in cn_netlink_send_mult()]
    118  !gfpflags_allow_blocking(gfp_mask));  [in cn_netlink_send_mult()]
    124  gfp_t gfp_mask)  [in cn_netlink_send()]
    126  return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask,  [in cn_netlink_send()]
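A hedged sketch of a connector client broadcasting an event; the message layout, the EXAMPLE_CN_IDX/EXAMPLE_CN_VAL identifiers and example_cn_notify() are hypothetical. GFP_ATOMIC is chosen because such notifications are often raised from non-sleeping contexts; GFP_KERNEL is fine from process context:

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_CN_IDX  0x123
#define EXAMPLE_CN_VAL  0x1

static int example_cn_notify(const char *text)
{
        struct cn_msg *msg;
        size_t len = strlen(text) + 1;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_ATOMIC);
        if (!msg)
                return -ENOMEM;

        msg->id.idx = EXAMPLE_CN_IDX;
        msg->id.val = EXAMPLE_CN_VAL;
        msg->len = len;
        memcpy(msg->data, text, len);

        /* Broadcast to netlink group EXAMPLE_CN_IDX; portid 0 means no unicast target. */
        ret = cn_netlink_send(msg, 0, EXAMPLE_CN_IDX, GFP_ATOMIC);
        kfree(msg);
        return ret;
}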
/linux/tools/testing/selftests/net/bench/page_pool/
bench_page_pool_simple.c
    109  gfp_t gfp_mask = GFP_ATOMIC;  [in pp_fill_ptr_ring()]
    113  array = kcalloc(elems, sizeof(struct page *), gfp_mask);  [in pp_fill_ptr_ring()]
    116  array[i] = page_pool_alloc_pages(pp, gfp_mask);  [in pp_fill_ptr_ring()]
    130  gfp_t gfp_mask = GFP_ATOMIC; /* GFP_ATOMIC is not really needed */  [in time_bench_page_pool()]
    162  page = page_pool_alloc_pages(pp, gfp_mask);  [in time_bench_page_pool()]
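For context, a hedged sketch of a page_pool user outside the benchmark (example_pp_create()/example_pp_get()/example_pp_put() and the chosen pool size are illustrative; header paths and struct fields vary across kernel versions). Page pools are normally filled and drained from NAPI/softirq context, which is why GFP_ATOMIC is the usual mask:

#include <net/page_pool/helpers.h>

/* Hypothetical: a minimal page_pool bound to a NUMA node, no DMA mapping. */
static struct page_pool *example_pp_create(int nid)
{
        struct page_pool_params params = {
                .order          = 0,
                .pool_size      = 1024,
                .nid            = nid,
        };

        /* Returns an ERR_PTR() on failure; caller should check with IS_ERR(). */
        return page_pool_create(&params);
}

static struct page *example_pp_get(struct page_pool *pp)
{
        return page_pool_alloc_pages(pp, GFP_ATOMIC);
}

static void example_pp_put(struct page_pool *pp, struct page *page)
{
        /* allow_direct=false: we are not guaranteed to be in NAPI context. */
        page_pool_put_full_page(pp, page, false);
}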
/linux/lib/
generic-radix-tree.c
    24   gfp_t gfp_mask)  [in __genradix_ptr_alloc()]
    44   new_node = genradix_alloc_node(gfp_mask);  [in __genradix_ptr_alloc()]
    69   new_node = genradix_alloc_node(gfp_mask);  [in __genradix_ptr_alloc()]
    211  gfp_t gfp_mask)  [in __genradix_prealloc()]
    216  if (!__genradix_ptr_alloc(radix, offset, NULL, gfp_mask))  [in __genradix_prealloc()]
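Callers normally reach these internals through the typed macros in <linux/generic-radix-tree.h>. A hedged sketch (struct example_rec, example_table and example_bump() are hypothetical) of passing a gfp mask down via genradix_ptr_alloc():

#include <linux/generic-radix-tree.h>

/* Hypothetical sparse table of per-id records, indexed by a u32 id. */
struct example_rec {
        u64 count;
};

static DEFINE_GENRADIX(example_table, struct example_rec);

static int example_bump(u32 id)
{
        struct example_rec *rec;

        /* Allocates intermediate nodes and the slot on demand. */
        rec = genradix_ptr_alloc(&example_table, id, GFP_KERNEL);
        if (!rec)
                return -ENOMEM;

        rec->count++;
        return 0;
}

static void example_table_exit(void)
{
        genradix_free(&example_table);
}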