| /linux/fs/btrfs/ |
| extent-io-tree.c |
    166  static struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc)  in alloc_extent_state_atomic() argument
    168  if (!prealloc)  in alloc_extent_state_atomic()
    169  prealloc = alloc_extent_state(GFP_ATOMIC);  in alloc_extent_state_atomic()
    171  return prealloc;  in alloc_extent_state_atomic()
    488  * struct 'prealloc' as the newly created second half. 'split' indicates an
    494  * prealloc: [orig->start, split - 1]
    501  struct extent_state *prealloc, u64 split)
    504  split_state(struct extent_io_tree *tree, struct extent_state *orig, struct extent_state *prealloc, u64 split)  split_state() argument
    509  prealloc->start = orig->start;  in split_state()
    510  prealloc->end = split - 1;  in split_state()
    511  prealloc ...  in split_state()
    618  struct extent_state *prealloc = NULL;  btrfs_clear_extent_bit_changeset() local
   1053  struct extent_state *prealloc = NULL;  set_extent_bit() local
   1323  struct extent_state *prealloc = NULL;  btrfs_convert_extent_bit() local
    [all...]
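The alloc_extent_state_atomic() matches above show btrfs's two-tier idiom: callers preallocate an extent_state with a sleepable GFP mask before taking the tree lock, and the helper falls back to GFP_ATOMIC only when no preallocation arrived. Below is a minimal userspace sketch of that retry shape, with a pthread mutex and calloc() standing in for the kernel's spinlock and allocators; all names are illustrative, not btrfs's API.

```c
#include <pthread.h>
#include <stdlib.h>

struct extent_state { unsigned long start, end, bits; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

/* Under the lock: take the caller's preallocation if the slot needs one.
 * Returns nonzero when memory was needed but none was preallocated. */
static int set_bits_locked(struct extent_state **slot,
			   struct extent_state **prealloc)
{
	if (!*slot) {
		if (!*prealloc)
			return 1;        /* kernel would try GFP_ATOMIC here */
		*slot = *prealloc;       /* ownership moves into the tree */
		*prealloc = NULL;
	}
	return 0;
}

int set_bits(struct extent_state **slot)
{
	struct extent_state *prealloc = NULL;

again:
	if (!prealloc) {
		prealloc = calloc(1, sizeof(*prealloc)); /* sleepable alloc */
		if (!prealloc)
			return -1;
	}
	pthread_mutex_lock(&tree_lock);
	if (set_bits_locked(slot, &prealloc)) {
		pthread_mutex_unlock(&tree_lock);
		goto again;              /* retry with a fresh preallocation */
	}
	pthread_mutex_unlock(&tree_lock);
	free(prealloc);                  /* leftover preallocation, if any */
	return 0;
}
```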
| ulist.c |
     53  ulist->prealloc = NULL;  in ulist_init()
     72  kfree(ulist->prealloc);  in ulist_release()
     73  ulist->prealloc = NULL;  in ulist_release()
    113  if (!ulist->prealloc)  in ulist_prealloc()
    114  ulist->prealloc = kzalloc(sizeof(*ulist->prealloc), gfp_mask);  in ulist_prealloc()
    218  if (ulist->prealloc) {  in ulist_add_merge()
    219  node = ulist->prealloc;  in ulist_add_merge()
    220  ulist->prealloc = NULL;  in ulist_add_merge()
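ulist keeps at most one spare node: ulist_prealloc() tops up the cache with the caller's gfp_mask at a safe time, and ulist_add_merge() consumes the spare before trying a fresh allocation. A sketch of that single-slot cache, assuming a simplified linked list in place of the kernel's ulist:

```c
#include <stdlib.h>

struct node { unsigned long val; struct node *next; };
struct ulist { struct node *head; struct node *prealloc; };

/* Top up the one-entry spare at a point where allocation is cheap/safe. */
void ulist_prealloc(struct ulist *u)
{
	if (!u->prealloc)
		u->prealloc = calloc(1, sizeof(*u->prealloc));
}

int ulist_add(struct ulist *u, unsigned long val)
{
	struct node *n;

	if (u->prealloc) {          /* consume the cached spare first */
		n = u->prealloc;
		u->prealloc = NULL;
	} else {
		n = calloc(1, sizeof(*n));
		if (!n)
			return -1;
	}
	n->val = val;
	n->next = u->head;
	u->head = n;
	return 0;
}
```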
|
| qgroup.c |
    196  * Must be called with qgroup_lock held and @prealloc preallocated.
    198  * The control on the lifespan of @prealloc would be transferred to this
    199  * function, thus caller should no longer touch @prealloc.
    202  struct btrfs_qgroup *prealloc,  in add_qgroup_rb() argument
    207  /* Caller must have pre-allocated @prealloc. */  in add_qgroup_rb()
    208  ASSERT(prealloc);  in add_qgroup_rb()
    210  prealloc->qgroupid = qgroupid;  in add_qgroup_rb()
    211  node = rb_find_add(&prealloc->node, &fs_info->qgroup_tree, btrfs_qgroup_qgroupid_cmp);  in add_qgroup_rb()
    213  kfree(prealloc);  in add_qgroup_rb()
    217  INIT_LIST_HEAD(&prealloc ...  in add_qgroup_rb()
    271  __add_relation_rb(struct btrfs_qgroup_list *prealloc, struct btrfs_qgroup *member, struct btrfs_qgroup *parent)  __add_relation_rb() argument
    298  add_relation_rb(struct btrfs_fs_info *fs_info, struct btrfs_qgroup_list *prealloc, u64 memberid, u64 parentid)  add_relation_rb() argument
    459  struct btrfs_qgroup *prealloc;  btrfs_read_qgroup_config() local
    978  struct btrfs_qgroup *prealloc = NULL;  btrfs_quota_enable() local
   1511  btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src, u64 dst, struct btrfs_qgroup_list *prealloc)  btrfs_add_qgroup_relation() argument
   1645  struct btrfs_qgroup *prealloc = NULL;  btrfs_create_qgroup() local
   3287  struct btrfs_qgroup *prealloc;  btrfs_qgroup_inherit() local
    [all...]
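The comments at lines 196-199 spell out an ownership-transfer contract: once passed in, @prealloc belongs to add_qgroup_rb(), which either links it into the qgroup tree or frees it on a duplicate id, so the caller must not touch it in either outcome. A sketch of that contract, with a linear list standing in for the kernel's rb_find_add() on an rbtree (names are stand-ins):

```c
#include <stdlib.h>

struct qgroup { unsigned long id; struct qgroup *next; };

/* Returns the tracked qgroup for @id. In both outcomes @prealloc now
 * belongs to this function; the caller must not use or free it again. */
struct qgroup *add_qgroup(struct qgroup **head, struct qgroup *prealloc,
			  unsigned long id)
{
	struct qgroup *q;

	for (q = *head; q; q = q->next) {
		if (q->id == id) {  /* duplicate: callee disposes of prealloc */
			free(prealloc);
			return q;
		}
	}
	prealloc->id = id;          /* inserted: structure now owns it */
	prealloc->next = *head;
	*head = prealloc;
	return prealloc;
}
```

Handing over ownership this way keeps the insertion path under qgroup_lock free of allocation-failure handling.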
| ulist.h |
     44  struct ulist_node *prealloc;  member
|
| extent_io.c |
    867  attach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio, struct btrfs_folio_state *prealloc)  attach_extent_buffer_folio() argument
    871  struct btrfs_folio_state *prealloc)  in attach_extent_buffer_folio()
    893  /* Already mapped, just free prealloc */  in attach_extent_buffer_folio()
    895  btrfs_free_folio_state(prealloc);  in attach_extent_buffer_folio()
    899  if (prealloc)  in attach_extent_buffer_folio()
    901  folio_attach_private(folio, prealloc);  in attach_extent_buffer_folio()
   3296  attach_eb_folio_to_filemap(struct extent_buffer *eb, int i, struct btrfs_folio_state *prealloc, struct extent_buffer **found_eb_ret)  attach_eb_folio_to_filemap() argument
   3328  struct btrfs_folio_state *prealloc,  in attach_eb_folio_to_filemap()
   3379  struct btrfs_folio_state *prealloc = NULL;  alloc_extent_buffer() local
   3389  ret = attach_extent_buffer_folio(eb, eb->folios[i], prealloc);  in alloc_extent_buffer()
   3411  struct btrfs_folio_state *prealloc = NULL;  in alloc_extent_buffer()
   3456  prealloc = btrfs_alloc_folio_state(fs_info, PAGE_SIZE, BTRFS_SUBPAGE_METADATA);  in alloc_extent_buffer()
   3457  if (IS_ERR(prealloc)) {  in alloc_extent_buffer()
    [all...]
| qgroup.h |
    342  struct btrfs_qgroup_list *prealloc);
|
| ioctl.c |
   3553  * because of rename. With relocation we can prealloc extents,  in btrfs_ioctl_balance_ctl()
   3585  struct btrfs_qgroup_list *prealloc = NULL;  in btrfs_ioctl_balance_progress()
   3607  prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);  in btrfs_ioctl_quota_ctl()
   3608  if (!prealloc) {  in btrfs_ioctl_quota_ctl()
   3625  ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc);  in btrfs_ioctl_quota_ctl()
   3626  prealloc = NULL;  in btrfs_ioctl_quota_ctl()
   3644  kfree(prealloc);  in btrfs_ioctl_quota_ctl()
   3664  struct btrfs_qgroup_list *prealloc = NULL;  btrfs_ioctl_qgroup_assign() local
|
| /linux/lib/ |
| stackdepot.c |
    293  static bool depot_init_pool(void **prealloc)  in depot_init_pool() argument
    306  if (!new_pool && *prealloc) {  in depot_init_pool()
    308  WRITE_ONCE(new_pool, *prealloc);  in depot_init_pool()
    309  *prealloc = NULL;  in depot_init_pool()
    313  return false; /* new_pool and *prealloc are NULL */  in depot_init_pool()
    340  depot_keep_new_pool(void **prealloc)  depot_keep_new_pool() argument
    341  static void depot_keep_new_pool(void **prealloc)  in depot_keep_new_pool()
    353  WRITE_ONCE(new_pool, *prealloc);  in depot_keep_new_pool()
    354  *prealloc = NULL;
    359  depot_pop_free_pool(void **prealloc, size_t size)  depot_pop_free_pool() argument
    361  static struct stack_record *depot_pop_free_pool(void **prealloc, size_t size)  in depot_pop_free_pool()
    371  if (!depot_init_pool(prealloc))  in depot_pop_free_pool()
    430  depot_alloc_stack(unsigned long *entries, unsigned int nr_entries, u32 hash, depot_flags_t flags, void **prealloc)  depot_alloc_stack() argument
    642  void *prealloc = NULL;  stack_depot_save_flags() local
    [all...]
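stackdepot threads a void **prealloc through its pool helpers so a page allocated outside the raw spinlock can be consumed exactly once: a callee that keeps the memory NULLs *prealloc, and the caller unconditionally frees whatever survives. A userspace sketch of that convention (names and sizes are illustrative, and malloc/free stand in for page allocation):

```c
#include <stdbool.h>
#include <stdlib.h>

static void *new_pool;

/* Consume the caller's preallocation if a fresh pool is needed. */
static bool depot_init_pool(void **prealloc)
{
	if (!new_pool && *prealloc) {
		new_pool = *prealloc;   /* WRITE_ONCE() in the kernel */
		*prealloc = NULL;       /* signal: consumed */
	}
	return new_pool != NULL;
}

void save_stack(void)
{
	void *prealloc = malloc(4096);  /* allocated outside the raw lock */

	/* ... raw_spin_lock_irqsave(&pool_lock, flags) in the kernel ... */
	depot_init_pool(&prealloc);
	/* ... raw_spin_unlock_irqrestore(&pool_lock, flags) ... */

	free(prealloc);                 /* no-op if the pool consumed it */
}
```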
| /linux/net/sched/ |
| sch_gred.c |
    484  struct gred_sched_data **prealloc,  in gred_change_vq() argument
    496  table->tab[dp] = q = *prealloc;  in gred_change_vq()
    497  *prealloc = NULL;  in gred_change_vq()
    651  struct gred_sched_data *prealloc;  in gred_change() local
    703  prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);  in gred_change()
    706  err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,  in gred_change()
    721  kfree(prealloc);  in gred_change()
    728  kfree(prealloc);  in gred_change()
|
| /linux/arch/arc/lib/ |
| memset-archs.S |
     21  prealloc [\reg, \off]
|
| /linux/drivers/gpu/drm/msm/ |
| msm_gem_vma.c |
    117  /** @prealloc: Tracking for pre-allocated MMU pgtable pages */
    118  struct msm_mmu_prealloc prealloc;  member
    706  vm->mmu->prealloc = &job->prealloc;  in msm_vma_job_run()
    731  vm->mmu->prealloc = NULL;  in msm_vma_job_run()
    758  vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);  in msm_vma_job_free()
    760  atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);  in msm_vma_job_free()
   1150  mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);  in prealloc_count()
   1166  * Determine the amount of memory to prealloc for pgtables. For sparse images,  in ops_are_same_pte()
   1218  atomic_add(job->prealloc ...  in vm_bind_prealloc_count()
    [all...]
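These matches pair preallocation with throttling: each VM_BIND job counts the pagetable pages it may need, adds that to an in-flight counter before running, and subtracts it again in msm_vma_job_free(). A sketch of the accounting only, assuming a plain C11 atomic and a busy-wait in place of the driver's waitqueue; the names and the limit are invented for illustration:

```c
#include <stdatomic.h>

#define IN_FLIGHT_MAX 1024     /* illustrative cap on outstanding pages */

static atomic_int in_flight;

struct vm_bind_job { int prealloc_count; };

/* Account for the worst-case pgtable pages before the job runs.
 * A real driver sleeps on a waitqueue instead of spinning. */
void job_prealloc_count(struct vm_bind_job *job, int npages)
{
	job->prealloc_count = npages;
	while (atomic_load(&in_flight) + npages > IN_FLIGHT_MAX)
		;  /* throttle: wait for earlier jobs to retire */
	atomic_fetch_add(&in_flight, npages);
}

void job_free(struct vm_bind_job *job)
{
	atomic_fetch_sub(&in_flight, job->prealloc_count);
}
```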
| msm_mmu.h |
     66  struct msm_mmu_prealloc *prealloc;  member
|
| /linux/drivers/media/platform/renesas/vsp1/ |
| vsp1_dl.h |
     56  unsigned int prealloc);
|
| /linux/drivers/gpu/drm/i915/display/ |
| intel_fbdev.c |
    274  bool prealloc = false;  in intel_fbdev_driver_fbdev_probe() local
    305  prealloc = true;  in intel_fbdev_driver_fbdev_probe()
    341  if (!intel_bo_is_shmem(obj) && !prealloc)  in intel_fbdev_driver_fbdev_probe()
|
| /linux/tools/testing/selftests/net/ |
| lwt_dst_cache_ref_loop.sh |
     59  encap ioam6 trace prealloc type 0x800000 ns 0 size 4 \
    168  encap ioam6 trace prealloc type 0x800000 ns 1 size 4 \
|
| /linux/fs/xfs/ |
| xfs_iomap.c |
    419  /* no dq, or over hi wmark, squash the prealloc completely */  in xfs_quota_calc_throttle()
    542  * XFS_BMBT_MAX_EXTLEN is not a power of two value but we round the prealloc  in xfs_iomap_prealloc_size()
    545  * prealloc size, we round up first, apply appropriate throttling, round  in xfs_iomap_prealloc_size()
    560  * Check each quota to cap the prealloc size, provide a shift value to  in xfs_iomap_prealloc_size()
    574  * The final prealloc size is set to the minimum of free space available  in xfs_iomap_prealloc_size()
    596  * available, squash the prealloc hard. This can happen if we  in xfs_iomap_prealloc_size()
   1462  xfs_filblks_t prealloc,  in xfs_bmapi_reserve_delalloc() argument
   1475  whichfork == XFS_COW_FORK && !prealloc;  in xfs_bmapi_reserve_delalloc()
   1479  * Cap the alloc length. Keep track of prealloc so we know whether to  in xfs_bmapi_reserve_delalloc()
   1483  alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLE ...  in xfs_bmapi_reserve_delalloc()
    [all...]
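The xfs_iomap_prealloc_size() comments describe a sizing pipeline: round the candidate up to a power of two, apply a throttle shift derived from quota and free-space headroom, clamp to free space, then round back down to a power of two (line 545). A sketch of that arithmetic; the helper names and exact ordering are assumptions drawn from the comments, not xfs's code:

```c
#include <stdint.h>

/* Round @x up to the next power of two (illustrative helper). */
static uint64_t roundup_pow2(uint64_t x)
{
	uint64_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

/* Candidate prealloc size in blocks: round up, throttle, clamp, round down. */
uint64_t prealloc_size(uint64_t want, unsigned int throttle_shift,
		       uint64_t freesp)
{
	uint64_t alloc = roundup_pow2(want) >> throttle_shift;

	if (alloc > freesp)
		alloc = freesp;          /* never prealloc more than is free */
	while (alloc & (alloc - 1))
		alloc &= alloc - 1;      /* round down to a power of two */
	return alloc;
}
```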
| /linux/drivers/md/ |
| dm-cache-target.c |
   1403  struct dm_bio_prison_cell_v2 *prealloc;  in mg_lock_writes() local
   1405  prealloc = alloc_prison_cell(cache);  in mg_lock_writes()
   1415  prealloc, &mg->cell);  in mg_lock_writes()
   1417  free_prison_cell(cache, prealloc);  in mg_lock_writes()
   1422  if (mg->cell != prealloc)  in mg_lock_writes()
   1423  free_prison_cell(cache, prealloc);  in mg_lock_writes()
   1533  struct dm_bio_prison_cell_v2 *prealloc;  in invalidate_lock() local
   1535  prealloc = alloc_prison_cell(cache);  in invalidate_lock()
   1539  READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);  in invalidate_lock()
   1541  free_prison_cell(cache, prealloc);  in invalidate_lock()
    [all...]
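mg_lock_writes() shows a subtler variant: the cell-locking call may hand back an existing cell instead of installing the caller's preallocated one, so the caller frees its own cell only when a different one came back (lines 1422-1423). A sketch of that "was my prealloc used?" check, with stand-in types in place of dm-bio-prison's API:

```c
#include <stdbool.h>
#include <stdlib.h>

struct cell { int key; };

/* Pretend lock: returns the cell now holding the lock for @key, which is
 * either the caller's @prealloc or a previously installed cell. */
static struct cell *lock_cell(struct cell **slot, struct cell *prealloc,
			      int key)
{
	if (!*slot) {
		prealloc->key = key;
		*slot = prealloc;       /* prealloc was installed */
	}
	return *slot;
}

bool lock_writes(struct cell **slot, int key)
{
	struct cell *prealloc = calloc(1, sizeof(*prealloc));
	struct cell *cell;

	if (!prealloc)
		return false;
	cell = lock_cell(slot, prealloc, key);
	if (cell != prealloc)           /* an existing cell won; ours is surplus */
		free(prealloc);
	return true;
}
```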
| /linux/drivers/usb/gadget/function/ |
| u_ether.c |
    313  static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)  prealloc() function
    314  static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)  in prealloc()
    358  status = prealloc(&dev->tx_reqs, link->in_ep, n);  in alloc_requests()
    361  status = prealloc(&dev->rx_reqs, link->out_ep, n);  in alloc_requests()
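Here prealloc() is a helper that keeps a free list topped up to n USB requests so the I/O path never has to allocate. A sketch with a singly linked list standing in for the kernel's list_head and usb_ep_alloc_request(); the grow/shrink behavior is an assumption based on the helper's shape, not a quote of its code:

```c
#include <stdlib.h>

struct req { struct req *next; };

/* Bring *list to exactly @n preallocated requests. */
static int prealloc_reqs(struct req **list, unsigned int n)
{
	unsigned int have = 0;
	struct req *r;

	for (r = *list; r; r = r->next)
		have++;
	while (have < n) {              /* grow up to the target */
		r = calloc(1, sizeof(*r));
		if (!r)
			return -1;
		r->next = *list;
		*list = r;
		have++;
	}
	while (have > n) {              /* shrink if over-provisioned */
		r = *list;
		*list = r->next;
		free(r);
		have--;
	}
	return 0;
}
```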
|
| /linux/sound/hda/common/ |
| Kconfig |
     95  via a proc file (/proc/asound/card*/pcm*/sub*/prealloc), too.
|
| /linux/mm/ |
| memory.c |
   1049  struct folio **prealloc, struct page *page)  in copy_present_page() argument
   1054  new_folio = *prealloc;  in copy_present_page()
   1059  * We have a prealloc page, all good! Take it  in copy_present_page()
   1066  *prealloc = NULL;  in copy_present_page()
   1115  int max_nr, int *rss, struct folio **prealloc)  in copy_present_ptes() argument
   1133  if (unlikely(!*prealloc && folio_test_large(folio) && max_nr != 1)) {  in copy_present_ptes()
   1170  addr, rss, prealloc, page);  in copy_present_ptes()
   1222  struct folio *prealloc = NULL;  in copy_pte_range() local
   1302  /* copy_present_ptes() will clear `*prealloc' if consumed */  in copy_pte_range()
   1305  ptent, addr, max_nr, rss, &prealloc);  in copy_pte_range()
    [all...]
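The fork path preallocates a destination folio outside the pte lock: copy_present_ptes() clears *prealloc when it consumes it (see the comment at line 1302), and when a copy is needed but nothing is cached, copy_pte_range() backs out, allocates while sleeping is still allowed, and retries. A userspace sketch of that drop-lock, allocate, retry shape; names and the single-slot model are illustrative, not mm/memory.c's contract:

```c
#include <pthread.h>
#include <stdlib.h>

#define EAGAIN_NEED_PAGE 1

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;

/* Under the lock: consume *prealloc if a copy destination is needed. */
static int copy_one(void **dst, void **prealloc)
{
	if (*dst)
		return 0;               /* nothing to copy */
	if (!*prealloc)
		return EAGAIN_NEED_PAGE;
	*dst = *prealloc;               /* consumed: clear the caller's slot */
	*prealloc = NULL;
	return 0;
}

int copy_range(void **dst)
{
	void *prealloc = NULL;
	int ret;

again:
	pthread_mutex_lock(&ptl);
	ret = copy_one(dst, &prealloc);
	pthread_mutex_unlock(&ptl);
	if (ret == EAGAIN_NEED_PAGE) {
		prealloc = malloc(4096); /* sleepable allocation, lock dropped */
		if (!prealloc)
			return -1;
		goto again;
	}
	free(prealloc);                 /* leftover if it was never consumed */
	return 0;
}
```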
| /linux/Documentation/filesystems/ |
| ntfs3.rst |
    102  * - prealloc
|
| /linux/fs/kernfs/ |
| file.c |
    687  if (ops->prealloc && ops->seq_show)  in kernfs_fop_open()
    689  if (ops->prealloc) {  in kernfs_fop_open()
|
| /linux/fs/ntfs3/ |
| file.c |
    533  &new_valid, ni->mi.sbi->options->prealloc, NULL);  in ntfs_truncate()
   1378  if (sbi->options->prealloc &&  in ntfs_file_release()
|
| /linux/Documentation/sound/designs/ |
| procfile.rst |
    136  ``card*/pcm*/sub*/prealloc``
|
| /linux/fs/f2fs/ |
| data.c |
   1541  blkcnt_t prealloc;  f2fs_map_blocks() local
   1625  blkcnt_t prealloc;  in f2fs_map_blocks()
   1674  prealloc = 0;  in f2fs_map_blocks()
   1699  prealloc++;  in f2fs_map_blocks()
   1795  err = f2fs_reserve_new_blocks(&dn, prealloc);
   1800  if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {  in f2fs_overwrite_io()
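f2fs_map_blocks() counts unmapped slots in prealloc during its walk (line 1699) and reserves them with a single f2fs_reserve_new_blocks() call afterwards, rather than reserving block by block. A sketch of that batched-reservation pattern with stand-in functions and a flat array in place of f2fs's dnode:

```c
#include <stddef.h>

#define HOLE 0UL    /* stand-in for an unallocated block address */

/* Stand-in for f2fs_reserve_new_blocks(): fill @count holes in one batch. */
static int reserve_new_blocks(unsigned long *map, size_t start, size_t count)
{
	size_t i, done = 0;

	for (i = start; done < count; i++) {
		if (map[i] == HOLE) {
			map[i] = 1UL;   /* pretend a block was reserved */
			done++;
		}
	}
	return 0;
}

int map_blocks(unsigned long *map, size_t start, size_t len)
{
	size_t i, prealloc = 0;

	for (i = start; i < start + len; i++)
		if (map[i] == HOLE)
			prealloc++;     /* defer: just count for now */

	if (prealloc)                   /* one batched reservation call */
		return reserve_new_blocks(map, start, prealloc);
	return 0;
}
```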
|