Searched refs:min_order (Results 1 – 8 of 8) sorted by relevance
/linux/mm/
fail_page_alloc.c
    12  u32 min_order;  (member)
    17  .min_order = 1,
    30  if (order < fail_page_alloc.min_order)  in should_fail_alloc_page()
    62  debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);  in fail_page_alloc_debugfs()

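The fail_page_alloc hits above amount to a single gate: an allocation only becomes a candidate for injected failure when its order is at least the min_order configured through the "min-order" debugfs file. A minimal userspace sketch of that gate, with the fault_attr machinery and probability roll omitted and should_fail_alloc_page() reduced to the order check:

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch only: the real struct embeds a struct fault_attr and also
 * consults should_fail(); here the min_order gate is shown in isolation. */
static struct {
	unsigned int min_order;
} fail_page_alloc = {
	.min_order = 1,		/* default seen in the hit at line 17 */
};

static bool should_fail_alloc_page(unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;	/* too small: never target this allocation */
	return true;		/* real code would also roll the failure probability */
}

int main(void)
{
	printf("order 0 targeted: %d, order 3 targeted: %d\n",
	       should_fail_alloc_page(0), should_fail_alloc_page(3));
	return 0;
}
```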
readahead.c
    470  unsigned int min_order = mapping_min_folio_order(mapping);  in page_cache_ra_order() (local)
    488  new_order = max(new_order, min_order);  in page_cache_ra_order()
    510  while (order > min_order && index + (1UL << order) - 1 > limit)  in page_cache_ra_order()
    771  unsigned int min_order = mapping_min_folio_order(mapping);  in readahead_expand() (local)
    788  folio = ractl_alloc_folio(ractl, gfp_mask, min_order);  in readahead_expand()
    817  folio = ractl_alloc_folio(ractl, gfp_mask, min_order);  in readahead_expand()

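The page_cache_ra_order() hits show the clamping pattern: the requested readahead order is first raised to the mapping's minimum folio order, then walked back down (but never below min_order) while a folio of that order would extend past the readahead limit. A small userspace sketch of just that arithmetic; clamp_ra_order() is a hypothetical name, not kernel API:

```c
#include <stdio.h>

/* Mirrors the two hits at lines 488 and 510: clamp up to min_order,
 * then shrink while a folio of this order would cross the limit. */
static unsigned int clamp_ra_order(unsigned int new_order,
				   unsigned int min_order,
				   unsigned long index, unsigned long limit)
{
	unsigned int order = new_order > min_order ? new_order : min_order;

	while (order > min_order && index + (1UL << order) - 1 > limit)
		order--;
	return order;
}

int main(void)
{
	/* a request for order 6 at index 0, with only pages 0..39 readable */
	printf("order = %u\n", clamp_ra_order(6, 2, 0, 39));
	return 0;
}
```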
truncate.c
    181  unsigned long min_order)  in try_folio_split_or_unmap() (argument)
    189  ret = try_folio_split_to_order(folio, split_at, min_order);  in try_folio_split_or_unmap()
    223  unsigned int min_order;  in truncate_inode_partial_folio() (local)
    253  min_order = mapping_min_folio_order(folio->mapping);  in truncate_inode_partial_folio()
    255  if (!try_folio_split_or_unmap(folio, split_at, min_order)) {  in truncate_inode_partial_folio()
    282  try_folio_split_or_unmap(folio2, split_at2, min_order);  in truncate_inode_partial_folio()

filemap.c
    1978  unsigned int min_order = mapping_min_folio_order(mapping);  in __filemap_get_folio_mpol() (local)
    1979  unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));  in __filemap_get_folio_mpol()
    2004  if (order > min_order)  in __filemap_get_folio_mpol()
    2021  } while (order-- > min_order);  in __filemap_get_folio_mpol()
    2600  unsigned int min_order = mapping_min_folio_order(mapping);  in filemap_create_folio() (local)
    2606  folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order, NULL);  in filemap_create_folio()
    2626  index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order;  in filemap_create_folio()

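In __filemap_get_folio_mpol() the same two values drive an allocation fallback: start from the larger of the caller's FGF order hint and the mapping's minimum order, and on failure retry one order lower, stopping once min_order has been tried. A userspace sketch of that descending loop; try_alloc() and alloc_with_fallback() are hypothetical stand-ins for the folio allocation path:

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

/* Pretend the allocator cannot satisfy anything above order 4. */
static void *try_alloc(unsigned int order)
{
	return order > 4 ? NULL : malloc(PAGE_SIZE << order);
}

static void *alloc_with_fallback(unsigned int hint, unsigned int min_order,
				 unsigned int *got)
{
	/* same shape as the hits at lines 1979 and 2021 */
	unsigned int order = hint > min_order ? hint : min_order;
	void *p;

	do {
		p = try_alloc(order);
		if (p) {
			*got = order;
			return p;
		}
	} while (order-- > min_order);

	return NULL;
}

int main(void)
{
	unsigned int got;
	void *p = alloc_with_fallback(9, 2, &got);

	if (p) {
		printf("allocated at order %u\n", got);
		free(p);
	}
	return 0;
}
```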
huge_memory.c
    3987  unsigned int min_order;  in __folio_split() (local)
    3991  min_order = mapping_min_folio_order(folio->mapping);  in __folio_split()
    3992  if (new_order < min_order) {  in __folio_split()
    4702  unsigned int min_order;  in split_huge_pages_in_file() (local)
    4717  min_order = mapping_min_folio_order(mapping);  in split_huge_pages_in_file()
    4718  target_order = max(new_order, min_order);  in split_huge_pages_in_file()

slub.c
    7578  calc_slab_order(unsigned int size, unsigned int min_order, unsigned int max_order, unsigned int fract_leftover)  calc_slab_order() (argument)
    7580  unsigned int min_order, unsigned int max_order,  in calc_slab_order()
    7585  for (order = min_order; order <= max_order; order++) {  in calc_slab_order()
    7602  unsigned int min_order;  calculate_order() (local)
    7604  unsigned int min_order;  in calculate_order()
    7626  min_order = max_t(unsigned int, slub_min_order,  in calculate_order()
    7628  if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)  in calculate_order()
    7647  order = calc_slab_order(size, min_order, slub_max_order,  in calculate_order()

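calculate_order() and calc_slab_order() use min_order/max_order to pick a slab size: walk the orders from the floor upward and take the first one whose leftover space after packing whole objects is small enough. A self-contained sketch of that search, using the signature visible in the hits; the acceptance test (leftover no more than 1/fract_leftover of the slab) and the fixed PAGE_SIZE are simplifications of the SLUB heuristic:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned int calc_slab_order(unsigned long size, unsigned int min_order,
				    unsigned int max_order,
				    unsigned int fract_leftover)
{
	unsigned int order;

	for (order = min_order; order <= max_order; order++) {
		unsigned long slab_size = PAGE_SIZE << order;
		unsigned long rem = slab_size % size;	/* wasted tail space */

		if (rem <= slab_size / fract_leftover)
			break;	/* waste is acceptable at this order */
	}
	return order;
}

int main(void)
{
	/* 700-byte objects: order 0 wastes 596 of 4096 bytes and fails the
	 * 1/16 leftover test, order 1 passes, so order 1 is chosen. */
	printf("order = %u\n", calc_slab_order(700, 0, 3, 16));
	return 0;
}
```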
page_alloc.c
    2360  int min_order = order;  in __rmqueue_claim() (local)
    2370  min_order = pageblock_order;  in __rmqueue_claim()
    2377  for (current_order = MAX_PAGE_ORDER; current_order >= min_order;  in __rmqueue_claim()

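__rmqueue_claim() uses min_order as the floor of a descending search: it starts at the requested order, may be raised to pageblock_order when only whole pageblocks should be claimed, and the scan then walks from MAX_PAGE_ORDER down to that floor. A rough sketch of the loop shape; find_free_block(), the claim condition, and the constant values are illustrative assumptions, not the kernel's actual logic:

```c
#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGE_ORDER		10
#define PAGEBLOCK_ORDER		9

/* Pretend the free lists only hold blocks of order 9 and below. */
static bool find_free_block(int order)
{
	return order <= 9;
}

static int claim_block(int order, bool whole_pageblocks_only)
{
	int min_order = order;
	int current_order;

	if (whole_pageblocks_only)
		min_order = PAGEBLOCK_ORDER;	/* raise the floor, as at line 2370 */

	for (current_order = MAX_PAGE_ORDER; current_order >= min_order;
	     current_order--) {
		if (find_free_block(current_order))
			return current_order;
	}
	return -1;	/* nothing claimable at or above the floor */
}

int main(void)
{
	printf("claimed order %d\n", claim_block(3, true));
	return 0;
}
```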
/linux/fs/ext4/
inode.c
    5150  u16 min_order, max_order;  in ext4_set_inode_mapping_order() (local)
    5156  min_order = EXT4_SB(sb)->s_min_folio_order;  in ext4_set_inode_mapping_order()
    5157  if (!min_order && !S_ISREG(inode->i_mode))  in ext4_set_inode_mapping_order()
    5161  max_order = min_order;  in ext4_set_inode_mapping_order()
    5163  mapping_set_folio_order_range(inode->i_mapping, min_order, max_order);  in ext4_set_inode_mapping_order()
