Home
last modified time | relevance | path

Searched refs:new_order (Results 1 – 12 of 12) sorted by relevance

/linux/include/linux/
H A Dhuge_mm.h373 unsigned int new_order);
374 int folio_split_unmapped(struct folio *folio, unsigned int new_order);
377 int folio_check_splittable(struct folio *folio, unsigned int new_order,
379 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
383 unsigned int new_order) in split_huge_page_to_list_to_order() argument
385 return __split_huge_page_to_list_to_order(page, list, new_order); in split_huge_page_to_list_to_order()
387 static inline int split_huge_page_to_order(struct page *page, unsigned int new_order) in split_huge_page_to_order() argument
389 return split_huge_page_to_list_to_order(page, NULL, new_order); in split_huge_page_to_order()
407 struct page *page, unsigned int new_order) in try_folio_split_to_order() argument
409 if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM)) in try_folio_split_to_order()
[all …]
H A Dpgalloc_tag.h199 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
208 static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} in pgalloc_tag_split() argument
H A Dmemcontrol.h1022 unsigned new_order);
1442 unsigned old_order, unsigned new_order) in folio_split_memcg_refs() argument
/linux/mm/
H A Dreadahead.c479 unsigned int new_order = ra->order; in page_cache_ra_order()
489 new_order = min(mapping_max_folio_order(mapping), new_order); in page_cache_ra_order()
490 new_order = min_t(unsigned int, new_order, ilog2(ra->size)); in page_cache_ra_order()
491 new_order = max(new_order, min_order); in page_cache_ra_order()
493 ra->order = new_order; in page_cache_ra_order()
499 * If the new_order is greater than min_order and index is in page_cache_ra_order()
500 * already aligned to new_order, the in page_cache_ra_order()
476 unsigned int new_order = ra->order; page_cache_ra_order() local
[all …]
H A Dhuge_memory.c3484 int new_order) in __split_folio_to_order() argument
3487 const bool handle_hwpoison = folio_test_has_hwpoisoned(folio) && new_order; in __split_folio_to_order()
3488 long new_nr_pages = 1 << new_order; in __split_folio_to_order()
3565 if (new_order) { in __split_folio_to_order()
3566 prep_compound_page(new_head, new_order); in __split_folio_to_order()
3581 if (new_order) in __split_folio_to_order()
3582 folio_set_order(folio, new_order); in __split_folio_to_order()
3628 static int __split_unmapped_folio(struct folio *folio, int new_order, in __split_unmapped_folio() argument
3634 int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1; in __split_unmapped_folio()
3643 split_order >= new_order; in __split_unmapped_folio()
[all …]
H A Dmemory-failure.c1669 static int try_to_split_thp_page(struct page *page, unsigned int new_order, in try_to_split_thp_page() argument
1675 ret = split_huge_page_to_order(page, new_order); in try_to_split_thp_page()
2453 const int new_order = min_order_for_split(folio); in memory_failure() local
2470 err = try_to_split_thp_page(p, new_order, /* release= */ false); in memory_failure()
2477 if (err || new_order) { in memory_failure()
2806 const int new_order = min_order_for_split(folio); in soft_offline_in_use_page() local
2815 if (new_order || try_to_split_thp_page(page, /* new_order= */ 0, in soft_offline_in_use_page()
H A Dpage_owner.c361 void __split_page_owner(struct page *page, int old_order, int new_order) in __split_page_owner() argument
370 page_owner->order = new_order; in __split_page_owner()
H A Dmemcontrol.c3307 unsigned new_order) in folio_split_memcg_refs() argument
3314 new_refs = (1 << (old_order - new_order)) - 1; in folio_split_memcg_refs()
/linux/lib/
H A Dtest_xarray.c1837 unsigned int order, unsigned int new_order) in check_split_1() argument
1839 XA_STATE_ORDER(xas, xa, index, new_order); in check_split_1()
1849 for (i = 0; i < (1 << order); i += (1 << new_order)) in check_split_1()
1854 unsigned int val = index + (i & ~((1 << new_order) - 1)); in check_split_1()
1869 XA_BUG_ON(xa, found != 1 << (order - new_order)); in check_split_1()
1875 unsigned int order, unsigned int new_order) in check_split_2() argument
1877 XA_STATE_ORDER(xas, xa, index, new_order); in check_split_2()
1890 if (((new_order / XA_CHUNK_SHIFT) < (order / XA_CHUNK_SHIFT)) && in check_split_2()
1891 new_order < order - 1) { in check_split_2()
1896 for (i = 0; i < (1 << order); i += (1 << new_order)) in check_split_2()
[all …]
H A Dalloc_tag.c167 void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) in pgalloc_tag_split() argument
171 unsigned int nr_pages = 1 << new_order; in pgalloc_tag_split()
/linux/drivers/net/ethernet/cortina/
H A Dgemini.c1040 unsigned int new_order; in geth_resize_freeq() local
1067 new_order = min(15, ilog2(new_size - 1) + 1); in geth_resize_freeq()
1069 new_size, new_order); in geth_resize_freeq()
1070 if (geth->freeq_order == new_order) in geth_resize_freeq()
1086 geth->freeq_order = new_order; in geth_resize_freeq()
/linux/drivers/irqchip/
H A Dirq-gic-v3-its.c2498 u32 new_order = *order; in its_parse_indirect_baser() local
2531 new_order = max_t(u32, get_order(esz << ids), new_order); in its_parse_indirect_baser()
2532 if (new_order > MAX_PAGE_ORDER) { in its_parse_indirect_baser()
2533 new_order = MAX_PAGE_ORDER; in its_parse_indirect_baser()
2534 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); in its_parse_indirect_baser()
2540 *order = new_order; in its_parse_indirect_baser()