xref: /linux/arch/powerpc/include/asm/book3s/64/hugetlb.h (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H

#include <asm/firmware.h>

/*
 * For radix we want generic code to handle hugetlb. But if we want both
 * hash and radix to be enabled together, we need to work around the
 * limitations.
 */
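/*
 * Radix MMU implementations of the hugetlb TLB flushes; flush_hugetlb_page()
 * below dispatches to the radix variant when the radix MMU is active.
 */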
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte);

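/*
 * Map a hugetlb hstate to the MMU_PAGE_* index matching its page shift.
 * Unexpected shifts trigger a warning and fall back to mmu_virtual_psize.
 */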
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}
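
/*
 * Illustrative sketch only (not part of this header): a caller holding a
 * hugetlb VMA could derive the MMU page size index roughly like this, using
 * the generic hstate_vma() helper:
 *
 *	struct hstate *h = hstate_vma(vma);
 *	int psize = hstate_get_psize(h);
 *
 * psize can then be used to index mmu_psize_defs[].
 */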

#define __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
static inline bool gigantic_page_runtime_supported(void)
{
	/*
	 * In some cases gigantic pages are reserved with hypervisor
	 * assistance, and runtime allocation of gigantic pages is not
	 * possible on those platforms, i.e. hash translation mode LPARs.
	 */
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return false;

	return true;
}
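
/*
 * Illustrative sketch only: generic hugetlb code is expected to gate runtime
 * growth of the gigantic page pool on this hook, roughly:
 *
 *	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
 *		return -EINVAL;
 *
 * see set_max_huge_pages() in mm/hugetlb.c.
 */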

#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep);

#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t new_pte);
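
/*
 * Illustrative sketch only: these hooks replace the generic fallbacks and
 * implement the transactional protection-change protocol, roughly as used by
 * hugetlb_change_protection() in mm/hugetlb.c:
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * _start tears down the old translation and returns it, _commit installs
 * the new PTE.
 */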

static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
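	/*
	 * Only radix needs an explicit TLB flush here; with hash translation
	 * the invalidation is handled when the HPTE itself is updated.
	 */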
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

static inline int check_and_get_huge_psize(int shift)
{
	int mmu_psize;

	if (shift > SLICE_HIGH_SHIFT)
		return -EINVAL;

	mmu_psize = shift_to_mmu_psize(shift);

	/*
	 * We need to make sure that, for the different page sizes reported by
	 * firmware, we only add hugetlb support for page sizes that can be
	 * supported by the Linux page table layout. For now these are:
	 * Radix: 2M and 1G
	 * Hash: 16M and 16G
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
			return -EINVAL;
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
	return mmu_psize;
}
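
/*
 * Illustrative sketch only: the powerpc hugepage size validation code (e.g.
 * arch_hugetlb_valid_size()) is expected to use this roughly as:
 *
 *	int mmu_psize = check_and_get_huge_psize(shift);
 *
 *	if (mmu_psize < 0)
 *		return false;
 */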

#endif