/* arch/riscv/include/asm/vmalloc.h */
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H

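/*
 * Arch hooks consumed by the generic vmalloc/ioremap code (the generic
 * fallbacks live in include/linux/vmalloc.h) when
 * CONFIG_HAVE_ARCH_HUGE_VMAP allows it to install mappings larger than
 * a base page.
 */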
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

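/*
 * Cap the mapping size used by the ioremap path at PUD size, i.e.
 * 1 GiB with 4K base pages.
 */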
#define IOREMAP_MAX_ORDER (PUD_SHIFT)

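/*
 * The RISC-V privileged spec permits leaf PTEs at any level of the
 * page table, so PUD- and PMD-level huge vmap mappings are always
 * supported, whatever the protection bits in @prot.
 */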
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return true;
}

#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return true;
}

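/*
 * Svnapot marks a naturally aligned power-of-two (NAPOT) range of
 * contiguous PTEs as a single translation, which the TLB may then
 * cache in one entry. The hooks below let the vmap code exploit
 * this at PTE level.
 */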
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <linux/pgtable.h>

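/*
 * Return the mapping size to use at @addr: the largest NAPOT size
 * that fits in [@addr, @end), is naturally aligned both virtually
 * and physically, and does not exceed @max_page_shift; fall back to
 * PAGE_SIZE otherwise.
 */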
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	unsigned long map_size = PAGE_SIZE;
	unsigned long size, order;

	/* Without Svnapot, only base pages can be mapped at PTE level. */
	if (!has_svnapot())
		return map_size;

	/* Try the NAPOT orders from largest to smallest. */
	for_each_napot_order_rev(order) {
		/* Honour the caller's upper bound on the mapping size. */
		if (napot_cont_shift(order) > max_page_shift)
			continue;

		/* The remaining range must hold a full NAPOT region... */
		size = napot_cont_size(order);
		if (end - addr < size)
			continue;

		/*
		 * ...and both the virtual and the physical address must
		 * be naturally aligned to it.
		 */
		if (!IS_ALIGNED(addr, size))
			continue;

		if (!IS_ALIGNED(PFN_PHYS(pfn), size))
			continue;

		map_size = size;
		break;
	}

	return map_size;
}

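/*
 * Return the page shift vmalloc should use for a sub-PMD allocation
 * of @size bytes: the shift of the largest NAPOT size that divides
 * @size evenly, or PAGE_SHIFT if none does.
 */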
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	int shift = PAGE_SHIFT;
	unsigned long order;

	if (!has_svnapot())
		return shift;

	/* PMD-sized and larger requests are handled by the hooks above. */
	WARN_ON_ONCE(size >= PMD_SIZE);

	/* Pick the largest NAPOT size that divides @size evenly. */
	for_each_napot_order_rev(order) {
		if (napot_cont_size(order) > size)
			continue;

		if (!IS_ALIGNED(size, napot_cont_size(order)))
			continue;

		shift = napot_cont_shift(order);
		break;
	}

	return shift;
}

#endif /* CONFIG_RISCV_ISA_SVNAPOT */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */