/* xref: /linux/arch/loongarch/include/asm/io.h (revision 24168c5e6dfbdd5b414f048f47f75d64533296ca) */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4  */
5 #ifndef _ASM_IO_H
6 #define _ASM_IO_H
7 
8 #include <linux/kernel.h>
9 #include <linux/types.h>
10 
11 #include <asm/addrspace.h>
12 #include <asm/cpu.h>
13 #include <asm/page.h>
14 #include <asm/pgtable-bits.h>
15 #include <asm/string.h>
16 
/*
 * Boot-time mapping helpers, usable before the normal ioremap() machinery
 * is up (__init: boot-time only).
 */
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);

/* Early memremap()/memunmap() simply reuse the early ioremap() pair. */
#define early_memremap early_ioremap
#define early_memunmap early_iounmap
22 
23 #ifdef CONFIG_ARCH_IOREMAP
24 
25 static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
26 					 unsigned long prot_val)
27 {
28 	if (prot_val & _CACHE_CC)
29 		return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
30 	else
31 		return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
32 }
33 
/* Plain ioremap() is strongly-ordered uncached (SUC). */
#define ioremap(offset, size)		\
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))

/* Direct-mapped windows need no teardown; evaluate @addr only to silence
 * unused-variable warnings in callers. */
#define iounmap(addr) 			((void)(addr))
38 
39 #endif
40 
41 /*
42  * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache().
43  * They map bus memory into CPU space, the mapped memory is marked uncachable
44  * (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and
45  * cachable (_CACHE_CC) respectively for CPU access.
46  *
47  * @offset:    bus address of the memory
48  * @size:      size of the resource to map
49  */
/* Write-combined mapping; falls back to strongly-ordered uncached when
 * wc_enabled is clear (presumably parts where WUC is unusable — see the
 * definition of wc_enabled). */
#define ioremap_wc(offset, size)	\
	ioremap_prot((offset), (size),	\
		pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))

/* Coherent-cached mapping (PAGE_KERNEL). */
#define ioremap_cache(offset, size)	\
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))

/* Order MMIO writes (e.g. before releasing a lock): full write barrier. */
#define mmiowb() wmb()
58 
59 /*
60  * String version of I/O memory access operations.
61  */
/* Out-of-line implementations of the string MMIO accessors. */
extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
/* Override the asm-generic versions with the arch implementations above. */
#define memset_io(c, v, l)     __memset_io((c), (v), (l))
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l)   __memcpy_toio((c), (a), (l))

/* asm-generic "after write" hook: barrier after each MMIO write accessor. */
#define __io_aw() mmiowb()
70 
71 #ifdef CONFIG_KFENCE
/*
 * With KFENCE enabled, pool objects are not covered by the linear-map
 * __pa() arithmetic: addresses at or above vm_map_base are translated
 * through the page tables (tlb_virt_to_page()) instead, with the
 * intra-page offset re-added.  Linear-mapped addresses (below
 * vm_map_base) take the fast __pa() path.
 *
 * The macro argument is parenthesized before each cast so that
 * virt_to_phys(p + 1) converts the whole expression — a bare
 * "(unsigned long)p + 1" would add one byte instead of one element.
 */
#define virt_to_phys(kaddr)								\
({											\
	(likely((unsigned long)(kaddr) < vm_map_base)) ? __pa((unsigned long)(kaddr)) :	\
	page_to_phys(tlb_virt_to_page((unsigned long)(kaddr))) + offset_in_page((unsigned long)(kaddr));\
})
77 
/*
 * Reverse of virt_to_phys() under KFENCE: once the KFENCE pool exists,
 * go through the page structs (phys_to_page()/page_address()) so pool
 * pages resolve to their vmalloc-space addresses; before the pool is set
 * up, plain __va() suffices.
 *
 * As with virt_to_phys(), the argument is parenthesized before each cast
 * so compound expressions are converted as a whole.
 */
#define phys_to_virt(paddr)								\
({											\
	extern char *__kfence_pool;							\
	(unlikely(__kfence_pool == NULL)) ? __va((unsigned long)(paddr)) :		\
	page_address(phys_to_page((unsigned long)(paddr))) + offset_in_page((unsigned long)(paddr));\
})
84 #endif
85 
86 #include <asm-generic/io.h>
87 
/* Arch-specific range checks used by /dev/mem read/write and mmap. */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
91 
92 #endif /* _ASM_IO_H */
93