/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/string.h>

/* Boot-time I/O mappings, usable before the normal ioremap machinery is up. */
extern void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size);
extern void __init early_iounmap(void __iomem *addr, unsigned long size);

/* Early memory remap shares the early I/O remap implementation. */
#define early_memremap early_ioremap
#define early_memunmap early_iounmap

#ifdef CONFIG_ARCH_IOREMAP

/*
 * Map a physical address into one of the fixed direct-mapped address
 * windows, selected by the cache-attribute bits of @prot_val
 * (_CACHE_MASK): cached (CACHE_BASE), strongly uncached (UNCACHE_BASE)
 * or write-combined (WRITECOMBINE_BASE).  Returns NULL for any other
 * cache attribute.  No page tables are modified, which is why iounmap()
 * below is a no-op.  @size is unused by this window-based scheme.
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
					 unsigned long prot_val)
{
	switch (prot_val & _CACHE_MASK) {
	case _CACHE_CC:
		return (void __iomem *)(unsigned long)(CACHE_BASE + offset);
	case _CACHE_SUC:
		return (void __iomem *)(unsigned long)(UNCACHE_BASE + offset);
	case _CACHE_WUC:
		return (void __iomem *)(unsigned long)(WRITECOMBINE_BASE + offset);
	default:
		return NULL;
	}
}

/* Default ioremap() is strongly uncached (_CACHE_SUC). */
#define ioremap(offset, size)		\
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL_SUC))

/* Direct-window mappings are static; there is nothing to tear down. */
#define iounmap(addr)	((void)(addr))

#endif

/*
 * On LoongArch, ioremap() has two variants, ioremap_wc() and ioremap_cache().
 * They map bus memory into CPU space, the mapped memory is marked uncachable
 * (_CACHE_SUC), uncachable but accelerated by write-combine (_CACHE_WUC) and
 * cachable (_CACHE_CC) respectively for CPU access.
 *
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 */
/* Falls back to strongly uncached when write-combine is disabled (wc_enabled). */
#define ioremap_wc(offset, size)	\
	ioremap_prot((offset), (size),	\
		pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))

#define ioremap_cache(offset, size)	\
	ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))

/* MMIO write ordering, implemented as a write memory barrier. */
#define mmiowb() wmb()

/*
 * String version of I/O memory access operations.
 */
extern void __memset_io(volatile void __iomem *dst, int c, size_t count);
extern void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count);
extern void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count);
#define memset_io(c, v, l)     __memset_io((c), (v), (l))
#define memcpy_fromio(a, c, l) __memcpy_fromio((a), (c), (l))
#define memcpy_toio(c, a, l)   __memcpy_toio((c), (a), (l))

/* Barrier after I/O accessor writes (hook consumed by asm-generic/io.h). */
#define __io_aw() mmiowb()

#ifdef CONFIG_KFENCE
/*
 * With KFENCE enabled, the generic linear __pa()/__va() translation is
 * not sufficient for every address:
 *  - virt_to_phys(): addresses below vm_map_base use the linear __pa()
 *    translation; anything at or above it is resolved through the page
 *    tables via tlb_virt_to_page().  NOTE(review): presumably because
 *    the KFENCE pool lives above vm_map_base, outside the linear map —
 *    confirm against the KFENCE pool setup code.
 *  - phys_to_virt(): once the KFENCE pool exists (__kfence_pool set),
 *    translate via page_address() so pages backing the pool resolve to
 *    their actual virtual mapping instead of the linear-map alias.
 */
#define virt_to_phys(kaddr)								\
({											\
	(likely((unsigned long)kaddr < vm_map_base)) ? __pa((unsigned long)kaddr) :	\
	page_to_phys(tlb_virt_to_page((unsigned long)kaddr)) + offset_in_page((unsigned long)kaddr);\
})

#define phys_to_virt(paddr)								\
({											\
	extern char *__kfence_pool;							\
	(unlikely(__kfence_pool == NULL)) ? __va((unsigned long)paddr) :		\
	page_address(phys_to_page((unsigned long)paddr)) + offset_in_page((unsigned long)paddr);\
})
#endif

#include <asm-generic/io.h>

/* Arch-specific validation of /dev/mem physical ranges (see valid_phys_addr_range()). */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* _ASM_IO_H */