xref: /linux/arch/riscv/include/asm/io.h (revision cb7e3669c683669d93139184adff68a7d9000536)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
 *   which was based on arch/arm/include/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_IO_H
#define _ASM_RISCV_IO_H

#include <linux/types.h>
#include <linux/pgtable.h>
#include <asm/mmiowb.h>
#include <asm/early_ioremap.h>

/*
 * MMIO access functions are separated out to break dependency cycles
 * when using {read,write}* fns in low-level headers
 */
#include <asm/mmio.h>

/*
 *  I/O port access constants.
 */
#ifdef CONFIG_MMU
#define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
#define PCI_IOBASE		((void __iomem *)PCI_IO_START)

#define ioremap_wc(addr, size)	\
	ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL_NC))

#endif /* CONFIG_MMU */
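
/*
 * Illustrative sketch, not part of the original header: a driver mapping a
 * write-combining region (e.g. a hypothetical frame buffer BAR) would use the
 * ioremap_wc() defined above roughly as follows; bar_phys and bar_len are
 * placeholder names:
 *
 *	void __iomem *fb = ioremap_wc(bar_phys, bar_len);
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	iounmap(fb);
 *
 * With _PAGE_KERNEL_NC the mapping is non-cacheable, which suits streaming
 * device writes.
 */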

/*
 * Emulation routines for the port-mapped IO space used by some PCI drivers.
 * These are defined as being "fully synchronous", but also "not guaranteed to
 * be fully ordered with respect to other memory and I/O operations".  We're
 * going to be on the safe side here and just make them:
 *  - Fully ordered WRT each other, by bracketing them with two fences.  The
 *    outer set contains both I/O so inX is ordered with outX, while the inner
 *    just needs the type of the access (I for inX and O for outX).
 *  - Ordered in the same manner as readX/writeX WRT memory by subsuming their
 *    fences.
 *  - Ordered WRT timer reads, so udelay and friends don't get elided by the
 *    implementation.
 * Note that there is no way to actually enforce that outX is a non-posted
 * operation on RISC-V, but hopefully the timer ordering constraint is
 * sufficient to ensure this works sanely on controllers that support I/O
 * writes.
 */
#define __io_pbr()	RISCV_FENCE(io, i)
#define __io_par(v)	RISCV_FENCE(i, ior)
#define __io_pbw()	RISCV_FENCE(iow, o)
#define __io_paw()	RISCV_FENCE(o, io)
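
/*
 * For illustration only, not part of the original header: the generic
 * inb()/outb() from <asm-generic/io.h> pick up the hooks above roughly as
 * shown below, so every port access ends up bracketed by the two fences
 * described in the comment:
 *
 *	u8 inb(unsigned long addr)
 *	{
 *		u8 val;
 *
 *		__io_pbr();				// fence io,i
 *		val = __raw_readb(PCI_IOBASE + addr);
 *		__io_par(val);				// fence i,ior
 *		return val;
 *	}
 *
 *	void outb(u8 value, unsigned long addr)
 *	{
 *		__io_pbw();				// fence iow,o
 *		__raw_writeb(value, PCI_IOBASE + addr);
 *		__io_paw();				// fence o,io
 *	}
 */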

/*
 * Accesses from a single hart to a single I/O address must be ordered.  This
 * allows us to use the raw read macros, but we still need to fence before and
 * after the block to ensure ordering WRT other macros.  These are defined to
 * perform host-endian accesses so we use __raw instead of __cpu.
 */
#define __io_reads_ins(port, ctype, len, bfence, afence)			\
	static inline void __ ## port ## len(const volatile void __iomem *addr,	\
					     void *buffer,			\
					     unsigned int count)		\
	{									\
		bfence;								\
		if (count) {							\
			ctype *buf = buffer;					\
										\
			do {							\
				ctype x = __raw_read ## len(addr);		\
				*buf++ = x;					\
			} while (--count);					\
		}								\
		afence;								\
	}

#define __io_writes_outs(port, ctype, len, bfence, afence)			\
	static inline void __ ## port ## len(volatile void __iomem *addr,	\
					     const void *buffer,		\
					     unsigned int count)		\
	{									\
		bfence;								\
		if (count) {							\
			const ctype *buf = buffer;				\
										\
			do {							\
				__raw_write ## len(*buf++, addr);		\
			} while (--count);					\
		}								\
		afence;								\
	}
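
/*
 * For reference, a sketch (modulo whitespace) of what one of the
 * instantiations below roughly expands to, given the macro above:
 *
 *	static inline void __readsw(const volatile void __iomem *addr,
 *				    void *buffer, unsigned int count)
 *	{
 *		__io_br();
 *		if (count) {
 *			u16 *buf = buffer;
 *
 *			do {
 *				u16 x = __raw_readw(addr);
 *				*buf++ = x;
 *			} while (--count);
 *		}
 *		__io_ar(addr);
 *	}
 */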

__io_reads_ins(reads,  u8, b, __io_br(), __io_ar(addr))
__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
#define readsl(addr, buffer, count) __readsl(addr, buffer, count)

__io_reads_ins(ins,  u8, b, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)

__io_writes_outs(writes,  u8, b, __io_bw(), __io_aw())
__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
#define writesl(addr, buffer, count) __writesl(addr, buffer, count)

__io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
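
/*
 * Usage sketch, illustrative only: a legacy port-I/O driver draining a 16-bit
 * data FIFO would typically do something like the following, where DATA_PORT
 * is a placeholder port offset that the wrappers above add to PCI_IOBASE:
 *
 *	u16 buf[256];
 *
 *	insw(DATA_PORT, buf, ARRAY_SIZE(buf));		// device -> memory
 *	outsw(DATA_PORT, buf, ARRAY_SIZE(buf));		// memory -> device
 *
 * Each burst is bracketed by the port-I/O fences defined earlier in this file.
 */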

#ifdef CONFIG_64BIT
__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
#define readsq(addr, buffer, count) __readsq(addr, buffer, count)

__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)

__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
#define writesq(addr, buffer, count) __writesq(addr, buffer, count)

__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
#endif

#include <asm-generic/io.h>

#ifdef CONFIG_MMU
#define arch_memremap_wb(addr, size, flags)	\
	((__force void *)ioremap_prot((addr), (size), __pgprot(_PAGE_KERNEL)))
#endif
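
/*
 * Illustrative note, not part of the original header: memremap() with
 * MEMREMAP_WB can end up in the arch_memremap_wb() hook above, so a driver
 * mapping a reserved RAM region might do, roughly (res_start and res_size
 * are placeholder names):
 *
 *	void *va = memremap(res_start, res_size, MEMREMAP_WB);
 *
 * The resulting mapping uses _PAGE_KERNEL, i.e. normal cacheable write-back
 * attributes.
 */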

#endif /* _ASM_RISCV_IO_H */