/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef	_LINUX_IO_H_
#define	_LINUX_IO_H_

#include <machine/vm.h>
#include <sys/endian.h>
#include <sys/types.h>

#include <linux/compiler.h>
#include <linux/types.h>

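/*
 * Raw MMIO accessors: plain volatile loads and stores, with no memory
 * barriers and no byte swapping.
 */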
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

static inline void
__raw_writel(uint32_t b, volatile void *addr)
{
	*(volatile uint32_t *)addr = b;
}

static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return *(const volatile uint64_t *)addr;
}

static inline void
__raw_writeq(uint64_t b, volatile void *addr)
{
	*(volatile uint64_t *)addr = b;
}

/*
 * XXX This is all x86 specific.  It should be bus space access.
 */
#define	mmiowb()	barrier()

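/*
 * MMIO write accessors.  No ordering barriers are issued, so the
 * _relaxed variant is identical to writel() in this implementation.
 */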
#undef writel
static inline void
writel(uint32_t b, void *addr)
{
	*(volatile uint32_t *)addr = b;
}

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t b, void *addr)
{
	*(volatile uint32_t *)addr = b;
}

#undef writeq
static inline void
writeq(uint64_t b, void *addr)
{
	*(volatile uint64_t *)addr = b;
}

#undef writeb
static inline void
writeb(uint8_t b, void *addr)
{
	*(volatile uint8_t *)addr = b;
}

#undef writew
static inline void
writew(uint16_t b, void *addr)
{
	*(volatile uint16_t *)addr = b;
}

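/*
 * Linux-style ioread/iowrite accessors.  These operate on memory-mapped
 * addresses; the *be variants byte swap to and from big-endian.
 */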
#undef ioread8
static inline uint8_t
ioread8(const volatile void *addr)
{
	return *(const volatile uint8_t *)addr;
}

#undef ioread16
static inline uint16_t
ioread16(const volatile void *addr)
{
	return *(const volatile uint16_t *)addr;
}

#undef ioread16be
static inline uint16_t
ioread16be(const volatile void *addr)
{
	return be16toh(*(const volatile uint16_t *)addr);
}

#undef ioread32
static inline uint32_t
ioread32(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

#undef ioread32be
static inline uint32_t
ioread32be(const volatile void *addr)
{
	return be32toh(*(const volatile uint32_t *)addr);
}

#undef iowrite8
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}

#undef iowrite16
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}

#undef iowrite32
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}

#undef iowrite32be
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = htobe32(v);
}

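/* Plain MMIO read accessors. */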
#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	return *(const volatile uint8_t *)addr;
}

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	return *(const volatile uint16_t *)addr;
}

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

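/* x86 only: write one byte to an I/O port using the outb instruction. */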
#if defined(__i386__) || defined(__amd64__)
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
#define	_ioremap_attr(...) NULL
#endif

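/*
 * Linux ioremap*() variants expressed in terms of FreeBSD VM memory
 * attributes.  On architectures without _ioremap_attr() these all
 * evaluate to NULL.
 */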
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#define	ioremap_wb(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
void iounmap(void *addr);

#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))

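/*
 * Copy 'count' 32-bit (or 64-bit) words from a memory buffer to MMIO
 * space using the raw accessors.  On 32-bit platforms the 64-bit copy
 * falls back to twice as many 32-bit writes.
 */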
static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src;
	uint32_t *dst;
	int i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}

static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src;
	uint64_t *dst;
	int i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}

enum {
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};

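/*
 * Map physical memory with the first requested cache attribute that can
 * be satisfied, trying write-back, then write-through, then
 * write-combining; returns NULL if no mapping was established.
 */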
static inline void *
memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	void *addr = NULL;

	if ((flags & MEMREMAP_WB) &&
	    (addr = ioremap_wb(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WT) &&
	    (addr = ioremap_wt(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WC) &&
	    (addr = ioremap_wc(offset, size)) != NULL)
		goto done;
done:
	return (addr);
}

static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}

#endif	/* _LINUX_IO_H_ */