xref: /freebsd/sys/compat/linuxkpi/common/include/linux/io.h (revision 7648bc9fee8dec6cb3c4941e0165a930fbe8dcb0)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #ifndef	_LINUX_IO_H_
32 #define	_LINUX_IO_H_
33 
34 #include <machine/vm.h>
35 #include <sys/endian.h>
36 #include <sys/types.h>
37 
38 #include <linux/compiler.h>
39 #include <linux/types.h>
40 
41 /*
42  * XXX This is all x86 specific.  It should be bus space access.
43  */
44 
45 /* Access MMIO registers atomically without barriers and byte swapping. */
46 
/*
 * Each raw accessor performs a single volatile load or store of the
 * exact register width, with no barriers and no byte swapping; the
 * compiler may still reorder these relative to surrounding code.
 * Every function is shadowed by a same-named function-like macro so
 * Linux driver code can probe for its presence with #ifdef.
 */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	__raw_readb(addr)	__raw_readb(addr)

static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)

static inline uint16_t
__raw_readw(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	__raw_readw(addr)	__raw_readw(addr)

static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)

static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	__raw_readl(addr)	__raw_readl(addr)

static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)

/* 64-bit raw accessors are only provided where 8-byte accesses are native. */
#ifdef __LP64__
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	__raw_readq(addr)	__raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif
104 
/*
 * Linux's MMIO write-ordering hint; mapped to a plain compiler
 * barrier here, so no hardware fence is emitted.
 */
#define	mmiowb()	barrier()

/* Access little-endian MMIO registers atomically with memory barriers. */

/*
 * The "memory barriers" below are compiler barriers: each volatile
 * access is bracketed by __compiler_membar(), which prevents compiler
 * reordering but emits no CPU fence.  NOTE(review): per the XXX note
 * at the top of the file this is x86-specific; confirm sufficiency
 * before relying on it for weakly-ordered architectures.
 */
#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__compiler_membar();
	v = *(const volatile uint8_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readb(addr)		readb(addr)

#undef writeb
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint8_t *)addr = v;
	__compiler_membar();
}
#define	writeb(v, addr)		writeb(v, addr)

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__compiler_membar();
	v = *(const volatile uint16_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readw(addr)		readw(addr)

#undef writew
static inline void
writew(uint16_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint16_t *)addr = v;
	__compiler_membar();
}
#define	writew(v, addr)		writew(v, addr)

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__compiler_membar();
	v = *(const volatile uint32_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readl(addr)		readl(addr)

#undef writel
static inline void
writel(uint32_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint32_t *)addr = v;
	__compiler_membar();
}
#define	writel(v, addr)		writel(v, addr)

/* 64-bit variants only where a native 8-byte access exists. */
#undef readq
#undef writeq
#ifdef __LP64__
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__compiler_membar();
	v = *(const volatile uint64_t *)addr;
	__compiler_membar();
	return (v);
}
#define	readq(addr)		readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	__compiler_membar();
	*(volatile uint64_t *)addr = v;
	__compiler_membar();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
202 
/* Access little-endian MMIO registers atomically without memory barriers. */

/*
 * The *_relaxed() variants perform the same single volatile access as
 * the barriered versions above, but without the bracketing
 * __compiler_membar() calls, so the compiler may reorder them
 * relative to surrounding non-volatile code.
 */
#undef readb_relaxed
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	readb_relaxed(addr)	readb_relaxed(addr)

#undef writeb_relaxed
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)

#undef readw_relaxed
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	readw_relaxed(addr)	readw_relaxed(addr)

#undef writew_relaxed
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)

#undef readl_relaxed
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	readl_relaxed(addr)	readl_relaxed(addr)

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)

/* 64-bit variants only where a native 8-byte access exists. */
#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
270 
/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */

/*
 * These ioread*() helpers only handle MMIO, delegating to the
 * barriered read*() accessors above.  The *be variants byte-swap the
 * value read so big-endian registers come back in host order
 * (the unconditional bswap assumes a little-endian host, consistent
 * with the x86-specific note at the top of the file).
 */
#undef ioread8
static inline uint8_t
ioread8(const volatile void *addr)
{
	return (readb(addr));
}
#define	ioread8(addr)		ioread8(addr)

#undef ioread16
static inline uint16_t
ioread16(const volatile void *addr)
{
	return (readw(addr));
}
#define	ioread16(addr)		ioread16(addr)

#undef ioread16be
static inline uint16_t
ioread16be(const volatile void *addr)
{
	return (bswap16(readw(addr)));
}
#define	ioread16be(addr)	ioread16be(addr)

#undef ioread32
static inline uint32_t
ioread32(const volatile void *addr)
{
	return (readl(addr));
}
#define	ioread32(addr)		ioread32(addr)

#undef ioread32be
static inline uint32_t
ioread32be(const volatile void *addr)
{
	return (bswap32(readl(addr)));
}
#define	ioread32be(addr)	ioread32be(addr)
312 
/* iowrite8(): write an 8-bit MMIO register via the barriered writeb(). */
#undef iowrite8
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)
320 
/*
 * iowrite16(): write a 16-bit little-endian MMIO register via the
 * barriered writew().
 */
#undef iowrite16
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
/*
 * Use the same function-like self-referential macro shape as every
 * other ioread/iowrite helper in this file; the previous object-like
 * "#define iowrite16 iowrite16" expanded equivalently but was
 * inconsistent with its siblings.
 */
#define	iowrite16(v, addr)	iowrite16(v, addr)
328 
/* iowrite32(): write a 32-bit little-endian MMIO register. */
#undef iowrite32
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)

/* iowrite32be(): write a 32-bit big-endian register (byte-swap, then store). */
#undef iowrite32be
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	writel(bswap32(v), addr);
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
344 
#if defined(__i386__) || defined(__amd64__)
/*
 * _outb(): emit one byte to an x86 I/O port with the "outb"
 * instruction (port in %dx or as an immediate, data in %al).
 */
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
352 
/*
 * _ioremap_attr(): map a physical address range with the given VM
 * memory attribute; implemented out-of-line on the listed
 * architectures only, a NULL-returning stub elsewhere.
 */
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
#define	_ioremap_attr(...) NULL
#endif

/* Linux ioremap*() flavors, distinguished only by the cache attribute. */
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#define	ioremap_wb(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
void iounmap(void *addr);

/*
 * I/O-space set/copy helpers implemented as plain memory operations,
 * which assumes MMIO space is directly addressable here.
 */
#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
374 
/*
 * __iowrite32_copy(): copy "count" 32-bit words from the buffer
 * "from" to I/O memory "to" using raw (unbarriered) 32-bit stores.
 *
 * Fix: the loop index was a signed "int" compared against the size_t
 * "count" — a signed/unsigned mismatch that also truncates the count
 * on LP64 platforms; use size_t throughout.
 */
static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	for (i = 0; i < count; i++)
		__raw_writel(src[i], dst + i);
}
385 
/*
 * __iowrite64_copy(): copy "count" 64-bit words from the buffer
 * "from" to I/O memory "to" using raw (unbarriered) stores.
 *
 * Fix: as with __iowrite32_copy(), the loop index was a signed "int"
 * compared against the size_t "count"; use size_t.
 */
static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	for (i = 0; i < count; i++)
		__raw_writeq(src[i], dst + i);
#else
	/* No native 64-bit store: emit two 32-bit stores per element. */
	__iowrite32_copy(to, from, count * 2);
#endif
}
400 
/*
 * Caching-mode request flags for memremap(); callers may OR several
 * together and memremap() tries them in WB, WT, WC order.
 */
enum {
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};
406 
407 static inline void *
408 memremap(resource_size_t offset, size_t size, unsigned long flags)
409 {
410 	void *addr = NULL;
411 
412 	if ((flags & MEMREMAP_WB) &&
413 	    (addr = ioremap_wb(offset, size)) != NULL)
414 		goto done;
415 	if ((flags & MEMREMAP_WT) &&
416 	    (addr = ioremap_wt(offset, size)) != NULL)
417 		goto done;
418 	if ((flags & MEMREMAP_WC) &&
419 	    (addr = ioremap_wc(offset, size)) != NULL)
420 		goto done;
421 done:
422 	return (addr);
423 }
424 
/* memunmap(): release a mapping obtained from memremap(). */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
431 
432 #endif	/* _LINUX_IO_H_ */
433