xref: /freebsd/sys/compat/linuxkpi/common/include/linux/io.h (revision 3332f1b444d4a73238e9f59cca27bfc95fe936bd)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  *
29  * $FreeBSD$
30  */
31 #ifndef	_LINUX_IO_H_
32 #define	_LINUX_IO_H_
33 
34 #include <sys/endian.h>
35 #include <sys/types.h>
36 
37 #include <machine/vm.h>
38 
39 #include <linux/compiler.h>
40 #include <linux/types.h>
41 
42 /*
43  * XXX This is all x86 specific.  It should be bus space access.
44  */
45 
46 /* rmb and wmb are declared in machine/atomic.h, so should be included first. */
/* Barrier before an MMIO read; compiler-only fence by default. */
#ifndef __io_br
#define	__io_br()	__compiler_membar()
#endif

/* Barrier after an MMIO read; stronger rmb() when the platform defines it. */
#ifndef __io_ar
#ifdef rmb
#define	__io_ar()	rmb()
#else
#define	__io_ar()	__compiler_membar()
#endif
#endif

/* Barrier before an MMIO write; stronger wmb() when the platform defines it. */
#ifndef __io_bw
#ifdef wmb
#define	__io_bw()	wmb()
#else
#define	__io_bw()	__compiler_membar()
#endif
#endif

/* Barrier after an MMIO write; compiler-only fence by default. */
#ifndef __io_aw
#define	__io_aw()	__compiler_membar()
#endif
70 
71 /* Access MMIO registers atomically without barriers and byte swapping. */
72 
/* 8-bit raw MMIO load: no barriers, no byte swapping. */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	return (*p);
}
#define	__raw_readb(addr)	__raw_readb(addr)
79 
/* 8-bit raw MMIO store: no barriers, no byte swapping. */
static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	*p = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)
86 
/* 16-bit raw MMIO load: no barriers, no byte swapping. */
static inline uint16_t
__raw_readw(const volatile void *addr)
{
	const volatile uint16_t *p = addr;

	return (*p);
}
#define	__raw_readw(addr)	__raw_readw(addr)
93 
/* 16-bit raw MMIO store: no barriers, no byte swapping. */
static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *p = addr;

	*p = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)
100 
/* 32-bit raw MMIO load: no barriers, no byte swapping. */
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	const volatile uint32_t *p = addr;

	return (*p);
}
#define	__raw_readl(addr)	__raw_readl(addr)
107 
/* 32-bit raw MMIO store: no barriers, no byte swapping. */
static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *p = addr;

	*p = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)
114 
#ifdef __LP64__
/* 64-bit raw MMIO accessors; only on 64-bit platforms, where they are atomic. */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	const volatile uint64_t *p = addr;

	return (*p);
}
#define	__raw_readq(addr)	__raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *p = addr;

	*p = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif
130 
/*
 * NOTE(review): Linux mmiowb() orders MMIO writes against a following
 * unlock; here it is only a compiler barrier — assumed sufficient on
 * the platforms this shim supports, confirm for new architectures.
 */
#define	mmiowb()	barrier()
132 
133 /* Access little-endian MMIO registers atomically with memory barriers. */
134 
#undef readb
/* 8-bit MMIO read bracketed by the __io_br()/__io_ar() barriers. */
static inline uint8_t
readb(const volatile void *addr)
{
	const volatile uint8_t *p = addr;
	uint8_t val;

	__io_br();
	val = *p;
	__io_ar();
	return (val);
}
#define	readb(addr)		readb(addr)
147 
#undef writeb
/* 8-bit MMIO write bracketed by the __io_bw()/__io_aw() barriers. */
static inline void
writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	__io_bw();
	*p = v;
	__io_aw();
}
#define	writeb(v, addr)		writeb(v, addr)
157 
#undef readw
/* 16-bit little-endian MMIO read with ordering barriers. */
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t raw;

	__io_br();
	raw = __raw_readw(addr);
	__io_ar();
	/* Byte swap is pure computation; safe outside the barrier pair. */
	return (le16toh(raw));
}
#define	readw(addr)		readw(addr)
170 
#undef writew
/* 16-bit little-endian MMIO write with ordering barriers. */
static inline void
writew(uint16_t v, volatile void *addr)
{
	uint16_t le = htole16(v);

	__io_bw();
	__raw_writew(le, addr);
	__io_aw();
}
#define	writew(v, addr)		writew(v, addr)
180 
#undef readl
/* 32-bit little-endian MMIO read with ordering barriers. */
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t raw;

	__io_br();
	raw = __raw_readl(addr);
	__io_ar();
	/* Byte swap is pure computation; safe outside the barrier pair. */
	return (le32toh(raw));
}
#define	readl(addr)		readl(addr)
193 
#undef writel
/* 32-bit little-endian MMIO write with ordering barriers. */
static inline void
writel(uint32_t v, volatile void *addr)
{
	uint32_t le = htole32(v);

	__io_bw();
	__raw_writel(le, addr);
	__io_aw();
}
#define	writel(v, addr)		writel(v, addr)
203 
#undef readq
#undef writeq
#ifdef __LP64__
/* 64-bit little-endian MMIO accessors; 64-bit platforms only. */
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t raw;

	__io_br();
	raw = __raw_readq(addr);
	__io_ar();
	return (le64toh(raw));
}
#define	readq(addr)		readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	uint64_t le = htole64(v);

	__io_bw();
	__raw_writeq(le, addr);
	__io_aw();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
228 
229 /* Access little-endian MMIO registers atomically without memory barriers. */
230 
#undef readb_relaxed
/* 8-bit MMIO read, no ordering barriers. */
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	uint8_t val = __raw_readb(addr);

	return (val);
}
#define	readb_relaxed(addr)	readb_relaxed(addr)
238 
#undef writeb_relaxed
/* 8-bit MMIO write, no ordering barriers. */
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	__raw_writeb(v, p);
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)
246 
#undef readw_relaxed
/* 16-bit little-endian MMIO read, no ordering barriers. */
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	uint16_t raw = __raw_readw(addr);

	return (le16toh(raw));
}
#define	readw_relaxed(addr)	readw_relaxed(addr)
254 
#undef writew_relaxed
/* 16-bit little-endian MMIO write, no ordering barriers. */
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	uint16_t le = htole16(v);

	__raw_writew(le, addr);
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)
262 
#undef readl_relaxed
/* 32-bit little-endian MMIO read, no ordering barriers. */
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	uint32_t raw = __raw_readl(addr);

	return (le32toh(raw));
}
#define	readl_relaxed(addr)	readl_relaxed(addr)
270 
#undef writel_relaxed
/* 32-bit little-endian MMIO write, no ordering barriers. */
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	uint32_t le = htole32(v);

	__raw_writel(le, addr);
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)
278 
#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
/* 64-bit little-endian MMIO accessors without barriers; LP64 only. */
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	uint64_t raw = __raw_readq(addr);

	return (le64toh(raw));
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	uint64_t le = htole64(v);

	__raw_writeq(le, addr);
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
296 
297 /* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
298 
#undef ioread8
/* Linux ioread8(); this shim handles MMIO only, not port I/O. */
static inline uint8_t
ioread8(const volatile void *addr)
{
	uint8_t val = readb(addr);

	return (val);
}
#define	ioread8(addr)		ioread8(addr)
306 
#undef ioread16
/* Linux ioread16(); little-endian, MMIO only. */
static inline uint16_t
ioread16(const volatile void *addr)
{
	uint16_t val = readw(addr);

	return (val);
}
#define	ioread16(addr)		ioread16(addr)
314 
#undef ioread16be
/* 16-bit big-endian MMIO read with ordering barriers. */
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t raw;

	__io_br();
	raw = __raw_readw(addr);
	__io_ar();

	/* Byte swap is pure computation; safe outside the barrier pair. */
	return (be16toh(raw));
}
#define	ioread16be(addr)	ioread16be(addr)
328 
#undef ioread32
/* Linux ioread32(); little-endian, MMIO only. */
static inline uint32_t
ioread32(const volatile void *addr)
{
	uint32_t val = readl(addr);

	return (val);
}
#define	ioread32(addr)		ioread32(addr)
336 
#undef ioread32be
/* 32-bit big-endian MMIO read with ordering barriers. */
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t raw;

	__io_br();
	raw = __raw_readl(addr);
	__io_ar();

	/* Byte swap is pure computation; safe outside the barrier pair. */
	return (be32toh(raw));
}
#define	ioread32be(addr)	ioread32be(addr)
350 
#undef iowrite8
/* Linux iowrite8(); same semantics as writeb() in this shim. */
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)
358 
#undef iowrite16
/* Linux iowrite16(); 16-bit little-endian MMIO write with barriers. */
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
/* Function-like self-referential define, matching every other accessor. */
#define	iowrite16(v, addr)	iowrite16(v, addr)
366 
#undef iowrite32
/* Linux iowrite32(); same semantics as writel() in this shim. */
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)
374 
#undef iowrite32be
/* 32-bit big-endian MMIO write with ordering barriers. */
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	uint32_t be = htobe32(v);

	__io_bw();
	__raw_writel(be, addr);
	__io_aw();
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
384 
#if defined(__i386__) || defined(__amd64__)
/*
 * Emit a single byte to an x86 I/O port via the "outb" instruction.
 * Only exists on i386/amd64, the platforms with a port I/O space.
 */
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
392 
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
/* Backing implementation for the ioremap*() macros below; defined elsewhere. */
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
/* Platforms without an implementation: mapping requests always yield NULL. */
#define	_ioremap_attr(...) NULL
#endif
398 
#ifdef VM_MEMATTR_DEVICE
/*
 * Platforms that define VM_MEMATTR_DEVICE use the device attribute for
 * all uncached mapping flavors.
 */
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
/* Otherwise pick the closest matching VM memory attribute per flavor. */
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#ifdef VM_MEMATTR_WRITE_COMBINING
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#else
/* No write-combining attribute here; fall back to an uncached mapping. */
#define	ioremap_wc(addr, size)	ioremap_nocache(addr, size)
#endif
#define	ioremap_wb(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
/* Release a mapping created by the ioremap*() macros above. */
void iounmap(void *addr);
423 
/*
 * MMIO mappings are directly dereferenceable here, so the Linux MMIO
 * set/copy helpers reduce to the ordinary memory routines.
 * NOTE(review): unlike Linux, these make no fixed-access-width guarantee.
 */
#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
427 
/*
 * Copy "count" 32-bit words from "from" to the MMIO region "to" using
 * raw (no barrier, no byte swap) 32-bit stores.
 */
static inline void
__iowrite32_copy(void *to, void *from, size_t count)
{
	uint32_t *src;
	uint32_t *dst;
	size_t i;

	/* size_t index: avoids signed/unsigned mismatch with "count". */
	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}
438 
/*
 * Copy "count" 64-bit words from "from" to the MMIO region "to".
 * On LP64 this uses raw 64-bit stores; on 32-bit platforms each word
 * is emitted as two 32-bit stores instead.
 */
static inline void
__iowrite64_copy(void *to, void *from, size_t count)
{
#ifdef __LP64__
	uint64_t *src;
	uint64_t *dst;
	size_t i;

	/* size_t index: avoids signed/unsigned mismatch with "count". */
	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
453 
/* memremap() cache-mode request flags; callers may OR several together. */
enum {
	MEMREMAP_WB = 1 << 0,	/* write-back */
	MEMREMAP_WT = 1 << 1,	/* write-through */
	MEMREMAP_WC = 1 << 2,	/* write-combining */
};
459 
460 static inline void *
461 memremap(resource_size_t offset, size_t size, unsigned long flags)
462 {
463 	void *addr = NULL;
464 
465 	if ((flags & MEMREMAP_WB) &&
466 	    (addr = ioremap_wb(offset, size)) != NULL)
467 		goto done;
468 	if ((flags & MEMREMAP_WT) &&
469 	    (addr = ioremap_wt(offset, size)) != NULL)
470 		goto done;
471 	if ((flags & MEMREMAP_WC) &&
472 	    (addr = ioremap_wc(offset, size)) != NULL)
473 		goto done;
474 done:
475 	return (addr);
476 }
477 
/*
 * Release a mapping made by memremap().  Currently always forwards to
 * iounmap(); plain RAM mappings are not distinguished.
 */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
484 
485 #endif	/* _LINUX_IO_H_ */
486