xref: /freebsd/sys/compat/linuxkpi/common/include/linux/io.h (revision af787b8e8b803dbb2c6bd06629974ba39bd0fb70)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #ifndef	_LINUXKPI_LINUX_IO_H_
30 #define	_LINUXKPI_LINUX_IO_H_
31 
32 #include <sys/endian.h>
33 #include <sys/types.h>
34 
35 #include <machine/vm.h>
36 
37 #include <linux/compiler.h>
38 #include <linux/err.h>
39 #include <linux/types.h>
40 #if !defined(__arm__)
41 #include <asm/set_memory.h>
42 #endif
43 
/*
 * XXX This is all x86 specific.  It should be bus space access.
 */

/* rmb and wmb are declared in machine/atomic.h, so should be included first. */

/*
 * Barrier hooks bracketing each MMIO access, in the style of Linux's
 * asm-generic/io.h: __io_br/__io_ar run before/after a read,
 * __io_bw/__io_aw before/after a write.  Each may be pre-defined by a
 * machine header; otherwise fall back to rmb()/wmb() when those exist,
 * and to a pure compiler barrier as a last resort.
 */
#ifndef __io_br
#define	__io_br()	__compiler_membar()
#endif

/* After-read: a real read barrier when the platform provides rmb. */
#ifndef __io_ar
#ifdef rmb
#define	__io_ar()	rmb()
#else
#define	__io_ar()	__compiler_membar()
#endif
#endif

/* Before-write: a real write barrier when the platform provides wmb. */
#ifndef __io_bw
#ifdef wmb
#define	__io_bw()	wmb()
#else
#define	__io_bw()	__compiler_membar()
#endif
#endif

/* After-write: compiler barrier only. */
#ifndef __io_aw
#define	__io_aw()	__compiler_membar()
#endif
/* Access MMIO registers atomically without barriers and byte swapping. */

/*
 * Each accessor is an inline function immediately shadowed by a
 * same-named function-like macro; the macro lets consumers test with
 * #ifdef whether the accessor is already provided (the self-reference
 * is not recursively expanded by the preprocessor).
 */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	__raw_readb(addr)	__raw_readb(addr)

static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)

static inline uint16_t
__raw_readw(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	__raw_readw(addr)	__raw_readw(addr)

static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)

static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	__raw_readl(addr)	__raw_readl(addr)

static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)

/* Native 64-bit single accesses only exist on LP64 platforms. */
#ifdef __LP64__
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	__raw_readq(addr)	__raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif

/* Compiler barrier only; ordering of posted MMIO writes is best-effort. */
#define	mmiowb()	barrier()
134 
/* Access little-endian MMIO registers atomically with memory barriers. */

/*
 * read{b,w,l,q}()/write{b,w,l,q}(): each access is bracketed by the
 * __io_* barrier hooks above and converted between little-endian
 * register layout and host byte order (a no-op for the single byte).
 */
#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__io_br();
	v = *(const volatile uint8_t *)addr;
	__io_ar();
	return (v);
}
#define	readb(addr)		readb(addr)

#undef writeb
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__io_bw();
	*(volatile uint8_t *)addr = v;
	__io_aw();
}
#define	writeb(v, addr)		writeb(v, addr)

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = le16toh(__raw_readw(addr));
	__io_ar();
	return (v);
}
#define	readw(addr)		readw(addr)

#undef writew
static inline void
writew(uint16_t v, volatile void *addr)
{
	__io_bw();
	__raw_writew(htole16(v), addr);
	__io_aw();
}
#define	writew(v, addr)		writew(v, addr)

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = le32toh(__raw_readl(addr));
	__io_ar();
	return (v);
}
#define	readl(addr)		readl(addr)

#undef writel
static inline void
writel(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htole32(v), addr);
	__io_aw();
}
#define	writel(v, addr)		writel(v, addr)

/* 64-bit accessors depend on __raw_readq/__raw_writeq (LP64 only). */
#undef readq
#undef writeq
#ifdef __LP64__
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__io_br();
	v = le64toh(__raw_readq(addr));
	__io_ar();
	return (v);
}
#define	readq(addr)		readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeq(htole64(v), addr);
	__io_aw();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
230 
/* Access little-endian MMIO registers atomically without memory barriers. */

/*
 * *_relaxed variants: byte-order conversion as above, but no __io_*
 * barriers — callers take responsibility for ordering.
 */
#undef readb_relaxed
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	return (__raw_readb(addr));
}
#define	readb_relaxed(addr)	readb_relaxed(addr)

#undef writeb_relaxed
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	__raw_writeb(v, addr);
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)

#undef readw_relaxed
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	return (le16toh(__raw_readw(addr)));
}
#define	readw_relaxed(addr)	readw_relaxed(addr)

#undef writew_relaxed
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	__raw_writew(htole16(v), addr);
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)

#undef readl_relaxed
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	return (le32toh(__raw_readl(addr)));
}
#define	readl_relaxed(addr)	readl_relaxed(addr)

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	__raw_writel(htole32(v), addr);
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)

/* 64-bit relaxed accessors depend on __raw_readq/__raw_writeq (LP64 only). */
#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	return (le64toh(__raw_readq(addr)));
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	__raw_writeq(htole64(v), addr);
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
298 
/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */

/*
 * ioread*() here are MMIO-only wrappers: little-endian (or explicitly
 * big-endian for the *be variants) reads with full barrier semantics.
 */
#undef ioread8
static inline uint8_t
ioread8(const volatile void *addr)
{
	return (readb(addr));
}
#define	ioread8(addr)		ioread8(addr)

#undef ioread16
static inline uint16_t
ioread16(const volatile void *addr)
{
	return (readw(addr));
}
#define	ioread16(addr)		ioread16(addr)

/* Big-endian 16-bit MMIO read with barriers. */
#undef ioread16be
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = (be16toh(__raw_readw(addr)));
	__io_ar();

	return (v);
}
#define	ioread16be(addr)	ioread16be(addr)

#undef ioread32
static inline uint32_t
ioread32(const volatile void *addr)
{
	return (readl(addr));
}
#define	ioread32(addr)		ioread32(addr)

/* Big-endian 32-bit MMIO read with barriers. */
#undef ioread32be
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = (be32toh(__raw_readl(addr)));
	__io_ar();

	return (v);
}
#define	ioread32be(addr)	ioread32be(addr)
352 
#undef ioread64
/*
 * 64-bit little-endian MMIO read with barriers.  Only provided on LP64
 * platforms, matching readq() above: the previous unconditional
 * definition referenced readq(), which does not exist on 32-bit
 * targets and broke compilation there.
 */
#ifdef __LP64__
static inline uint64_t
ioread64(const volatile void *addr)
{
	return (readq(addr));
}
#define	ioread64(addr)		ioread64(addr)
#endif
360 
/*
 * iowrite*(): little-endian (or explicitly big-endian for *be) MMIO
 * writes with full barrier semantics.
 */
#undef iowrite8
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)

#undef iowrite16
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
/*
 * Function-like shadow macro, consistent with the other iowrite*()
 * (was previously an object-like `#define iowrite16 iowrite16`).
 */
#define	iowrite16(v, addr)	iowrite16(v, addr)

#undef iowrite32
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)

/* Big-endian 32-bit MMIO write with barriers. */
#undef iowrite32be
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htobe32(v), addr);
	__io_aw();
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
394 
#if defined(__i386__) || defined(__amd64__)
/* Write one byte to an x86 I/O port ("outb" instruction). */
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
402 
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
/*
 * Map a physical address range with the given VM_MEMATTR_* attribute;
 * defined out of line elsewhere in the linuxkpi.
 */
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
/* Unsupported platforms: mapping always fails with NULL. */
static __inline void *
_ioremap_attr(vm_paddr_t _phys_addr, unsigned long _size, int _attr)
{
	return (NULL);
}
#endif
412 
struct device;
/*
 * Stub: device-managed ioremap is not implemented; always returns
 * NULL, so callers must handle mapping failure.
 */
static inline void *
devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
{
	return (NULL);
}
419 
/*
 * ioremap* flavors map to _ioremap_attr() with the closest available
 * FreeBSD memory attribute.  Platforms with a dedicated device-memory
 * attribute use it for every uncached flavor.
 */
#ifdef VM_MEMATTR_DEVICE
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
/* Write-combining falls back to uncacheable where not supported. */
#ifdef VM_MEMATTR_WRITE_COMBINING
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#else
#define	ioremap_wc(addr, size)	ioremap_nocache(addr, size)
#endif
#define	ioremap_cache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
void iounmap(void *addr);
444 
/*
 * XXX These assume the I/O region behaves like ordinary memory: plain
 * memset/memcpy may issue accesses of any width and order, which some
 * devices cannot tolerate.
 */
#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
448 
/*
 * Copy `count` 32-bit words from normal memory at `from` to MMIO at
 * `to` using raw (no barrier, no byteswap) register-width writes.
 */
static inline void
__iowrite32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	/* size_t index: avoids signed/unsigned mismatch with count. */
	for (i = 0; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}
459 
/*
 * Copy `count` 64-bit words from normal memory at `from` to MMIO at
 * `to`.  On LP64 this uses raw 64-bit writes; otherwise it falls back
 * to twice as many 32-bit writes.
 */
static inline void
__iowrite64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	/* size_t index: avoids signed/unsigned mismatch with count. */
	for (i = 0; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
474 
/*
 * Copy `count` 32-bit words from MMIO at `from` to normal memory at
 * `to` using raw (no barrier, no byteswap) register-width reads.
 */
static inline void
__ioread32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	/* size_t index: avoids signed/unsigned mismatch with count. */
	for (i = 0; i < count; i++, src++, dst++)
		*dst = __raw_readl(src);
}
485 
/*
 * Copy `count` 64-bit words from MMIO at `from` to normal memory at
 * `to`.  On LP64 this uses raw 64-bit reads; otherwise it falls back
 * to twice as many 32-bit reads.
 */
static inline void
__ioread64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	/* size_t index: avoids signed/unsigned mismatch with count. */
	for (i = 0; i < count; i++, src++, dst++)
		*dst = __raw_readq(src);
#else
	__ioread32_copy(to, from, count * 2);
#endif
}
500 
/* memremap() cache-mode request flags; may be OR'ed together. */
enum {
	MEMREMAP_WB = 1 << 0,	/* write-back (cacheable) */
	MEMREMAP_WT = 1 << 1,	/* write-through */
	MEMREMAP_WC = 1 << 2,	/* write-combining */
};
506 
507 static inline void *
508 memremap(resource_size_t offset, size_t size, unsigned long flags)
509 {
510 	void *addr = NULL;
511 
512 	if ((flags & MEMREMAP_WB) &&
513 	    (addr = ioremap_cache(offset, size)) != NULL)
514 		goto done;
515 	if ((flags & MEMREMAP_WT) &&
516 	    (addr = ioremap_wt(offset, size)) != NULL)
517 		goto done;
518 	if ((flags & MEMREMAP_WC) &&
519 	    (addr = ioremap_wc(offset, size)) != NULL)
520 		goto done;
521 done:
522 	return (addr);
523 }
524 
/* Undo a memremap() mapping. */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
531 
/*
 * Encode a negative errno as an __iomem pointer (see ERR_PTR()).
 * Expansion fully parenthesized for macro hygiene, matching Linux.
 */
#define	IOMEM_ERR_PTR(err)	((void __iomem *)ERR_PTR(err))

/* First valid cookie value returned by arch_phys_wc_add(). */
#define	__MTRR_ID_BASE	1
int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
void lkpi_arch_phys_wc_del(int);
#define	arch_phys_wc_add(...)	lkpi_arch_phys_wc_add(__VA_ARGS__)
#define	arch_phys_wc_del(...)	lkpi_arch_phys_wc_del(__VA_ARGS__)
/* Map an arch_phys_wc_add() cookie to a zero-based index; -1 if invalid. */
#define	arch_phys_wc_index(x)	\
	(((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
541 
#if defined(__amd64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) || defined(__riscv)
/*
 * Switch the DMAP mapping of [start, start + size) to write-combining
 * (or uncacheable where WC is unavailable).  Returns 0 on success or a
 * negative errno, per the Linux convention.
 */
static inline int
arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

#ifdef VM_MEMATTR_WRITE_COMBINING
	return (-pmap_change_attr(va, size, VM_MEMATTR_WRITE_COMBINING));
#else
	return (-pmap_change_attr(va, size, VM_MEMATTR_UNCACHEABLE));
#endif
}

/*
 * Revert the DMAP mapping of [start, start + size) to ordinary
 * write-back memory.  Failure of pmap_change_attr() is ignored here.
 */
static inline void
arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

	pmap_change_attr(va, size, VM_MEMATTR_WRITE_BACK);
}
#endif
567 
568 #endif	/* _LINUXKPI_LINUX_IO_H_ */
569