xref: /freebsd/sys/compat/linuxkpi/common/include/linux/io.h (revision aa1a8ff2d6dbc51ef058f46f3db5a8bb77967145)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #ifndef	_LINUXKPI_LINUX_IO_H_
30 #define	_LINUXKPI_LINUX_IO_H_
31 
32 #include <sys/endian.h>
33 #include <sys/types.h>
34 
35 #include <machine/vm.h>
36 
37 #include <linux/compiler.h>
38 #include <linux/err.h>
39 #include <linux/types.h>
40 #if !defined(__arm__)
41 #include <asm/set_memory.h>
42 #endif
43 
44 /*
45  * XXX This is all x86 specific.  It should be bus space access.
46  */
47 
48 /* rmb and wmb are declared in machine/atomic.h, so should be included first. */
49 #ifndef __io_br
50 #define	__io_br()	__compiler_membar()
51 #endif
52 
53 #ifndef __io_ar
54 #ifdef rmb
55 #define	__io_ar()	rmb()
56 #else
57 #define	__io_ar()	__compiler_membar()
58 #endif
59 #endif
60 
61 #ifndef __io_bw
62 #ifdef wmb
63 #define	__io_bw()	wmb()
64 #else
65 #define	__io_bw()	__compiler_membar()
66 #endif
67 #endif
68 
69 #ifndef __io_aw
70 #define	__io_aw()	__compiler_membar()
71 #endif
72 
73 /* Access MMIO registers atomically without barriers and byte swapping. */
74 
/* Raw 8-bit MMIO read: single access, no barriers, no byte swapping. */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	const volatile uint8_t *reg = addr;

	return (*reg);
}
#define	__raw_readb(addr)	__raw_readb(addr)
81 
/* Raw 8-bit MMIO write: single access, no barriers, no byte swapping. */
static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *reg = addr;

	*reg = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)
88 
/* Raw 16-bit MMIO read: single access, no barriers, no byte swapping. */
static inline uint16_t
__raw_readw(const volatile void *addr)
{
	const volatile uint16_t *reg = addr;

	return (*reg);
}
#define	__raw_readw(addr)	__raw_readw(addr)
95 
/* Raw 16-bit MMIO write: single access, no barriers, no byte swapping. */
static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *reg = addr;

	*reg = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)
102 
/* Raw 32-bit MMIO read: single access, no barriers, no byte swapping. */
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	const volatile uint32_t *reg = addr;

	return (*reg);
}
#define	__raw_readl(addr)	__raw_readl(addr)
109 
/* Raw 32-bit MMIO write: single access, no barriers, no byte swapping. */
static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *reg = addr;

	*reg = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)
116 
#ifdef __LP64__
/* Raw 64-bit MMIO accessors; only available on 64-bit platforms. */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	const volatile uint64_t *reg = addr;

	return (*reg);
}
#define	__raw_readq(addr)	__raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *reg = addr;

	*reg = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif
132 
133 #define	mmiowb()	barrier()
134 
135 /* Access little-endian MMIO registers atomically with memory barriers. */
136 
#undef readb
/*
 * Read an 8-bit MMIO register, ordered by the __io_br()/__io_ar()
 * barriers.  A single byte needs no endianness conversion; use the
 * raw accessor for consistency with readw()/readl() rather than
 * open-coding the dereference.
 */
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__io_br();
	v = __raw_readb(addr);
	__io_ar();
	return (v);
}
#define	readb(addr)		readb(addr)
149 
#undef writeb
/*
 * Write an 8-bit MMIO register, ordered by the __io_bw()/__io_aw()
 * barriers.  Uses the raw accessor for consistency with
 * writew()/writel() rather than open-coding the store.
 */
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeb(v, addr);
	__io_aw();
}
#define	writeb(v, addr)		writeb(v, addr)
159 
#undef readw
/* Read a little-endian 16-bit MMIO register with memory barriers. */
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t raw;

	__io_br();
	raw = __raw_readw(addr);
	__io_ar();
	/* le16toh() is a pure conversion; doing it after the barrier is safe. */
	return (le16toh(raw));
}
#define	readw(addr)		readw(addr)
172 
#undef writew
/* Write a little-endian 16-bit MMIO register with memory barriers. */
static inline void
writew(uint16_t v, volatile void *addr)
{
	const uint16_t le = htole16(v);

	__io_bw();
	__raw_writew(le, addr);
	__io_aw();
}
#define	writew(v, addr)		writew(v, addr)
182 
#undef readl
/* Read a little-endian 32-bit MMIO register with memory barriers. */
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t raw;

	__io_br();
	raw = __raw_readl(addr);
	__io_ar();
	/* le32toh() is a pure conversion; doing it after the barrier is safe. */
	return (le32toh(raw));
}
#define	readl(addr)		readl(addr)
195 
#undef writel
/* Write a little-endian 32-bit MMIO register with memory barriers. */
static inline void
writel(uint32_t v, volatile void *addr)
{
	const uint32_t le = htole32(v);

	__io_bw();
	__raw_writel(le, addr);
	__io_aw();
}
#define	writel(v, addr)		writel(v, addr)
205 
#undef readq
#undef writeq
#ifdef __LP64__
/* Read a little-endian 64-bit MMIO register with memory barriers. */
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t raw;

	__io_br();
	raw = __raw_readq(addr);
	__io_ar();
	/* le64toh() is a pure conversion; doing it after the barrier is safe. */
	return (le64toh(raw));
}
#define	readq(addr)		readq(addr)

/* Write a little-endian 64-bit MMIO register with memory barriers. */
static inline void
writeq(uint64_t v, volatile void *addr)
{
	const uint64_t le = htole64(v);

	__io_bw();
	__raw_writeq(le, addr);
	__io_aw();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
230 
231 /* Access little-endian MMIO registers atomically without memory barriers. */
232 
#undef readb_relaxed
/* Read an 8-bit MMIO register without memory barriers. */
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	/* Byte access: no endianness conversion needed. */
	return (*(const volatile uint8_t *)addr);
}
#define	readb_relaxed(addr)	readb_relaxed(addr)
240 
#undef writeb_relaxed
/* Write an 8-bit MMIO register without memory barriers. */
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	/* Byte access: no endianness conversion needed. */
	*(volatile uint8_t *)addr = v;
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)
248 
#undef readw_relaxed
/* Read a little-endian 16-bit MMIO register without memory barriers. */
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	const uint16_t raw = __raw_readw(addr);

	return (le16toh(raw));
}
#define	readw_relaxed(addr)	readw_relaxed(addr)
256 
#undef writew_relaxed
/* Write a little-endian 16-bit MMIO register without memory barriers. */
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	const uint16_t le = htole16(v);

	__raw_writew(le, addr);
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)
264 
#undef readl_relaxed
/* Read a little-endian 32-bit MMIO register without memory barriers. */
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	const uint32_t raw = __raw_readl(addr);

	return (le32toh(raw));
}
#define	readl_relaxed(addr)	readl_relaxed(addr)
272 
#undef writel_relaxed
/* Write a little-endian 32-bit MMIO register without memory barriers. */
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	const uint32_t le = htole32(v);

	__raw_writel(le, addr);
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)
280 
#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
/* Read a little-endian 64-bit MMIO register without memory barriers. */
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	const uint64_t raw = __raw_readq(addr);

	return (le64toh(raw));
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

/* Write a little-endian 64-bit MMIO register without memory barriers. */
static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	const uint64_t le = htole64(v);

	__raw_writeq(le, addr);
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
298 
299 /* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
300 
#undef ioread8
/* Linux ioread8(); this implementation handles MMIO only, not port I/O. */
static inline uint8_t
ioread8(const volatile void *addr)
{
	const uint8_t v = readb(addr);

	return (v);
}
#define	ioread8(addr)		ioread8(addr)
308 
#undef ioread16
/* Linux ioread16(); this implementation handles MMIO only, not port I/O. */
static inline uint16_t
ioread16(const volatile void *addr)
{
	const uint16_t v = readw(addr);

	return (v);
}
#define	ioread16(addr)		ioread16(addr)
316 
#undef ioread16be
/* Read a big-endian 16-bit MMIO register with memory barriers. */
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t raw;

	__io_br();
	raw = __raw_readw(addr);
	__io_ar();

	/* be16toh() is a pure conversion; doing it after the barrier is safe. */
	return (be16toh(raw));
}
#define	ioread16be(addr)	ioread16be(addr)
330 
#undef ioread32
/* Linux ioread32(); this implementation handles MMIO only, not port I/O. */
static inline uint32_t
ioread32(const volatile void *addr)
{
	const uint32_t v = readl(addr);

	return (v);
}
#define	ioread32(addr)		ioread32(addr)
338 
#undef ioread32be
/* Read a big-endian 32-bit MMIO register with memory barriers. */
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t raw;

	__io_br();
	raw = __raw_readl(addr);
	__io_ar();

	/* be32toh() is a pure conversion; doing it after the barrier is safe. */
	return (be32toh(raw));
}
#define	ioread32be(addr)	ioread32be(addr)
352 
#ifdef __LP64__
#undef ioread64
/* Linux ioread64(); this implementation handles MMIO only, not port I/O. */
static inline uint64_t
ioread64(const volatile void *addr)
{
	const uint64_t v = readq(addr);

	return (v);
}
#define	ioread64(addr)		ioread64(addr)
#endif
362 
#undef iowrite8
/* Linux iowrite8(); this implementation handles MMIO only, not port I/O. */
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	/* Delegate to the barrier-ordered MMIO write. */
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)
370 
#undef iowrite16
/*
 * Linux iowrite16(); this implementation handles MMIO only, not port I/O.
 * The self-referential define is function-like, matching the other
 * ioread/iowrite macros (the original object-like form was inconsistent
 * with its siblings, though equivalent when used as a call).
 */
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
#define	iowrite16(v, addr)	iowrite16(v, addr)
378 
#undef iowrite32
/* Linux iowrite32(); this implementation handles MMIO only, not port I/O. */
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	/* Delegate to the barrier-ordered MMIO write. */
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)
386 
#undef iowrite32be
/* Write a big-endian 32-bit MMIO register with memory barriers. */
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	const uint32_t be = htobe32(v);

	__io_bw();
	__raw_writel(be, addr);
	__io_aw();
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
396 
#if defined(__i386__) || defined(__amd64__)
/*
 * Emit one byte to an x86 I/O port via the "outb" instruction.
 * NOTE(review): the leading underscore presumably avoids a clash with the
 * native outb() from machine/cpufunc.h — confirm before renaming.
 */
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
404 
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
/*
 * Map a physical address range with the given VM_MEMATTR_* attribute;
 * implemented out of line (declaration only here).
 */
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
/* Unsupported architecture: always fail with NULL. */
static __inline void *
_ioremap_attr(vm_paddr_t _phys_addr, unsigned long _size, int _attr)
{
	return (NULL);
}
#endif
414 
struct device;
/*
 * Linux devm_ioremap() stub: unimplemented, always returns NULL.
 * Callers must handle the NULL return.
 */
static inline void *
devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
{
	return (NULL);
}
421 
422 #ifdef VM_MEMATTR_DEVICE
423 #define	ioremap_nocache(addr, size)					\
424     _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
425 #define	ioremap_wt(addr, size)						\
426     _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
427 #define	ioremap(addr, size)						\
428     _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
429 #else
430 #define	ioremap_nocache(addr, size)					\
431     _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
432 #define	ioremap_wt(addr, size)						\
433     _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
434 #define	ioremap(addr, size)						\
435     _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
436 #endif
437 #ifdef VM_MEMATTR_WRITE_COMBINING
438 #define	ioremap_wc(addr, size)						\
439     _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
440 #else
441 #define	ioremap_wc(addr, size)	ioremap_nocache(addr, size)
442 #endif
443 #define	ioremap_cache(addr, size)					\
444     _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
445 void iounmap(void *addr);
446 
447 #define	memset_io(a, b, c)	memset((a), (b), (c))
448 #define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
449 #define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
450 
/*
 * Copy 'count' 32-bit words from a normal buffer to MMIO space using raw
 * (unordered, non-swapping) writes.  'count' is in 32-bit units, not bytes.
 */
static inline void
__iowrite32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	/* size_t index: the original 'int' mixed signedness with 'count'. */
	for (i = 0; i < count; i++)
		__raw_writel(src[i], &dst[i]);
}
461 
/*
 * Copy 'count' 64-bit words from a normal buffer to MMIO space using raw
 * writes.  'count' is in 64-bit units, not bytes.
 */
static inline void
__iowrite64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	/* size_t index: the original 'int' mixed signedness with 'count'. */
	for (i = 0; i < count; i++)
		__raw_writeq(src[i], &dst[i]);
#else
	/* No 64-bit raw access here: emit each word as two 32-bit writes. */
	__iowrite32_copy(to, from, count * 2);
#endif
}
476 
/*
 * Copy 'count' 32-bit words from MMIO space to a normal buffer using raw
 * (unordered, non-swapping) reads.  'count' is in 32-bit units, not bytes.
 */
static inline void
__ioread32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src = from;
	uint32_t *dst = to;
	size_t i;

	/* size_t index: the original 'int' mixed signedness with 'count'. */
	for (i = 0; i < count; i++)
		dst[i] = __raw_readl(&src[i]);
}
487 
/*
 * Copy 'count' 64-bit words from MMIO space to a normal buffer using raw
 * reads.  'count' is in 64-bit units, not bytes.
 */
static inline void
__ioread64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src = from;
	uint64_t *dst = to;
	size_t i;

	/* size_t index: the original 'int' mixed signedness with 'count'. */
	for (i = 0; i < count; i++)
		dst[i] = __raw_readq(&src[i]);
#else
	/* No 64-bit raw access here: fetch each word as two 32-bit reads. */
	__ioread32_copy(to, from, count * 2);
#endif
}
502 
/*
 * memremap() mapping-type flags.  Callers may OR several together;
 * memremap() tries them in the order WB, WT, WC (see below).
 */
enum {
	MEMREMAP_WB = 1 << 0,	/* write-back cacheable */
	MEMREMAP_WT = 1 << 1,	/* write-through */
	MEMREMAP_WC = 1 << 2,	/* write-combining */
};
508 
509 static inline void *
510 memremap(resource_size_t offset, size_t size, unsigned long flags)
511 {
512 	void *addr = NULL;
513 
514 	if ((flags & MEMREMAP_WB) &&
515 	    (addr = ioremap_cache(offset, size)) != NULL)
516 		goto done;
517 	if ((flags & MEMREMAP_WT) &&
518 	    (addr = ioremap_wt(offset, size)) != NULL)
519 		goto done;
520 	if ((flags & MEMREMAP_WC) &&
521 	    (addr = ioremap_wc(offset, size)) != NULL)
522 		goto done;
523 done:
524 	return (addr);
525 }
526 
/* Undo a memremap(); currently just forwards to iounmap(). */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
533 
534 #define	IOMEM_ERR_PTR(err)	(void __iomem *)ERR_PTR(err)
535 
536 #define	__MTRR_ID_BASE	1
537 int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
538 void lkpi_arch_phys_wc_del(int);
539 #define	arch_phys_wc_add(...)	lkpi_arch_phys_wc_add(__VA_ARGS__)
540 #define	arch_phys_wc_del(...)	lkpi_arch_phys_wc_del(__VA_ARGS__)
541 #define	arch_phys_wc_index(x)	\
542 	(((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
543 
#if defined(__amd64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) || defined(__riscv)
/*
 * Change the caching attribute of [start, start + size) to
 * write-combining (or uncacheable where WC is unavailable).  The range is
 * addressed through the direct map, so 'start' must be a physical address
 * covered by the DMAP.  Returns the negated pmap_change_attr() result,
 * following the Linux negative-errno convention.
 */
static inline int
arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

#ifdef VM_MEMATTR_WRITE_COMBINING
	return (-pmap_change_attr(va, size, VM_MEMATTR_WRITE_COMBINING));
#else
	/* No write-combining attribute on this platform; use uncacheable. */
	return (-pmap_change_attr(va, size, VM_MEMATTR_UNCACHEABLE));
#endif
}

/*
 * Revert arch_io_reserve_memtype_wc(): set the range back to write-back
 * caching.  The pmap_change_attr() result is deliberately ignored here.
 */
static inline void
arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

	pmap_change_attr(va, size, VM_MEMATTR_WRITE_BACK);
}
#endif
569 
570 #endif	/* _LINUXKPI_LINUX_IO_H_ */
571