/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef	_LINUXKPI_LINUX_IO_H_
#define	_LINUXKPI_LINUX_IO_H_

#include <sys/endian.h>
#include <sys/types.h>

#include <machine/vm.h>

#include <linux/compiler.h>
#include <linux/types.h>
#if !defined(__arm__)
#include <asm/set_memory.h>
#endif
/*
 * XXX This is all x86-specific.  It should use bus space accessors.
 */

/*
 * rmb and wmb are declared in machine/atomic.h, so that header should
 * be included first.
 */
#ifndef __io_br
#define	__io_br()	__compiler_membar()
#endif

#ifndef __io_ar
#ifdef rmb
#define	__io_ar()	rmb()
#else
#define	__io_ar()	__compiler_membar()
#endif
#endif

#ifndef __io_bw
#ifdef wmb
#define	__io_bw()	wmb()
#else
#define	__io_bw()	__compiler_membar()
#endif
#endif

#ifndef __io_aw
#define	__io_aw()	__compiler_membar()
#endif

/* Access MMIO registers atomically, without barriers or byte swapping. */

static inline uint8_t
__raw_readb(const volatile void *addr)
{
	return (*(const volatile uint8_t *)addr);
}
#define	__raw_readb(addr)	__raw_readb(addr)

static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	*(volatile uint8_t *)addr = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)

static inline uint16_t
__raw_readw(const volatile void *addr)
{
	return (*(const volatile uint16_t *)addr);
}
#define	__raw_readw(addr)	__raw_readw(addr)

static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	*(volatile uint16_t *)addr = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)

static inline uint32_t
__raw_readl(const volatile void *addr)
{
	return (*(const volatile uint32_t *)addr);
}
#define	__raw_readl(addr)	__raw_readl(addr)

static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	*(volatile uint32_t *)addr = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)

#ifdef __LP64__
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	return (*(const volatile uint64_t *)addr);
}
#define	__raw_readq(addr)	__raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	*(volatile uint64_t *)addr = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif
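
/*
 * Illustrative sketch (not part of the KPI): the ordered accessors
 * below are built from these raw helpers plus barriers and byte
 * swapping, e.g. a 32-bit little-endian read is roughly:
 *
 *	__io_br();
 *	v = le32toh(__raw_readl(addr));
 *	__io_ar();
 *
 * Drivers normally want readl()/writel() and friends rather than the
 * __raw_* variants, unless they handle ordering and endianness
 * themselves.
 */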

#define	mmiowb()	barrier()

/* Access little-endian MMIO registers atomically with memory barriers. */

#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__io_br();
	v = *(const volatile uint8_t *)addr;
	__io_ar();
	return (v);
}
#define	readb(addr)		readb(addr)

#undef writeb
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__io_bw();
	*(volatile uint8_t *)addr = v;
	__io_aw();
}
#define	writeb(v, addr)		writeb(v, addr)

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = le16toh(__raw_readw(addr));
	__io_ar();
	return (v);
}
#define	readw(addr)		readw(addr)

#undef writew
static inline void
writew(uint16_t v, volatile void *addr)
{
	__io_bw();
	__raw_writew(htole16(v), addr);
	__io_aw();
}
#define	writew(v, addr)		writew(v, addr)

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = le32toh(__raw_readl(addr));
	__io_ar();
	return (v);
}
#define	readl(addr)		readl(addr)

#undef writel
static inline void
writel(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htole32(v), addr);
	__io_aw();
}
#define	writel(v, addr)		writel(v, addr)

#undef readq
#undef writeq
#ifdef __LP64__
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__io_br();
	v = le64toh(__raw_readq(addr));
	__io_ar();
	return (v);
}
#define	readq(addr)		readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeq(htole64(v), addr);
	__io_aw();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
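
/*
 * Example with a hypothetical device (regs, bar_paddr, bar_len and
 * the 0x04 offset are made up): setting a bit in a 32-bit
 * little-endian control register:
 *
 *	void *regs = ioremap(bar_paddr, bar_len);
 *	uint32_t ctrl = readl((char *)regs + 0x04);
 *	writel(ctrl | 0x1, (char *)regs + 0x04);
 *	iounmap(regs);
 */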

/* Access little-endian MMIO registers atomically without memory barriers. */

#undef readb_relaxed
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	return (__raw_readb(addr));
}
#define	readb_relaxed(addr)	readb_relaxed(addr)

#undef writeb_relaxed
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	__raw_writeb(v, addr);
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)

#undef readw_relaxed
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	return (le16toh(__raw_readw(addr)));
}
#define	readw_relaxed(addr)	readw_relaxed(addr)

#undef writew_relaxed
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	__raw_writew(htole16(v), addr);
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)

#undef readl_relaxed
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	return (le32toh(__raw_readl(addr)));
}
#define	readl_relaxed(addr)	readl_relaxed(addr)

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	__raw_writel(htole32(v), addr);
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)

#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	return (le64toh(__raw_readq(addr)));
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	__raw_writeq(htole64(v), addr);
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
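
/*
 * The _relaxed accessors omit the barriers; they can help when a
 * driver issues many back-to-back register writes and only needs
 * ordering at the end.  A sketch with hypothetical names (fifo,
 * regs and DOORBELL are made up):
 *
 *	for (i = 0; i < n; i++)
 *		writel_relaxed(buf[i], (char *)fifo + 4 * i);
 *	writel(1, (char *)regs + DOORBELL);
 *
 * The final writel() carries the write barrier that orders the batch
 * before the doorbell.
 */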

/* XXX On Linux ioread and iowrite handle both MMIO and port IO. */

#undef ioread8
static inline uint8_t
ioread8(const volatile void *addr)
{
	return (readb(addr));
}
#define	ioread8(addr)		ioread8(addr)

#undef ioread16
static inline uint16_t
ioread16(const volatile void *addr)
{
	return (readw(addr));
}
#define	ioread16(addr)		ioread16(addr)

#undef ioread16be
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = (be16toh(__raw_readw(addr)));
	__io_ar();

	return (v);
}
#define	ioread16be(addr)	ioread16be(addr)

#undef ioread32
static inline uint32_t
ioread32(const volatile void *addr)
{
	return (readl(addr));
}
#define	ioread32(addr)		ioread32(addr)

#undef ioread32be
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = (be32toh(__raw_readl(addr)));
	__io_ar();

	return (v);
}
#define	ioread32be(addr)	ioread32be(addr)

#undef iowrite8
static inline void
iowrite8(uint8_t v, volatile void *addr)
{
	writeb(v, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)

#undef iowrite16
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
#define	iowrite16(v, addr)	iowrite16(v, addr)

#undef iowrite32
static inline void
iowrite32(uint32_t v, volatile void *addr)
{
	writel(v, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)

#undef iowrite32be
static inline void
iowrite32be(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htobe32(v), addr);
	__io_aw();
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
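
/*
 * The *be variants byte-swap to and from big-endian register
 * layouts, as found on some network and storage controllers.
 * Sketch (regs and the 0x100 offset are made up):
 *
 *	uint32_t stat = ioread32be((char *)regs + 0x100);
 *	iowrite32be(stat & ~0x1U, (char *)regs + 0x100);
 */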

#if defined(__i386__) || defined(__amd64__)
static inline void
_outb(u_char data, u_int port)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}
#endif
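
/*
 * _outb() issues a legacy x86 port-mapped write.  A minimal sketch,
 * writing a POST code to the traditional diagnostic port 0x80
 * (illustration only):
 *
 *	_outb(0xa5, 0x80);
 */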

#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
static __inline void *
_ioremap_attr(vm_paddr_t _phys_addr, unsigned long _size, int _attr)
{
	return (NULL);
}
#endif
struct device;
/* XXX devm_ioremap() is not implemented and always returns NULL. */
static inline void *
devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
{
	return (NULL);
}

#ifdef VM_MEMATTR_DEVICE
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#ifdef VM_MEMATTR_WRITE_COMBINING
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#else
#define	ioremap_wc(addr, size)	ioremap_nocache(addr, size)
#endif
#define	ioremap_cache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
void iounmap(void *addr);
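
/*
 * Attribute-selection sketch (fb_paddr and fb_len are hypothetical):
 * ioremap() and ioremap_nocache() give an uncacheable or DEVICE
 * mapping for registers, ioremap_wc() a write-combining mapping
 * suited to framebuffers, and ioremap_cache() a write-back mapping:
 *
 *	void *fb = ioremap_wc(fb_paddr, fb_len);
 *	if (fb != NULL) {
 *		...
 *		iounmap(fb);
 *	}
 */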

#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))

static inline void
__iowrite32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src;
	uint32_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}

static inline void
__iowrite64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src;
	uint64_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}

static inline void
__ioread32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src;
	uint32_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		*dst = __raw_readl(src);
}

static inline void
__ioread64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src;
	uint64_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		*dst = __raw_readq(src);
#else
	__ioread32_copy(to, from, count * 2);
#endif
}
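
/*
 * Note that count is in 32- or 64-bit words, not bytes.  Sketch
 * (mmio_window and desc are hypothetical): pushing a 64-byte
 * descriptor into an MMIO window one 32-bit word at a time:
 *
 *	__iowrite32_copy(mmio_window, desc, 64 / sizeof(uint32_t));
 */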

enum {
	MEMREMAP_WB = 1 << 0,
	MEMREMAP_WT = 1 << 1,
	MEMREMAP_WC = 1 << 2,
};

static inline void *
memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	void *addr = NULL;

	if ((flags & MEMREMAP_WB) &&
	    (addr = ioremap_cache(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WT) &&
	    (addr = ioremap_wt(offset, size)) != NULL)
		goto done;
	if ((flags & MEMREMAP_WC) &&
	    (addr = ioremap_wc(offset, size)) != NULL)
		goto done;
done:
	return (addr);
}
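
/*
 * memremap() tries the requested attributes in the order WB, WT, WC
 * and returns the first mapping that succeeds, or NULL.  Sketch
 * (tbl_paddr and tbl_len are hypothetical):
 *
 *	void *tbl = memremap(tbl_paddr, tbl_len, MEMREMAP_WB);
 *	if (tbl != NULL) {
 *		...
 *		memunmap(tbl);
 *	}
 */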

static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}

#define	__MTRR_ID_BASE	1
int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
void lkpi_arch_phys_wc_del(int);
#define	arch_phys_wc_add(...)	lkpi_arch_phys_wc_add(__VA_ARGS__)
#define	arch_phys_wc_del(...)	lkpi_arch_phys_wc_del(__VA_ARGS__)
#define	arch_phys_wc_index(x)	\
	(((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
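
/*
 * Sketch (fb_paddr and fb_len are hypothetical): requesting an
 * MTRR-style write-combining range for a framebuffer and releasing
 * it again:
 *
 *	int id = arch_phys_wc_add(fb_paddr, fb_len);
 *	...
 *	if (id >= 0)
 *		arch_phys_wc_del(id);
 */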

#if defined(__amd64__) || defined(__i386__) || defined(__aarch64__) || defined(__powerpc__) || defined(__riscv)
static inline int
arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

#ifdef VM_MEMATTR_WRITE_COMBINING
	return (-pmap_change_attr(va, size, VM_MEMATTR_WRITE_COMBINING));
#else
	return (-pmap_change_attr(va, size, VM_MEMATTR_UNCACHEABLE));
#endif
}

static inline void
arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

	pmap_change_attr(va, size, VM_MEMATTR_WRITE_BACK);
}
#endif

#endif	/* _LINUXKPI_LINUX_IO_H_ */