xref: /freebsd/sys/compat/linuxkpi/common/include/linux/io.h (revision 08c2f6cf46d8981bfc7219295882e38ec7dc8ffc)
1 /*-
2  * Copyright (c) 2010 Isilon Systems, Inc.
3  * Copyright (c) 2010 iX Systems, Inc.
4  * Copyright (c) 2010 Panasas, Inc.
5  * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice unmodified, this list of conditions, and the following
13  *    disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28  */
29 #ifndef	_LINUXKPI_LINUX_IO_H_
30 #define	_LINUXKPI_LINUX_IO_H_
31 
32 #include <sys/endian.h>
33 #include <sys/types.h>
34 
35 #include <machine/vm.h>
36 
37 #include <linux/compiler.h>
38 #include <linux/err.h>
39 #include <asm-generic/io.h>
40 #include <linux/types.h>
41 #if !defined(__arm__)
42 #include <asm/set_memory.h>
43 #endif
44 
45 /*
46  * XXX This is all x86 specific.  It should be bus space access.
47  */
48 
/* rmb and wmb are declared in machine/atomic.h, so should be included first. */

/* Barrier before an MMIO read; a compiler barrier by default. */
#ifndef __io_br
#define	__io_br()	__compiler_membar()
#endif

/* Barrier after an MMIO read; use rmb() when the platform defines it. */
#ifndef __io_ar
#ifdef rmb
#define	__io_ar()	rmb()
#else
#define	__io_ar()	__compiler_membar()
#endif
#endif

/* Barrier before an MMIO write; use wmb() when the platform defines it. */
#ifndef __io_bw
#ifdef wmb
#define	__io_bw()	wmb()
#else
#define	__io_bw()	__compiler_membar()
#endif
#endif

/* Barrier after an MMIO write; a compiler barrier by default. */
#ifndef __io_aw
#define	__io_aw()	__compiler_membar()
#endif
73 
74 /* Access MMIO registers atomically without barriers and byte swapping. */
75 
/* Raw 8-bit MMIO load: single access, no barriers, no byte swapping. */
static inline uint8_t
__raw_readb(const volatile void *addr)
{
	const volatile uint8_t *reg = addr;

	return (*reg);
}
#define	__raw_readb(addr)	__raw_readb(addr)
82 
/* Raw 8-bit MMIO store: single access, no barriers, no byte swapping. */
static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *reg = addr;

	*reg = v;
}
#define	__raw_writeb(v, addr)	__raw_writeb(v, addr)
89 
/* Raw 16-bit MMIO load: single access, no barriers, no byte swapping. */
static inline uint16_t
__raw_readw(const volatile void *addr)
{
	const volatile uint16_t *reg = addr;

	return (*reg);
}
#define	__raw_readw(addr)	__raw_readw(addr)
96 
/* Raw 16-bit MMIO store: single access, no barriers, no byte swapping. */
static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *reg = addr;

	*reg = v;
}
#define	__raw_writew(v, addr)	__raw_writew(v, addr)
103 
/* Raw 32-bit MMIO load: single access, no barriers, no byte swapping. */
static inline uint32_t
__raw_readl(const volatile void *addr)
{
	const volatile uint32_t *reg = addr;

	return (*reg);
}
#define	__raw_readl(addr)	__raw_readl(addr)
110 
/* Raw 32-bit MMIO store: single access, no barriers, no byte swapping. */
static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *reg = addr;

	*reg = v;
}
#define	__raw_writel(v, addr)	__raw_writel(v, addr)
117 
#ifdef __LP64__
/* Raw 64-bit MMIO load: single access, no barriers, no byte swapping. */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	const volatile uint64_t *reg = addr;

	return (*reg);
}
#define	__raw_readq(addr)	__raw_readq(addr)

/* Raw 64-bit MMIO store: single access, no barriers, no byte swapping. */
static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *reg = addr;

	*reg = v;
}
#define	__raw_writeq(v, addr)	__raw_writeq(v, addr)
#endif
133 
/* mmiowb(): maps to a compiler barrier here. */
#define	mmiowb()	barrier()
135 
136 /* Access little-endian MMIO registers atomically with memory barriers. */
137 
#undef readb
/* 8-bit MMIO read bracketed by the read barriers. */
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t val;

	__io_br();
	val = *(const volatile uint8_t *)addr;
	__io_ar();
	return (val);
}
#define	readb(addr)		readb(addr)
150 
#undef writeb
/* 8-bit MMIO write bracketed by the write barriers. */
static inline void
writeb(uint8_t value, volatile void *addr)
{
	__io_bw();
	*(volatile uint8_t *)addr = value;
	__io_aw();
}
#define	writeb(v, addr)		writeb(v, addr)
160 
#undef readw
/* 16-bit little-endian MMIO read bracketed by the read barriers. */
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t val;

	__io_br();
	val = le16toh(__raw_readw(addr));
	__io_ar();
	return (val);
}
#define	readw(addr)		readw(addr)
173 
#undef writew
/* 16-bit little-endian MMIO write bracketed by the write barriers. */
static inline void
writew(uint16_t value, volatile void *addr)
{
	uint16_t le = htole16(value);

	__io_bw();
	__raw_writew(le, addr);
	__io_aw();
}
#define	writew(v, addr)		writew(v, addr)
183 
#undef readl
/* 32-bit little-endian MMIO read bracketed by the read barriers. */
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t val;

	__io_br();
	val = le32toh(__raw_readl(addr));
	__io_ar();
	return (val);
}
#define	readl(addr)		readl(addr)
196 
#undef writel
/* 32-bit little-endian MMIO write bracketed by the write barriers. */
static inline void
writel(uint32_t value, volatile void *addr)
{
	uint32_t le = htole32(value);

	__io_bw();
	__raw_writel(le, addr);
	__io_aw();
}
#define	writel(v, addr)		writel(v, addr)
206 
#undef readq
#undef writeq
#ifdef __LP64__
/* 64-bit little-endian MMIO read bracketed by the read barriers. */
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t val;

	__io_br();
	val = le64toh(__raw_readq(addr));
	__io_ar();
	return (val);
}
#define	readq(addr)		readq(addr)

/* 64-bit little-endian MMIO write bracketed by the write barriers. */
static inline void
writeq(uint64_t value, volatile void *addr)
{
	uint64_t le = htole64(value);

	__io_bw();
	__raw_writeq(le, addr);
	__io_aw();
}
#define	writeq(v, addr)		writeq(v, addr)
#endif
231 
232 /* Access little-endian MMIO registers atomically without memory barriers. */
233 
#undef readb_relaxed
/* 8-bit MMIO read, no barriers. */
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	uint8_t val;

	val = __raw_readb(addr);
	return (val);
}
#define	readb_relaxed(addr)	readb_relaxed(addr)
241 
#undef writeb_relaxed
/* 8-bit MMIO write, no barriers. */
static inline void
writeb_relaxed(uint8_t value, volatile void *addr)
{
	__raw_writeb(value, addr);
}
#define	writeb_relaxed(v, addr)	writeb_relaxed(v, addr)
249 
#undef readw_relaxed
/* 16-bit little-endian MMIO read, no barriers. */
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	uint16_t val;

	val = le16toh(__raw_readw(addr));
	return (val);
}
#define	readw_relaxed(addr)	readw_relaxed(addr)
257 
#undef writew_relaxed
/* 16-bit little-endian MMIO write, no barriers. */
static inline void
writew_relaxed(uint16_t value, volatile void *addr)
{
	uint16_t le = htole16(value);

	__raw_writew(le, addr);
}
#define	writew_relaxed(v, addr)	writew_relaxed(v, addr)
265 
#undef readl_relaxed
/* 32-bit little-endian MMIO read, no barriers. */
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	uint32_t val;

	val = le32toh(__raw_readl(addr));
	return (val);
}
#define	readl_relaxed(addr)	readl_relaxed(addr)
273 
#undef writel_relaxed
/* 32-bit little-endian MMIO write, no barriers. */
static inline void
writel_relaxed(uint32_t value, volatile void *addr)
{
	uint32_t le = htole32(value);

	__raw_writel(le, addr);
}
#define	writel_relaxed(v, addr)	writel_relaxed(v, addr)
281 
#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
/* 64-bit little-endian MMIO read, no barriers; LP64 only. */
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	uint64_t val;

	val = le64toh(__raw_readq(addr));
	return (val);
}
#define	readq_relaxed(addr)	readq_relaxed(addr)

/* 64-bit little-endian MMIO write, no barriers; LP64 only. */
static inline void
writeq_relaxed(uint64_t value, volatile void *addr)
{
	uint64_t le = htole64(value);

	__raw_writeq(le, addr);
}
#define	writeq_relaxed(v, addr)	writeq_relaxed(v, addr)
#endif
299 
300 /* XXX On Linux ioread and iowrite handle both MMIO and port IO. */
301 
#undef ioread8
/* 8-bit MMIO read (Linux ioread also covers port I/O; this does not). */
static inline uint8_t
ioread8(const volatile void *addr)
{
	uint8_t val;

	val = readb(addr);
	return (val);
}
#define	ioread8(addr)		ioread8(addr)
309 
#undef ioread16
/* 16-bit little-endian MMIO read with barriers. */
static inline uint16_t
ioread16(const volatile void *addr)
{
	uint16_t val;

	val = readw(addr);
	return (val);
}
#define	ioread16(addr)		ioread16(addr)
317 
#undef ioread16be
/* 16-bit big-endian MMIO read with barriers. */
static inline uint16_t
ioread16be(const volatile void *addr)
{
	uint16_t val;

	__io_br();
	val = be16toh(__raw_readw(addr));
	__io_ar();
	return (val);
}
#define	ioread16be(addr)	ioread16be(addr)
331 
#undef ioread32
/* 32-bit little-endian MMIO read with barriers. */
static inline uint32_t
ioread32(const volatile void *addr)
{
	uint32_t val;

	val = readl(addr);
	return (val);
}
#define	ioread32(addr)		ioread32(addr)
339 
#undef ioread32be
/* 32-bit big-endian MMIO read with barriers. */
static inline uint32_t
ioread32be(const volatile void *addr)
{
	uint32_t val;

	__io_br();
	val = be32toh(__raw_readl(addr));
	__io_ar();
	return (val);
}
#define	ioread32be(addr)	ioread32be(addr)
353 
#ifdef __LP64__
#undef ioread64
/* 64-bit little-endian MMIO read with barriers; LP64 only. */
static inline uint64_t
ioread64(const volatile void *addr)
{
	uint64_t val;

	val = readq(addr);
	return (val);
}
#define	ioread64(addr)		ioread64(addr)
#endif
363 
#undef iowrite8
/* 8-bit MMIO write; barriers supplied by writeb(). */
static inline void
iowrite8(uint8_t value, volatile void *addr)
{
	writeb(value, addr);
}
#define	iowrite8(v, addr)	iowrite8(v, addr)
371 
#undef iowrite16
/*
 * 16-bit little-endian MMIO write; barriers supplied by writew().
 *
 * The self-define is function-like to match every sibling accessor
 * (the previous object-like "#define iowrite16 iowrite16" expanded
 * identically but was inconsistent with the rest of the file).
 */
static inline void
iowrite16(uint16_t v, volatile void *addr)
{
	writew(v, addr);
}
#define	iowrite16(v, addr)	iowrite16(v, addr)
379 
#undef iowrite32
/* 32-bit little-endian MMIO write; barriers supplied by writel(). */
static inline void
iowrite32(uint32_t value, volatile void *addr)
{
	writel(value, addr);
}
#define	iowrite32(v, addr)	iowrite32(v, addr)
387 
#undef iowrite32be
/* 32-bit big-endian MMIO write bracketed by the write barriers. */
static inline void
iowrite32be(uint32_t value, volatile void *addr)
{
	uint32_t be = htobe32(value);

	__io_bw();
	__raw_writel(be, addr);
	__io_aw();
}
#define	iowrite32be(v, addr)	iowrite32be(v, addr)
397 
#if defined(__i386__) || defined(__amd64__)
/*
 * Linux _outb(value, port) mapped onto the native x86 outb().
 * NOTE(review): this forwards arguments in (data, port) order — confirm
 * against machine/cpufunc.h that the native outb() expects that order.
 */
#define	_outb(data, port) outb((data), (port))
#endif
401 
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) || defined(__riscv)
/*
 * Map a physical address range with the given memory attribute.
 * Prototype only; the implementation lives elsewhere for the listed
 * architectures.
 */
void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr);
#else
/* Remaining architectures: mapping is unsupported and always fails. */
static __inline void *
_ioremap_attr(vm_paddr_t _phys_addr, unsigned long _size, int _attr)
{
	return (NULL);
}
#endif
411 
struct device;
/*
 * Device-managed ioremap: a stub that always fails (returns NULL).
 * Callers must tolerate a NULL mapping.
 */
static inline void *
devm_ioremap(struct device *dev, resource_size_t offset, resource_size_t size)
{
	return (NULL);
}
418 
#ifdef VM_MEMATTR_DEVICE
/*
 * Platforms that define a dedicated device-memory attribute use it for
 * every uncached/write-through mapping flavor.
 */
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE)
#else
/* Otherwise pick the closest-matching generic attribute per flavor. */
#define	ioremap_nocache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#define	ioremap_wt(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH)
#define	ioremap(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE)
#endif
#ifdef VM_MEMATTR_WRITE_COMBINING
#define	ioremap_wc(addr, size)						\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING)
#else
/* No write-combining attribute: fall back to an uncached mapping. */
#define	ioremap_wc(addr, size)	ioremap_nocache(addr, size)
#endif
#define	ioremap_cache(addr, size)					\
    _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK)
/* Release a mapping created by the ioremap*() family. */
void iounmap(void *addr);
443 
/*
 * The Linux *_io copy/fill helpers reduce to the plain string functions
 * here; assumes MMIO mappings are directly addressable memory.
 */
#define	memset_io(a, b, c)	memset((a), (b), (c))
#define	memcpy_fromio(a, b, c)	memcpy((a), (b), (c))
#define	memcpy_toio(a, b, c)	memcpy((a), (b), (c))
447 
/*
 * Copy "count" 32-bit words from normal memory to MMIO space, one raw
 * (unswapped, unbarriered) store per word.
 *
 * The index is size_t (previously int): this removes the signed/unsigned
 * comparison against "count" and the truncation for counts > INT_MAX.
 */
static inline void
__iowrite32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src;
	uint32_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writel(*src, dst);
}
458 
/*
 * Copy "count" 64-bit words from normal memory to MMIO space.  On LP64 a
 * raw 64-bit store is issued per word; otherwise the copy is done as
 * twice as many 32-bit stores.
 *
 * The index is size_t (previously int) to avoid a signed/unsigned
 * comparison with "count" and truncation for very large counts.
 */
static inline void
__iowrite64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src;
	uint64_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		__raw_writeq(*src, dst);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
473 
/*
 * Copy "count" 32-bit words from MMIO space to normal memory, one raw
 * (unswapped, unbarriered) load per word.
 *
 * The index is size_t (previously int) to avoid a signed/unsigned
 * comparison with "count" and truncation for very large counts.
 */
static inline void
__ioread32_copy(void *to, const void *from, size_t count)
{
	const uint32_t *src;
	uint32_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		*dst = __raw_readl(src);
}
484 
/*
 * Copy "count" 64-bit words from MMIO space to normal memory.  On LP64 a
 * raw 64-bit load is issued per word; otherwise the copy is done as
 * twice as many 32-bit loads.
 *
 * The index is size_t (previously int) to avoid a signed/unsigned
 * comparison with "count" and truncation for very large counts.
 */
static inline void
__ioread64_copy(void *to, const void *from, size_t count)
{
#ifdef __LP64__
	const uint64_t *src;
	uint64_t *dst;
	size_t i;

	for (i = 0, src = from, dst = to; i < count; i++, src++, dst++)
		*dst = __raw_readq(src);
#else
	__ioread32_copy(to, from, count * 2);
#endif
}
499 
/* memremap() caching-mode request flags; several may be OR'ed together. */
enum {
	MEMREMAP_WB = 1 << 0,	/* write-back */
	MEMREMAP_WT = 1 << 1,	/* write-through */
	MEMREMAP_WC = 1 << 2,	/* write-combining */
};
505 
506 static inline void *
507 memremap(resource_size_t offset, size_t size, unsigned long flags)
508 {
509 	void *addr = NULL;
510 
511 	if ((flags & MEMREMAP_WB) &&
512 	    (addr = ioremap_cache(offset, size)) != NULL)
513 		goto done;
514 	if ((flags & MEMREMAP_WT) &&
515 	    (addr = ioremap_wt(offset, size)) != NULL)
516 		goto done;
517 	if ((flags & MEMREMAP_WC) &&
518 	    (addr = ioremap_wc(offset, size)) != NULL)
519 		goto done;
520 done:
521 	return (addr);
522 }
523 
/* Undo a memremap() mapping; simply forwards to iounmap(). */
static inline void
memunmap(void *addr)
{
	/* XXX May need to check if this is RAM */
	iounmap(addr);
}
530 
/* Encode an errno as an __iomem pointer, mirroring Linux's IOMEM_ERR_PTR. */
#define	IOMEM_ERR_PTR(err)	(void __iomem *)ERR_PTR(err)

/* Write-combining (MTRR-style) region helpers; prototypes only here. */
#define	__MTRR_ID_BASE	1
int lkpi_arch_phys_wc_add(unsigned long, unsigned long);
void lkpi_arch_phys_wc_del(int);
#define	arch_phys_wc_add(...)	lkpi_arch_phys_wc_add(__VA_ARGS__)
#define	arch_phys_wc_del(...)	lkpi_arch_phys_wc_del(__VA_ARGS__)
/* Handle -> zero-based index; -1 for handles below __MTRR_ID_BASE. */
#define	arch_phys_wc_index(x)	\
	(((x) < __MTRR_ID_BASE) ? -1 : ((x) - __MTRR_ID_BASE))
540 
/*
 * Request write-combining for the physical range [start, start + size).
 * amd64: changes the attribute of the direct-map region, returning the
 * negated pmap_change_attr() result (0 on success).  Other
 * architectures: no-op that reports success.
 */
static inline int
arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
#if defined(__amd64__)
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);
	return (-pmap_change_attr(va, size, VM_MEMATTR_WRITE_COMBINING));
#else
	return (0);
#endif
}
553 
/*
 * Release a write-combining reservation made by
 * arch_io_reserve_memtype_wc().  amd64: restores the direct-map range to
 * write-back, ignoring any pmap_change_attr() error.  Other
 * architectures: no-op.
 */
static inline void
arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
{
#if defined(__amd64__)
	vm_offset_t va;

	va = PHYS_TO_DMAP(start);

	pmap_change_attr(va, size, VM_MEMATTR_WRITE_BACK);
#endif
}
565 
566 #endif	/* _LINUXKPI_LINUX_IO_H_ */
567