// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/io.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/export.h>
#include <linux/types.h>
#include <linux/io.h>

/*
 * Copy data from IO memory space to "real" memory space.
 */
void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
{
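	/* Head: byte reads until the source is 64-bit aligned. */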
	while (count && !IS_ALIGNED((unsigned long)from, 8)) {
		*(u8 *)to = __raw_readb(from);
		from++;
		to++;
		count--;
	}

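	/* Bulk: aligned 64-bit reads. */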
	while (count >= 8) {
		*(u64 *)to = __raw_readq(from);
		from += 8;
		to += 8;
		count -= 8;
	}

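	/* Tail: whatever bytes remain. */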
	while (count) {
		*(u8 *)to = __raw_readb(from);
		from++;
		to++;
		count--;
	}
}
EXPORT_SYMBOL(__memcpy_fromio);
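
/*
 * A hedged usage sketch, not part of this file: a driver that has
 * ioremap()ed a device SRAM window might bounce it into an ordinary
 * kernel buffer. "base", "SRAM_OFF" and "len" are hypothetical names:
 *
 *	void __iomem *base = ioremap(res->start, resource_size(res));
 *	u8 buf[SZ_256];
 *
 *	memcpy_fromio(buf, base + SRAM_OFF, len);
 *
 * Drivers call the memcpy_fromio() wrapper, which on arm64 resolves to
 * __memcpy_fromio() above; the head/tail loops keep the accesses correct
 * even when "from" is not 64-bit aligned.
 */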

/*
 * Generate a memcpy for from/to addresses that are aligned to the access
 * width "bits". "count" is the number of "bits"-sized quantities to copy.
 * It groups STR instructions where possible so that the stores are
 * write-combining (WC) friendly.
 */
#define memcpy_toio_aligned(to, from, count, bits)                        \
	({                                                                \
		volatile u##bits __iomem *_to = to;                       \
		const u##bits *_from = from;                              \
		size_t _count = count;                                    \
		const u##bits *_end_from = _from + ALIGN_DOWN(_count, 8); \
                                                                          \
		for (; _from < _end_from; _from += 8, _to += 8)           \
			__const_memcpy_toio_aligned##bits(_to, _from, 8); \
		if ((_count % 8) >= 4) {                                  \
			__const_memcpy_toio_aligned##bits(_to, _from, 4); \
			_from += 4;                                       \
			_to += 4;                                         \
		}                                                         \
		if ((_count % 4) >= 2) {                                  \
			__const_memcpy_toio_aligned##bits(_to, _from, 2); \
			_from += 2;                                       \
			_to += 2;                                         \
		}                                                         \
		if (_count % 2)                                           \
			__const_memcpy_toio_aligned##bits(_to, _from, 1); \
	})
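
/*
 * A worked example (illustrative only): with count == 19, ALIGN_DOWN(19, 8)
 * is 16, so the for loop emits two groups of 8; the remainder 3 is emitted
 * as a group of 2 plus a single store (19 == 8 + 8 + 2 + 1). A count of 7
 * skips the loop entirely and decomposes as 4 + 2 + 1. Each group becomes
 * one __const_memcpy_toio_aligned{32,64}() call with a constant length, so
 * the stores stay in back-to-back STR sequences the CPU can gather.
 */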

void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count)
{
	memcpy_toio_aligned(to, from, count, 64);
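	/* DGH hint: do not gather later stores with the ones above. */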
	dgh();
}
EXPORT_SYMBOL(__iowrite64_copy_full);

void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count)
{
	memcpy_toio_aligned(to, from, count, 32);
	dgh();
}
EXPORT_SYMBOL(__iowrite32_copy_full);
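
/*
 * A hedged usage sketch (illustrative only): posting a work-queue entry to
 * a device doorbell page is the typical caller. "wqe" and "db" below are
 * hypothetical, and the sketch assumes callers reach this code through the
 * generic __iowrite64_copy() wrapper, which can inline small constant
 * counts and fall back to the _full variant otherwise:
 *
 *	u64 wqe[8];
 *	void __iomem *db;
 *
 *	__iowrite64_copy(db, wqe, ARRAY_SIZE(wqe));
 *
 * The count is in 64-bit (or, for the 32 variant, 32-bit) quantities, not
 * bytes.
 */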

/*
 * Copy data from "real" memory space to IO memory space.
 */
void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
{
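	/* Head: byte writes until the destination is 64-bit aligned. */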
	while (count && !IS_ALIGNED((unsigned long)to, 8)) {
		__raw_writeb(*(u8 *)from, to);
		from++;
		to++;
		count--;
	}

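	/* Bulk: aligned 64-bit writes. */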
	while (count >= 8) {
		__raw_writeq(*(u64 *)from, to);
		from += 8;
		to += 8;
		count -= 8;
	}

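	/* Tail: whatever bytes remain. */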
	while (count) {
		__raw_writeb(*(u8 *)from, to);
		from++;
		to++;
		count--;
	}
}
EXPORT_SYMBOL(__memcpy_toio);
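
/*
 * A hedged usage sketch, not from this file: copying a firmware image into
 * a device memory window. "base", "FW_OFF", "dev" and the firmware name
 * are hypothetical:
 *
 *	const struct firmware *fw;
 *
 *	if (!request_firmware(&fw, "vendor/image.bin", dev)) {
 *		memcpy_toio(base + FW_OFF, fw->data, fw->size);
 *		release_firmware(fw);
 *	}
 *
 * As on the read side, drivers use the memcpy_toio() wrapper rather than
 * calling __memcpy_toio() directly.
 */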

/*
 * "memset" on IO memory space.
 */
void __memset_io(volatile void __iomem *dst, int c, size_t count)
{
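	/* Replicate the fill byte into all eight byte lanes of a u64. */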
	u64 qc = (u8)c;

	qc |= qc << 8;
	qc |= qc << 16;
	qc |= qc << 32;

	while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
		__raw_writeb(c, dst);
		dst++;
		count--;
	}

	while (count >= 8) {
		__raw_writeq(qc, dst);
		dst += 8;
		count -= 8;
	}

	while (count) {
		__raw_writeb(c, dst);
		dst++;
		count--;
	}
}
EXPORT_SYMBOL(__memset_io);
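
/*
 * A worked example of the replication above (illustrative): for c == 0xAB
 * the shifts build 0xAB -> 0xABAB -> 0xABABABAB -> 0xABABABABABABABAB, so
 * each 64-bit store writes eight copies of the fill byte. A hedged usage
 * sketch, with "base" and "WIN_SIZE" hypothetical:
 *
 *	memset_io(base, 0, WIN_SIZE);
 */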