xref: /linux/arch/sparc/include/asm/io_64.h (revision 3932b9ca55b0be314a36d3e84faff3e823c081f5)
1 #ifndef __SPARC64_IO_H
2 #define __SPARC64_IO_H
3 
4 #include <linux/kernel.h>
5 #include <linux/compiler.h>
6 #include <linux/types.h>
7 
8 #include <asm/page.h>      /* IO address mapping routines need this */
9 #include <asm/asi.h>
10 #include <asm-generic/pci_iomap.h>
11 
12 /* BIO layer definitions. */
13 extern unsigned long kern_base, kern_size;
14 
/* __raw_{read,write}{b,w,l,q} uses direct access.
 * Access the memory as big endian bypassing the cache
 * by using ASI_PHYS_BYPASS_EC_E
 */
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 ret;

	/* "lduba" loads one unsigned byte using the explicit ASI in %2;
	 * ASI_PHYS_BYPASS_EC_E gives a physically addressed,
	 * cache-bypassing access.
	 */
	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_raw_readb */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}
30 
#define __raw_readw __raw_readw
/* Big-endian, cache-bypassing 16-bit load ("lduha" = load unsigned half). */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 ret;

	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_raw_readw */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}
42 
#define __raw_readl __raw_readl
/* Big-endian, cache-bypassing 32-bit load ("lduwa" = load unsigned word). */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 ret;

	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_raw_readl */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}
54 
#define __raw_readq __raw_readq
/* Big-endian, cache-bypassing 64-bit load ("ldxa" = load extended word). */
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 ret;

	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_raw_readq */"
			     : "=r" (ret)
			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));

	return ret;
}
66 
#define __raw_writeb __raw_writeb
/* Big-endian, cache-bypassing byte store.
 * The "Jr" constraint plus "%r0" lets the assembler substitute %g0 when
 * the value is the constant zero, avoiding a register load.
 * NOTE(review): addr is const-qualified even though this is a store; the
 * write happens inside the asm so the compiler never sees it, but later
 * kernels drop the qualifier — confirm against callers before changing.
 */
static inline void __raw_writeb(u8 b, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_raw_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
74 
#define __raw_writew __raw_writew
/* Big-endian, cache-bypassing 16-bit store ("stha" = store halfword). */
static inline void __raw_writew(u16 w, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_raw_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
82 
#define __raw_writel __raw_writel
/* Big-endian, cache-bypassing 32-bit store ("stwa" = store word). */
static inline void __raw_writel(u32 l, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_raw_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
90 
#define __raw_writeq __raw_writeq
/* Big-endian, cache-bypassing 64-bit store ("stxa" = store extended word). */
static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_raw_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
}
98 
99 /* Memory functions, same as I/O accesses on Ultra.
100  * Access memory as little endian bypassing
101  * the cache by using ASI_PHYS_BYPASS_EC_E_L
102  */
103 #define readb readb
104 static inline u8 readb(const volatile void __iomem *addr)
105 {	u8 ret;
106 
107 	__asm__ __volatile__("lduba\t[%1] %2, %0\t/* pci_readb */"
108 			     : "=r" (ret)
109 			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
110 			     : "memory");
111 	return ret;
112 }
113 
114 #define readw readw
115 static inline u16 readw(const volatile void __iomem *addr)
116 {	u16 ret;
117 
118 	__asm__ __volatile__("lduha\t[%1] %2, %0\t/* pci_readw */"
119 			     : "=r" (ret)
120 			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
121 			     : "memory");
122 
123 	return ret;
124 }
125 
126 #define readl readl
127 static inline u32 readl(const volatile void __iomem *addr)
128 {	u32 ret;
129 
130 	__asm__ __volatile__("lduwa\t[%1] %2, %0\t/* pci_readl */"
131 			     : "=r" (ret)
132 			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
133 			     : "memory");
134 
135 	return ret;
136 }
137 
138 #define readq readq
139 static inline u64 readq(const volatile void __iomem *addr)
140 {	u64 ret;
141 
142 	__asm__ __volatile__("ldxa\t[%1] %2, %0\t/* pci_readq */"
143 			     : "=r" (ret)
144 			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
145 			     : "memory");
146 
147 	return ret;
148 }
149 
#define writeb writeb
/* Little-endian, cache-bypassing byte store; the "memory" clobber
 * orders it against compiler-visible memory accesses.
 */
static inline void writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__("stba\t%r0, [%1] %2\t/* pci_writeb */"
			     : /* no outputs */
			     : "Jr" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}
158 
#define writew writew
/* Little-endian, cache-bypassing 16-bit store. */
static inline void writew(u16 w, volatile void __iomem *addr)
{
	__asm__ __volatile__("stha\t%r0, [%1] %2\t/* pci_writew */"
			     : /* no outputs */
			     : "Jr" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}
167 
#define writel writel
/* Little-endian, cache-bypassing 32-bit store. */
static inline void writel(u32 l, volatile void __iomem *addr)
{
	__asm__ __volatile__("stwa\t%r0, [%1] %2\t/* pci_writel */"
			     : /* no outputs */
			     : "Jr" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}
176 
#define writeq writeq
/* Little-endian, cache-bypassing 64-bit store. */
static inline void writeq(u64 q, volatile void __iomem *addr)
{
	__asm__ __volatile__("stxa\t%r0, [%1] %2\t/* pci_writeq */"
			     : /* no outputs */
			     : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");
}
185 
186 
187 #define inb inb
188 static inline u8 inb(unsigned long addr)
189 {
190 	return readb((volatile void __iomem *)addr);
191 }
192 
193 #define inw inw
194 static inline u16 inw(unsigned long addr)
195 {
196 	return readw((volatile void __iomem *)addr);
197 }
198 
199 #define inl inl
200 static inline u32 inl(unsigned long addr)
201 {
202 	return readl((volatile void __iomem *)addr);
203 }
204 
205 #define outb outb
206 static inline void outb(u8 b, unsigned long addr)
207 {
208 	writeb(b, (volatile void __iomem *)addr);
209 }
210 
211 #define outw outw
212 static inline void outw(u16 w, unsigned long addr)
213 {
214 	writew(w, (volatile void __iomem *)addr);
215 }
216 
217 #define outl outl
218 static inline void outl(u32 l, unsigned long addr)
219 {
220 	writel(l, (volatile void __iomem *)addr);
221 }
222 
223 
/* The "_p" variants are plain aliases of the normal accessors here. */
#define inb_p(__addr) 		inb(__addr)
#define outb_p(__b, __addr)	outb(__b, __addr)
#define inw_p(__addr)		inw(__addr)
#define outw_p(__w, __addr)	outw(__w, __addr)
#define inl_p(__addr)		inl(__addr)
#define outl_p(__l, __addr)	outl(__l, __addr)
230 
231 void outsb(unsigned long, const void *, unsigned long);
232 void outsw(unsigned long, const void *, unsigned long);
233 void outsl(unsigned long, const void *, unsigned long);
234 void insb(unsigned long, void *, unsigned long);
235 void insw(unsigned long, void *, unsigned long);
236 void insl(unsigned long, void *, unsigned long);
237 
238 static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
239 {
240 	insb((unsigned long __force)port, buf, count);
241 }
242 static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
243 {
244 	insw((unsigned long __force)port, buf, count);
245 }
246 
247 static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
248 {
249 	insl((unsigned long __force)port, buf, count);
250 }
251 
252 static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
253 {
254 	outsb((unsigned long __force)port, buf, count);
255 }
256 
257 static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
258 {
259 	outsw((unsigned long __force)port, buf, count);
260 }
261 
262 static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
263 {
264 	outsl((unsigned long __force)port, buf, count);
265 }
266 
267 #define readb_relaxed(__addr)	readb(__addr)
268 #define readw_relaxed(__addr)	readw(__addr)
269 #define readl_relaxed(__addr)	readl(__addr)
270 #define readq_relaxed(__addr)	readq(__addr)
271 
272 /* Valid I/O Space regions are anywhere, because each PCI bus supported
273  * can live in an arbitrary area of the physical address range.
274  */
275 #define IO_SPACE_LIMIT 0xffffffffffffffffUL
276 
277 /* Now, SBUS variants, only difference from PCI is that we do
278  * not use little-endian ASIs.
279  */
/* SBUS byte read: big-endian, so reuse the non-_L __raw_ accessor. */
static inline u8 sbus_readb(const volatile void __iomem *addr)
{
	return __raw_readb(addr);
}
284 
/* SBUS 16-bit read: big-endian, so reuse the non-_L __raw_ accessor. */
static inline u16 sbus_readw(const volatile void __iomem *addr)
{
	return __raw_readw(addr);
}
289 
/* SBUS 32-bit read: big-endian, so reuse the non-_L __raw_ accessor. */
static inline u32 sbus_readl(const volatile void __iomem *addr)
{
	return __raw_readl(addr);
}
294 
/* SBUS 64-bit read: big-endian, so reuse the non-_L __raw_ accessor. */
static inline u64 sbus_readq(const volatile void __iomem *addr)
{
	return __raw_readq(addr);
}
299 
/* SBUS byte write: big-endian, so reuse the non-_L __raw_ accessor. */
static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
{
	__raw_writeb(b, addr);
}
304 
/* SBUS 16-bit write: big-endian, so reuse the non-_L __raw_ accessor. */
static inline void sbus_writew(u16 w, volatile void __iomem *addr)
{
	__raw_writew(w, addr);
}
309 
/* SBUS 32-bit write: big-endian, so reuse the non-_L __raw_ accessor. */
static inline void sbus_writel(u32 l, volatile void __iomem *addr)
{
	__raw_writel(l, addr);
}
314 
/* SBUS 64-bit write: big-endian, so reuse the non-_L __raw_ accessor. */
static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
{
	__raw_writeq(q, addr);
}
319 
320 static inline void sbus_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
321 {
322 	while(n--) {
323 		sbus_writeb(c, dst);
324 		dst++;
325 	}
326 }
327 
328 static inline void memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
329 {
330 	volatile void __iomem *d = dst;
331 
332 	while (n--) {
333 		writeb(c, d);
334 		d++;
335 	}
336 }
337 
338 static inline void sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
339 				      __kernel_size_t n)
340 {
341 	char *d = dst;
342 
343 	while (n--) {
344 		char tmp = sbus_readb(src);
345 		*d++ = tmp;
346 		src++;
347 	}
348 }
349 
350 
351 static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
352 				 __kernel_size_t n)
353 {
354 	char *d = dst;
355 
356 	while (n--) {
357 		char tmp = readb(src);
358 		*d++ = tmp;
359 		src++;
360 	}
361 }
362 
363 static inline void sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
364 				    __kernel_size_t n)
365 {
366 	const char *s = src;
367 	volatile void __iomem *d = dst;
368 
369 	while (n--) {
370 		char tmp = *s++;
371 		sbus_writeb(tmp, d);
372 		d++;
373 	}
374 }
375 
376 static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
377 			       __kernel_size_t n)
378 {
379 	const char *s = src;
380 	volatile void __iomem *d = dst;
381 
382 	while (n--) {
383 		char tmp = *s++;
384 		writeb(tmp, d);
385 		d++;
386 	}
387 }
388 
389 #define mmiowb()
390 
391 #ifdef __KERNEL__
392 
/* On sparc64 we have the whole physical IO address space accessible
 * using physically addressed loads and stores, so this does nothing.
 */
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
	/* The physical address doubles as the cookie; size is unused. */
	return (void __iomem *)offset;
}
400 
401 #define ioremap_nocache(X,Y)		ioremap((X),(Y))
402 #define ioremap_wc(X,Y)			ioremap((X),(Y))
403 
static inline void iounmap(volatile void __iomem *addr)
{
	/* Intentionally empty: ioremap() created no mapping to tear down. */
}
407 
/* iomap accessors: the native forms map to the little-endian readX/writeX
 * helpers above, the "be" forms to the big-endian __raw_ variants.
 */
#define ioread8(X)			readb(X)
#define ioread16(X)			readw(X)
#define ioread16be(X)			__raw_readw(X)
#define ioread32(X)			readl(X)
#define ioread32be(X)			__raw_readl(X)
#define iowrite8(val,X)			writeb(val,X)
#define iowrite16(val,X)		writew(val,X)
#define iowrite16be(val,X)		__raw_writew(val,X)
#define iowrite32(val,X)		writel(val,X)
#define iowrite32be(val,X)		__raw_writel(val,X)
418 
419 /* Create a virtual mapping cookie for an IO port range */
420 void __iomem *ioport_map(unsigned long port, unsigned int nr);
421 void ioport_unmap(void __iomem *);
422 
423 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
424 struct pci_dev;
425 void pci_iounmap(struct pci_dev *dev, void __iomem *);
426 
/* Constant capability probe: always reports support. */
static inline int sbus_can_dma_64bit(void)
{
	return 1;
}
/* Constant capability probe: always reports support. */
static inline int sbus_can_burst64(void)
{
	return 1;
}
435 struct device;
436 void sbus_set_sbus64(struct device *, int);
437 
438 /*
439  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
440  * access
441  */
442 #define xlate_dev_mem_ptr(p)	__va(p)
443 
444 /*
445  * Convert a virtual cached pointer to an uncached pointer
446  */
447 #define xlate_dev_kmem_ptr(p)	p
448 
449 #endif
450 
451 #endif /* !(__SPARC64_IO_H) */
452