xref: /linux/arch/alpha/include/asm/io.h (revision ca853314e78b0a65c20b6a889a23c31f918d4aa2)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ALPHA_IO_H
3 #define __ALPHA_IO_H
4 
5 #ifdef __KERNEL__
6 
7 #include <linux/kernel.h>
8 #include <linux/mm.h>
9 #include <asm/compiler.h>
10 #include <asm/machvec.h>
11 #include <asm/hwrpb.h>
12 
13 /* The generic header contains only prototypes.  Including it ensures that
14    the implementation we have here matches that interface.  */
15 #include <asm-generic/iomap.h>
16 
/* The Alpha needs no I/O slowdown cycles, but keep the hooks defined.  */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Base of the virtual -> physical identity mapping (KSEG).
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR     0xffff800000000000UL
#else
#define IDENT_ADDR     0xfffffc0000000000UL
#endif
29 
30 /*
31  * We try to avoid hae updates (thus the cache), but when we
32  * do need to update the hae, we need to do it atomically, so
33  * that any interrupts wouldn't get confused with the hae
34  * register not being up-to-date with respect to the hardware
35  * value.
36  */
37 extern inline void __set_hae(unsigned long new_hae)
38 {
39 	unsigned long flags = swpipl(IPL_MAX);
40 
41 	barrier();
42 
43 	alpha_mv.hae_cache = new_hae;
44 	*alpha_mv.hae_register = new_hae;
45 	mb();
46 	/* Re-read to make sure it was written.  */
47 	new_hae = *alpha_mv.hae_register;
48 
49 	setipl(flags);
50 	barrier();
51 }
52 
53 extern inline void set_hae(unsigned long new_hae)
54 {
55 	if (new_hae != alpha_mv.hae_cache)
56 		__set_hae(new_hae);
57 }
58 
59 /*
60  * Change virtual addresses to physical addresses and vv.
61  */
62 #ifdef USE_48_BIT_KSEG
63 static inline unsigned long virt_to_phys(void *address)
64 {
65 	return (unsigned long)address - IDENT_ADDR;
66 }
67 
68 static inline void * phys_to_virt(unsigned long address)
69 {
70 	return (void *) (address + IDENT_ADDR);
71 }
72 #else
73 static inline unsigned long virt_to_phys(void *address)
74 {
75         unsigned long phys = (unsigned long)address;
76 
77 	/* Sign-extend from bit 41.  */
78 	phys <<= (64 - 41);
79 	phys = (long)phys >> (64 - 41);
80 
81 	/* Crop to the physical address width of the processor.  */
82         phys &= (1ul << hwrpb->pa_bits) - 1;
83 
84         return phys;
85 }
86 
87 static inline void * phys_to_virt(unsigned long address)
88 {
89         return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
90 }
91 #endif
92 
93 #define page_to_phys(page)	page_to_pa(page)
94 
95 /* Maximum PIO space address supported?  */
96 #define IO_SPACE_LIMIT 0xffff
97 
98 /*
99  * Change addresses as seen by the kernel (virtual) to addresses as
100  * seen by a device (bus), and vice versa.
101  *
102  * Note that this only works for a limited range of kernel addresses,
103  * and very well may not span all memory.  Consider this interface
104  * deprecated in favour of the DMA-mapping API.
105  */
106 extern unsigned long __direct_map_base;
107 extern unsigned long __direct_map_size;
108 
109 static inline unsigned long __deprecated virt_to_bus(void *address)
110 {
111 	unsigned long phys = virt_to_phys(address);
112 	unsigned long bus = phys + __direct_map_base;
113 	return phys <= __direct_map_size ? bus : 0;
114 }
115 #define isa_virt_to_bus virt_to_bus
116 
117 static inline void * __deprecated bus_to_virt(unsigned long address)
118 {
119 	void *virt;
120 
121 	/* This check is a sanity check but also ensures that bus address 0
122 	   maps to virtual address 0 which is useful to detect null pointers
123 	   (the NCR driver is much simpler if NULL pointers are preserved).  */
124 	address -= __direct_map_base;
125 	virt = phys_to_virt(address);
126 	return (long)address <= 0 ? NULL : virt;
127 }
128 #define isa_bus_to_virt bus_to_virt
129 
/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 * Paste a prefix onto a name with an underscore; the extra indirection
 * lets the prefix argument itself be a macro (e.g. __IO_PREFIX).
 */
#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b
136 
137 #ifdef CONFIG_ALPHA_GENERIC
138 
139 /* In a generic kernel, we always go through the machine vector.  */
140 
141 #define REMAP1(TYPE, NAME, QUAL)					\
142 static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
143 {									\
144 	return alpha_mv.mv_##NAME(addr);				\
145 }
146 
147 #define REMAP2(TYPE, NAME, QUAL)					\
148 static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
149 {									\
150 	alpha_mv.mv_##NAME(b, addr);					\
151 }
152 
153 REMAP1(unsigned int, ioread8, const)
154 REMAP1(unsigned int, ioread16, const)
155 REMAP1(unsigned int, ioread32, const)
156 REMAP1(u8, readb, const volatile)
157 REMAP1(u16, readw, const volatile)
158 REMAP1(u32, readl, const volatile)
159 REMAP1(u64, readq, const volatile)
160 
161 REMAP2(u8, iowrite8, /**/)
162 REMAP2(u16, iowrite16, /**/)
163 REMAP2(u32, iowrite32, /**/)
164 REMAP2(u8, writeb, volatile)
165 REMAP2(u16, writew, volatile)
166 REMAP2(u32, writel, volatile)
167 REMAP2(u64, writeq, volatile)
168 
169 #undef REMAP1
170 #undef REMAP2
171 
172 extern inline void __iomem *generic_ioportmap(unsigned long a)
173 {
174 	return alpha_mv.mv_ioportmap(a);
175 }
176 
177 static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
178 {
179 	return alpha_mv.mv_ioremap(a, s);
180 }
181 
182 static inline void generic_iounmap(volatile void __iomem *a)
183 {
184 	return alpha_mv.mv_iounmap(a);
185 }
186 
187 static inline int generic_is_ioaddr(unsigned long a)
188 {
189 	return alpha_mv.mv_is_ioaddr(a);
190 }
191 
192 static inline int generic_is_mmio(const volatile void __iomem *a)
193 {
194 	return alpha_mv.mv_is_mmio(a);
195 }
196 
197 #define __IO_PREFIX		generic
198 #define generic_trivial_rw_bw	0
199 #define generic_trivial_rw_lq	0
200 #define generic_trivial_io_bw	0
201 #define generic_trivial_io_lq	0
202 #define generic_trivial_iounmap	0
203 
204 #else
205 
206 #if defined(CONFIG_ALPHA_APECS)
207 # include <asm/core_apecs.h>
208 #elif defined(CONFIG_ALPHA_CIA)
209 # include <asm/core_cia.h>
210 #elif defined(CONFIG_ALPHA_IRONGATE)
211 # include <asm/core_irongate.h>
212 #elif defined(CONFIG_ALPHA_JENSEN)
213 # include <asm/jensen.h>
214 #elif defined(CONFIG_ALPHA_LCA)
215 # include <asm/core_lca.h>
216 #elif defined(CONFIG_ALPHA_MARVEL)
217 # include <asm/core_marvel.h>
218 #elif defined(CONFIG_ALPHA_MCPCIA)
219 # include <asm/core_mcpcia.h>
220 #elif defined(CONFIG_ALPHA_POLARIS)
221 # include <asm/core_polaris.h>
222 #elif defined(CONFIG_ALPHA_T2)
223 # include <asm/core_t2.h>
224 #elif defined(CONFIG_ALPHA_TSUNAMI)
225 # include <asm/core_tsunami.h>
226 #elif defined(CONFIG_ALPHA_TITAN)
227 # include <asm/core_titan.h>
228 #elif defined(CONFIG_ALPHA_WILDFIRE)
229 # include <asm/core_wildfire.h>
230 #else
231 #error "What system is this?"
232 #endif
233 
234 #endif /* GENERIC */
235 
236 /*
237  * We always have external versions of these routines.
238  */
239 extern u8		inb(unsigned long port);
240 extern u16		inw(unsigned long port);
241 extern u32		inl(unsigned long port);
242 extern void		outb(u8 b, unsigned long port);
243 extern void		outw(u16 b, unsigned long port);
244 extern void		outl(u32 b, unsigned long port);
245 
246 extern u8		readb(const volatile void __iomem *addr);
247 extern u16		readw(const volatile void __iomem *addr);
248 extern u32		readl(const volatile void __iomem *addr);
249 extern u64		readq(const volatile void __iomem *addr);
250 extern void		writeb(u8 b, volatile void __iomem *addr);
251 extern void		writew(u16 b, volatile void __iomem *addr);
252 extern void		writel(u32 b, volatile void __iomem *addr);
253 extern void		writeq(u64 b, volatile void __iomem *addr);
254 
255 extern u8		__raw_readb(const volatile void __iomem *addr);
256 extern u16		__raw_readw(const volatile void __iomem *addr);
257 extern u32		__raw_readl(const volatile void __iomem *addr);
258 extern u64		__raw_readq(const volatile void __iomem *addr);
259 extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
260 extern void		__raw_writew(u16 b, volatile void __iomem *addr);
261 extern void		__raw_writel(u32 b, volatile void __iomem *addr);
262 extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
263 
264 /*
265  * Mapping from port numbers to __iomem space is pretty easy.
266  */
267 
268 /* These two have to be extern inline because of the extern prototype from
269    <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
270    the same declaration.  */
271 extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
272 {
273 	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
274 }
275 
276 extern inline void ioport_unmap(void __iomem *addr)
277 {
278 }
279 
280 static inline void __iomem *ioremap(unsigned long port, unsigned long size)
281 {
282 	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
283 }
284 
285 #define ioremap_wc ioremap
286 #define ioremap_uc ioremap
287 
288 static inline void iounmap(volatile void __iomem *addr)
289 {
290 	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
291 }
292 
293 static inline int __is_ioaddr(unsigned long addr)
294 {
295 	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
296 }
297 #define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
298 
299 static inline int __is_mmio(const volatile void __iomem *addr)
300 {
301 	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
302 }
303 
304 
305 /*
306  * If the actual I/O bits are sufficiently trivial, then expand inline.
307  */
308 
309 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
310 extern inline unsigned int ioread8(const void __iomem *addr)
311 {
312 	unsigned int ret;
313 	mb();
314 	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
315 	mb();
316 	return ret;
317 }
318 
319 extern inline unsigned int ioread16(const void __iomem *addr)
320 {
321 	unsigned int ret;
322 	mb();
323 	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
324 	mb();
325 	return ret;
326 }
327 
328 extern inline void iowrite8(u8 b, void __iomem *addr)
329 {
330 	mb();
331 	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
332 }
333 
334 extern inline void iowrite16(u16 b, void __iomem *addr)
335 {
336 	mb();
337 	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
338 }
339 
340 extern inline u8 inb(unsigned long port)
341 {
342 	return ioread8(ioport_map(port, 1));
343 }
344 
345 extern inline u16 inw(unsigned long port)
346 {
347 	return ioread16(ioport_map(port, 2));
348 }
349 
350 extern inline void outb(u8 b, unsigned long port)
351 {
352 	iowrite8(b, ioport_map(port, 1));
353 }
354 
355 extern inline void outw(u16 b, unsigned long port)
356 {
357 	iowrite16(b, ioport_map(port, 2));
358 }
359 #endif
360 
361 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
362 extern inline unsigned int ioread32(const void __iomem *addr)
363 {
364 	unsigned int ret;
365 	mb();
366 	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
367 	mb();
368 	return ret;
369 }
370 
371 extern inline void iowrite32(u32 b, void __iomem *addr)
372 {
373 	mb();
374 	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
375 }
376 
377 extern inline u32 inl(unsigned long port)
378 {
379 	return ioread32(ioport_map(port, 4));
380 }
381 
382 extern inline void outl(u32 b, unsigned long port)
383 {
384 	iowrite32(b, ioport_map(port, 4));
385 }
386 #endif
387 
388 #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
389 extern inline u8 __raw_readb(const volatile void __iomem *addr)
390 {
391 	return IO_CONCAT(__IO_PREFIX,readb)(addr);
392 }
393 
394 extern inline u16 __raw_readw(const volatile void __iomem *addr)
395 {
396 	return IO_CONCAT(__IO_PREFIX,readw)(addr);
397 }
398 
399 extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
400 {
401 	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
402 }
403 
404 extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
405 {
406 	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
407 }
408 
409 extern inline u8 readb(const volatile void __iomem *addr)
410 {
411 	u8 ret;
412 	mb();
413 	ret = __raw_readb(addr);
414 	mb();
415 	return ret;
416 }
417 
418 extern inline u16 readw(const volatile void __iomem *addr)
419 {
420 	u16 ret;
421 	mb();
422 	ret = __raw_readw(addr);
423 	mb();
424 	return ret;
425 }
426 
427 extern inline void writeb(u8 b, volatile void __iomem *addr)
428 {
429 	mb();
430 	__raw_writeb(b, addr);
431 }
432 
433 extern inline void writew(u16 b, volatile void __iomem *addr)
434 {
435 	mb();
436 	__raw_writew(b, addr);
437 }
438 #endif
439 
440 #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
441 extern inline u32 __raw_readl(const volatile void __iomem *addr)
442 {
443 	return IO_CONCAT(__IO_PREFIX,readl)(addr);
444 }
445 
446 extern inline u64 __raw_readq(const volatile void __iomem *addr)
447 {
448 	return IO_CONCAT(__IO_PREFIX,readq)(addr);
449 }
450 
451 extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
452 {
453 	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
454 }
455 
456 extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
457 {
458 	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
459 }
460 
461 extern inline u32 readl(const volatile void __iomem *addr)
462 {
463 	u32 ret;
464 	mb();
465 	ret = __raw_readl(addr);
466 	mb();
467 	return ret;
468 }
469 
470 extern inline u64 readq(const volatile void __iomem *addr)
471 {
472 	u64 ret;
473 	mb();
474 	ret = __raw_readq(addr);
475 	mb();
476 	return ret;
477 }
478 
479 extern inline void writel(u32 b, volatile void __iomem *addr)
480 {
481 	mb();
482 	__raw_writel(b, addr);
483 }
484 
485 extern inline void writeq(u64 b, volatile void __iomem *addr)
486 {
487 	mb();
488 	__raw_writeq(b, addr);
489 }
490 #endif
491 
/* Big-endian variants: byte-swap around the native accessors.  */
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))

/* No I/O recovery pause is needed on Alpha; the _p forms are aliases.  */
#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl
503 
504 extern u8 readb_relaxed(const volatile void __iomem *addr);
505 extern u16 readw_relaxed(const volatile void __iomem *addr);
506 extern u32 readl_relaxed(const volatile void __iomem *addr);
507 extern u64 readq_relaxed(const volatile void __iomem *addr);
508 
509 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
510 extern inline u8 readb_relaxed(const volatile void __iomem *addr)
511 {
512 	mb();
513 	return __raw_readb(addr);
514 }
515 
516 extern inline u16 readw_relaxed(const volatile void __iomem *addr)
517 {
518 	mb();
519 	return __raw_readw(addr);
520 }
521 #endif
522 
523 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
524 extern inline u32 readl_relaxed(const volatile void __iomem *addr)
525 {
526 	mb();
527 	return __raw_readl(addr);
528 }
529 
530 extern inline u64 readq_relaxed(const volatile void __iomem *addr)
531 {
532 	mb();
533 	return __raw_readq(addr);
534 }
535 #endif
536 
537 #define writeb_relaxed	writeb
538 #define writew_relaxed	writew
539 #define writel_relaxed	writel
540 #define writeq_relaxed	writeq
541 
542 /*
543  * String version of IO memory access ops:
544  */
545 extern void memcpy_fromio(void *, const volatile void __iomem *, long);
546 extern void memcpy_toio(volatile void __iomem *, const void *, long);
547 extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
548 
549 static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
550 {
551 	_memset_c_io(addr, 0x0101010101010101UL * c, len);
552 }
553 
554 #define __HAVE_ARCH_MEMSETW_IO
555 static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
556 {
557 	_memset_c_io(addr, 0x0001000100010001UL * c, len);
558 }
559 
560 /*
561  * String versions of in/out ops:
562  */
563 extern void insb (unsigned long port, void *dst, unsigned long count);
564 extern void insw (unsigned long port, void *dst, unsigned long count);
565 extern void insl (unsigned long port, void *dst, unsigned long count);
566 extern void outsb (unsigned long port, const void *src, unsigned long count);
567 extern void outsw (unsigned long port, const void *src, unsigned long count);
568 extern void outsl (unsigned long port, const void *src, unsigned long count);
569 
/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70, probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines override the defaults when doing RTC queries.
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170 + (x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0
588 
/*
 * Some callers probe for writeq/readq with #if[n]def.  That is a poor
 * idea (something like ARCH_HAS_WRITEQ would be better), but until it
 * exists, define each name as a macro that expands to itself so such
 * probes see the symbol as defined.
 */

#define writeq writeq
#define readq readq

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p
609 
610 #endif /* __KERNEL__ */
611 
612 #endif /* __ALPHA_IO_H */
613