1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __ALPHA_IO_H
3 #define __ALPHA_IO_H
4
5 #ifdef __KERNEL__
6
7 #include <linux/kernel.h>
8 #include <linux/mm.h>
9 #include <asm/compiler.h>
10 #include <asm/machvec.h>
11 #include <asm/hwrpb.h>
12
13 /*
14 * Virtual -> physical identity mapping starts at this offset
15 */
16 #ifdef USE_48_BIT_KSEG
17 #define IDENT_ADDR 0xffff800000000000UL
18 #else
19 #define IDENT_ADDR 0xfffffc0000000000UL
20 #endif
21
22 /*
23 * We try to avoid hae updates (thus the cache), but when we
24 * do need to update the hae, we need to do it atomically, so
25 * that any interrupts wouldn't get confused with the hae
26 * register not being up-to-date with respect to the hardware
27 * value.
28 */
/*
 * Update both the cached HAE value and the hardware HAE register.
 * Interrupts are blocked (IPL raised to maximum) for the duration so
 * no interrupt handler can observe the cache and the register in
 * disagreement.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	/* Raise IPL to max; swpipl returns the previous level. */
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written. */
	new_hae = *alpha_mv.hae_register;

	/* Restore the previous interrupt level. */
	setipl(flags);
	barrier();
}
44
/*
 * Set the HAE, skipping the expensive atomic update when the cached
 * value already matches the requested one.
 */
extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}
50
51 /*
52 * Change virtual addresses to physical addresses and vv.
53 */
54 #ifdef USE_48_BIT_KSEG
virt_to_phys(volatile void * address)55 static inline unsigned long virt_to_phys(volatile void *address)
56 {
57 return (unsigned long)address - IDENT_ADDR;
58 }
59
phys_to_virt(unsigned long address)60 static inline void * phys_to_virt(unsigned long address)
61 {
62 return (void *) (address + IDENT_ADDR);
63 }
64 #else
/*
 * With the 43-bit KSEG, recover the physical address by sign-extending
 * the low 41 bits of the virtual address, then masking down to the
 * processor's actual physical address width (from the HWRPB).
 */
static inline unsigned long virt_to_phys(volatile void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41. */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor. */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}
78
phys_to_virt(unsigned long address)79 static inline void * phys_to_virt(unsigned long address)
80 {
81 return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
82 }
83 #endif
84
85 #define virt_to_phys virt_to_phys
86 #define phys_to_virt phys_to_virt
87
88 /* Maximum PIO space address supported? */
89 #define IO_SPACE_LIMIT 0xffff
90
91 /*
92 * Change addresses as seen by the kernel (virtual) to addresses as
93 * seen by a device (bus), and vice versa.
94 *
95 * Note that this only works for a limited range of kernel addresses,
96 * and very well may not span all memory. Consider this interface
97 * deprecated in favour of the DMA-mapping API.
98 */
99 extern unsigned long __direct_map_base;
100 extern unsigned long __direct_map_size;
101
/*
 * Kernel virtual address -> ISA bus address via the direct-map window.
 * Returns 0 when the physical address falls outside the window.
 * Deprecated in favour of the DMA-mapping API.
 */
static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
108 #define isa_virt_to_bus isa_virt_to_bus
109
/*
 * ISA bus address -> kernel virtual address.  Returns NULL for bus
 * addresses at or below the direct-map base (including bus address 0).
 * Deprecated in favour of the DMA-mapping API.
 */
static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved). */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
121 #define isa_bus_to_virt isa_bus_to_virt
122
123 /*
124 * There are different chipsets to interface the Alpha CPUs to the world.
125 */
126
127 #define IO_CONCAT(a,b) _IO_CONCAT(a,b)
128 #define _IO_CONCAT(a,b) a ## _ ## b
129
130 #ifdef CONFIG_ALPHA_GENERIC
131
132 /* In a generic kernel, we always go through the machine vector. */
133
/*
 * REMAP1 generates a read accessor (one pointer argument) and REMAP2
 * a write accessor (value plus pointer) that simply forward to the
 * machine-vector function pointer of the same name.
 */
#define REMAP1(TYPE, NAME, QUAL) \
static inline TYPE generic_##NAME(QUAL void __iomem *addr) \
{ \
	return alpha_mv.mv_##NAME(addr); \
}

#define REMAP2(TYPE, NAME, QUAL) \
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr) \
{ \
	alpha_mv.mv_##NAME(b, addr); \
}

/* Read accessors. */
REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u64, ioread64, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

/* Write accessors. */
REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u64, iowrite64, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2
166
/* Map an I/O port number through the machine vector. */
extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}
171
/* Map a bus address range through the machine vector. */
static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}
176
/*
 * Tear down a mapping made by generic_ioremap(), via the machine
 * vector.  Note: using "return expr;" on a void-valued expression in
 * a void function is a GNU extension and an ISO C constraint
 * violation; a plain call expresses the same thing portably.
 */
static inline void generic_iounmap(volatile void __iomem *a)
{
	alpha_mv.mv_iounmap(a);
}
181
/* Non-zero if @a is an I/O-space address, per the machine vector. */
static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

/* Non-zero if @a refers to MMIO space, per the machine vector. */
static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}
191
192 #define __IO_PREFIX generic
193 #define generic_trivial_rw_bw 0
194 #define generic_trivial_rw_lq 0
195 #define generic_trivial_io_bw 0
196 #define generic_trivial_io_lq 0
197 #define generic_trivial_iounmap 0
198
199 #else
200
201 #if defined(CONFIG_ALPHA_CIA)
202 # include <asm/core_cia.h>
203 #elif defined(CONFIG_ALPHA_IRONGATE)
204 # include <asm/core_irongate.h>
205 #elif defined(CONFIG_ALPHA_MARVEL)
206 # include <asm/core_marvel.h>
207 #elif defined(CONFIG_ALPHA_MCPCIA)
208 # include <asm/core_mcpcia.h>
209 #elif defined(CONFIG_ALPHA_POLARIS)
210 # include <asm/core_polaris.h>
211 #elif defined(CONFIG_ALPHA_T2)
212 # include <asm/core_t2.h>
213 #elif defined(CONFIG_ALPHA_TSUNAMI)
214 # include <asm/core_tsunami.h>
215 #elif defined(CONFIG_ALPHA_TITAN)
216 # include <asm/core_titan.h>
217 #elif defined(CONFIG_ALPHA_WILDFIRE)
218 # include <asm/core_wildfire.h>
219 #else
220 #error "What system is this?"
221 #endif
222
223 #endif /* GENERIC */
224
225 /*
226 * We always have external versions of these routines.
227 */
228 extern u8 inb(unsigned long port);
229 extern u16 inw(unsigned long port);
230 extern u32 inl(unsigned long port);
231 extern void outb(u8 b, unsigned long port);
232 extern void outw(u16 b, unsigned long port);
233 extern void outl(u32 b, unsigned long port);
234 #define inb inb
235 #define inw inw
236 #define inl inl
237 #define outb outb
238 #define outw outw
239 #define outl outl
240
241 extern u8 readb(const volatile void __iomem *addr);
242 extern u16 readw(const volatile void __iomem *addr);
243 extern u32 readl(const volatile void __iomem *addr);
244 extern u64 readq(const volatile void __iomem *addr);
245 extern void writeb(u8 b, volatile void __iomem *addr);
246 extern void writew(u16 b, volatile void __iomem *addr);
247 extern void writel(u32 b, volatile void __iomem *addr);
248 extern void writeq(u64 b, volatile void __iomem *addr);
249 #define readb readb
250 #define readw readw
251 #define readl readl
252 #define readq readq
253 #define writeb writeb
254 #define writew writew
255 #define writel writel
256 #define writeq writeq
257
258 extern u8 __raw_readb(const volatile void __iomem *addr);
259 extern u16 __raw_readw(const volatile void __iomem *addr);
260 extern u32 __raw_readl(const volatile void __iomem *addr);
261 extern u64 __raw_readq(const volatile void __iomem *addr);
262 extern void __raw_writeb(u8 b, volatile void __iomem *addr);
263 extern void __raw_writew(u16 b, volatile void __iomem *addr);
264 extern void __raw_writel(u32 b, volatile void __iomem *addr);
265 extern void __raw_writeq(u64 b, volatile void __iomem *addr);
266 #define __raw_readb __raw_readb
267 #define __raw_readw __raw_readw
268 #define __raw_readl __raw_readl
269 #define __raw_readq __raw_readq
270 #define __raw_writeb __raw_writeb
271 #define __raw_writew __raw_writew
272 #define __raw_writel __raw_writel
273 #define __raw_writeq __raw_writeq
274
275 extern unsigned int ioread8(const void __iomem *);
276 extern unsigned int ioread16(const void __iomem *);
277 extern unsigned int ioread32(const void __iomem *);
278 extern u64 ioread64(const void __iomem *);
279
280 extern void iowrite8(u8, void __iomem *);
281 extern void iowrite16(u16, void __iomem *);
282 extern void iowrite32(u32, void __iomem *);
283 extern void iowrite64(u64, void __iomem *);
284
285 extern void ioread8_rep(const void __iomem *port, void *buf, unsigned long count);
286 extern void ioread16_rep(const void __iomem *port, void *buf, unsigned long count);
287 extern void ioread32_rep(const void __iomem *port, void *buf, unsigned long count);
288
289 extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
290 extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
291 extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
292
/* Map a legacy I/O port range; @size is not needed on Alpha. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

/* Port mappings need no teardown on Alpha; intentionally empty. */
extern inline void ioport_unmap(void __iomem *addr)
{
}
301
302 #define ioport_map ioport_map
303 #define ioport_unmap ioport_unmap
304
/* Map bus memory into CPU space via the chipset-specific ioremap. */
static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}
309
310 #define ioremap_wc ioremap
311
/* Release a mapping created by ioremap(). */
static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}
316
/* Non-zero if @addr lies in I/O space for this chipset. */
static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
321 #define __is_ioaddr(a) __is_ioaddr((unsigned long)(a))
322
/* Non-zero if @addr refers to MMIO rather than port I/O space. */
static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}
327
328
329 /*
330 * If the actual I/O bits are sufficiently trivial, then expand inline.
331 */
332
333 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
/* Byte/word port reads, bracketed by full memory barriers. */
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

/* Writes take only a leading barrier, ordering with prior accesses. */
extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

/* Port I/O, built on ioport_map() plus the accessors above. */
extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
383 #endif
384
385 #define ioread8 ioread8
386 #define ioread16 ioread16
387 #define iowrite8 iowrite8
388 #define iowrite16 iowrite16
389
390 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
/* 32-bit port read, bracketed by full memory barriers. */
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}
399
ioread64(const void __iomem * addr)400 extern inline u64 ioread64(const void __iomem *addr)
401 {
402 unsigned int ret;
403 mb();
404 ret = IO_CONCAT(__IO_PREFIX,ioread64)(addr);
405 mb();
406 return ret;
407 }
408
/* 32/64-bit port writes; leading barrier orders with prior accesses. */
extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline void iowrite64(u64 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite64)(b, addr);
}
420
/* 32-bit port I/O, built on ioport_map() plus ioread32/iowrite32. */
extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
430 #endif
431
432 #define ioread32 ioread32
433 #define ioread64 ioread64
434 #define iowrite32 iowrite32
435 #define iowrite64 iowrite64
436
437 #if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
/* Raw byte/word MMIO accessors: no barriers, direct chipset access. */
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

/* Ordered variants: reads barrier on both sides, writes before. */
extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
487 #endif
488
489 #if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
/* Raw longword/quadword MMIO accessors: no barriers. */
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

/* Ordered variants: reads barrier on both sides, writes before. */
extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
539 #endif
540
541 #define ioread16be(p) swab16(ioread16(p))
542 #define ioread32be(p) swab32(ioread32(p))
543 #define ioread64be(p) swab64(ioread64(p))
544 #define iowrite16be(v,p) iowrite16(swab16(v), (p))
545 #define iowrite32be(v,p) iowrite32(swab32(v), (p))
546 #define iowrite64be(v,p) iowrite64(swab64(v), (p))
547
548 #define inb_p inb
549 #define inw_p inw
550 #define inl_p inl
551 #define outb_p outb
552 #define outw_p outw
553 #define outl_p outl
554
555 extern u8 readb_relaxed(const volatile void __iomem *addr);
556 extern u16 readw_relaxed(const volatile void __iomem *addr);
557 extern u32 readl_relaxed(const volatile void __iomem *addr);
558 extern u64 readq_relaxed(const volatile void __iomem *addr);
559 #define readb_relaxed readb_relaxed
560 #define readw_relaxed readw_relaxed
561 #define readl_relaxed readl_relaxed
562 #define readq_relaxed readq_relaxed
563
564 #if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
/*
 * "Relaxed" byte/word reads still issue a leading mb(); only the
 * trailing barrier of the ordered variants is dropped.
 */
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
576 #endif
577
578 #if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
/*
 * "Relaxed" longword/quadword reads still issue a leading mb();
 * only the trailing barrier of the ordered variants is dropped.
 */
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
590 #endif
591
592 #define writeb_relaxed writeb
593 #define writew_relaxed writew
594 #define writel_relaxed writel
595 #define writeq_relaxed writeq
596
597 /*
598 * String version of IO memory access ops:
599 */
600 extern void memcpy_fromio(void *, const volatile void __iomem *, long);
601 extern void memcpy_toio(volatile void __iomem *, const void *, long);
602 extern void _memset_c_io(volatile void __iomem *, unsigned long, long);
603
/* Fill MMIO with byte @c by replicating it across a 64-bit pattern. */
static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}
608
609 #define __HAVE_ARCH_MEMSETW_IO
/* Fill MMIO with 16-bit value @c replicated across a 64-bit pattern. */
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}
614
615 #define memset_io memset_io
616 #define memcpy_fromio memcpy_fromio
617 #define memcpy_toio memcpy_toio
618
619 /*
620 * String versions of in/out ops:
621 */
622 extern void insb (unsigned long port, void *dst, unsigned long count);
623 extern void insw (unsigned long port, void *dst, unsigned long count);
624 extern void insl (unsigned long port, void *dst, unsigned long count);
625 extern void outsb (unsigned long port, const void *src, unsigned long count);
626 extern void outsw (unsigned long port, const void *src, unsigned long count);
627 extern void outsl (unsigned long port, const void *src, unsigned long count);
628
629 #define insb insb
630 #define insw insw
631 #define insl insl
632 #define outsb outsb
633 #define outsw outsw
634 #define outsl outsl
635
636 #define RTC_PORT(x) (0x70 + (x))
637 #define RTC_ALWAYS_BCD 0
638
639 #define ioread64 ioread64
640 #define iowrite64 iowrite64
641 #define ioread8_rep ioread8_rep
642 #define ioread16_rep ioread16_rep
643 #define ioread32_rep ioread32_rep
644 #define iowrite8_rep iowrite8_rep
645 #define iowrite16_rep iowrite16_rep
646 #define iowrite32_rep iowrite32_rep
647 #define pci_iounmap pci_iounmap
648
649 #include <asm-generic/io.h>
650
651 #endif /* __KERNEL__ */
652
653 #endif /* __ALPHA_IO_H */
654