/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface. */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset.
 * (KSEG: the kernel's direct-mapped segment; its base depends on
 * whether the 48-bit kernel segment layout is in use.)
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR	0xffff800000000000UL
#else
#define IDENT_ADDR	0xfffffc0000000000UL
#endif

/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
extern inline void __set_hae(unsigned long new_hae)
{
	/* Block interrupts while the cache and the register are updated
	   together, so an interrupt handler never sees them disagree.  */
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

/* Update the HAE only when it actually changes (uses the cached copy
   to skip the expensive __set_hae path).  */
extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vv.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(volatile void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(volatile void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	/* Mask to the 41-bit physical range before adding the
	   identity-map base.  */
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

/* Tell <asm-generic/io.h> that we provide our own versions.  */
#define virt_to_phys		virt_to_phys
#define phys_to_virt		phys_to_virt
#define page_to_phys(page)	page_to_pa(page)

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated isa_virt_to_bus(volatile void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	/* Addresses outside the direct-map window yield 0 (no bus
	   mapping).  */
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus isa_virt_to_bus

static inline void * __deprecated isa_bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt isa_bus_to_virt

/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

/* Two-level expansion so that macro arguments are themselves expanded
   before pasting (e.g. __IO_PREFIX -> generic).  */
#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b

#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

/* REMAP1: generate a reader trampoline (TYPE generic_NAME(addr)).
   REMAP2: generate a writer trampoline (void generic_NAME(b, addr)).
   QUAL supplies the pointer qualifiers for the address argument.  */
#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

/* Nothing is "trivial" for a generic kernel: every access goes via the
   machine vector, so all the inline fast paths below stay disabled.  */
#define __IO_PREFIX		generic
#define generic_trivial_rw_bw	0
#define generic_trivial_rw_lq	0
#define generic_trivial_io_bw	0
#define generic_trivial_io_lq	0
#define generic_trivial_iounmap	0

#else

/* Non-generic kernel: pull in exactly one chipset header, which defines
   __IO_PREFIX and the <prefix>_trivial_* flags used below.  */
#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 * (The inline definitions below only replace them when the chipset
 * marks the corresponding access class as "trivial".)
 */
extern u8		inb(unsigned long port);
extern u16		inw(unsigned long port);
extern u32		inl(unsigned long port);
extern void		outb(u8 b, unsigned long port);
extern void		outw(u16 b, unsigned long port);
extern void		outl(u32 b, unsigned long port);
#define inb inb
#define inw inw
#define inl inl
#define outb outb
#define outw outw
#define outl outl

extern u8		readb(const volatile void __iomem *addr);
extern u16		readw(const volatile void __iomem *addr);
extern u32		readl(const volatile void __iomem *addr);
extern u64		readq(const volatile void __iomem *addr);
extern void		writeb(u8 b, volatile void __iomem *addr);
extern void		writew(u16 b, volatile void __iomem *addr);
extern void		writel(u32 b, volatile void __iomem *addr);
extern void		writeq(u64 b, volatile void __iomem *addr);
#define readb readb
#define readw readw
#define readl readl
#define readq readq
#define writeb writeb
#define writew writew
#define writel writel
#define writeq writeq

extern u8		__raw_readb(const volatile void __iomem *addr);
extern u16		__raw_readw(const volatile void __iomem *addr);
extern u32		__raw_readl(const volatile void __iomem *addr);
extern u64		__raw_readq(const volatile void __iomem *addr);
extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
extern void		__raw_writew(u16 b, volatile void __iomem *addr);
extern void		__raw_writel(u32 b, volatile void __iomem *addr);
extern void		__raw_writeq(u64 b, volatile void __iomem *addr);
#define __raw_readb __raw_readb
#define __raw_readw __raw_readw
#define __raw_readl __raw_readl
#define __raw_readq __raw_readq
#define __raw_writeb __raw_writeb
#define __raw_writew __raw_writew
#define __raw_writel __raw_writel
#define __raw_writeq __raw_writeq

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration. */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

#define ioport_map ioport_map
#define ioport_unmap ioport_unmap

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

#define ioremap_wc ioremap
#define ioremap_uc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
/* Accept any pointer type at the call site by casting to unsigned long.  */
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 * These gnu89 "extern inline" bodies override the extern prototypes
 * above; otherwise the out-of-line versions are used.
 */

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#define ioread8 ioread8
#define ioread16 ioread16
#define iowrite8 iowrite8
#define iowrite16 iowrite16

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#define ioread32 ioread32
#define iowrite32 iowrite32

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
#endif

/* Alpha is little-endian, so the big-endian accessors byte-swap.  */
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))

/* No I/O slowdown needed: the "pausing" variants are plain accesses.  */
#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

extern u8		readb_relaxed(const volatile void __iomem *addr);
extern u16		readw_relaxed(const volatile void __iomem *addr);
extern u32		readl_relaxed(const volatile void __iomem *addr);
extern u64		readq_relaxed(const volatile void __iomem *addr);
#define readb_relaxed readb_relaxed
#define readw_relaxed readw_relaxed
#define readl_relaxed readl_relaxed
#define readq_relaxed readq_relaxed

/* The "relaxed" readers keep the leading mb() but drop the trailing
   one that the ordinary readX() variants above issue.  */
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
#endif

#define writeb_relaxed	writeb
#define writew_relaxed	writew
#define writel_relaxed	writel
#define writeq_relaxed	writeq

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	/* Replicate the byte into all 8 lanes of a 64-bit word.  */
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	/* Replicate the 16-bit value into all 4 lanes of a 64-bit word.  */
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}

#define memset_io memset_io
#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio

/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

#define insb insb
#define insw insw
#define insl insl
#define outsb outsb
#define outsw outsw
#define outsl outsl

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * These get provided from <asm-generic/iomap.h> since alpha does not
 * select GENERIC_IOMAP.
 */
#define ioread64 ioread64
#define iowrite64 iowrite64
#define ioread64be ioread64be
#define iowrite64be iowrite64be
#define ioread8_rep ioread8_rep
#define ioread16_rep ioread16_rep
#define ioread32_rep ioread32_rep
#define iowrite8_rep iowrite8_rep
#define iowrite16_rep iowrite16_rep
#define iowrite32_rep iowrite32_rep
#define pci_iounmap pci_iounmap

#include <asm-generic/io.h>

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */