/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

/*
 * Ordering hooks wrapped around every MMIO/PIO accessor below.
 * Architectures may pre-define any of them; the generic fallbacks use the
 * full memory barriers when those exist and otherwise degrade to a plain
 * compiler barrier().
 */
#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

/* The port I/O ("p") variants default to the MMIO barriers above. */
#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 *
 * Note the "#define __raw_readb __raw_readb" self-definition idiom used
 * throughout this header: it lets later code test with #ifndef whether an
 * architecture already supplied its own implementation.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	/* __force strips the __iomem address space for the actual access */
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness
144 */ 145 146 #ifndef readb 147 #define readb readb 148 static inline u8 readb(const volatile void __iomem *addr) 149 { 150 u8 val; 151 152 __io_br(); 153 val = __raw_readb(addr); 154 __io_ar(val); 155 return val; 156 } 157 #endif 158 159 #ifndef readw 160 #define readw readw 161 static inline u16 readw(const volatile void __iomem *addr) 162 { 163 u16 val; 164 165 __io_br(); 166 val = __le16_to_cpu((__le16 __force)__raw_readw(addr)); 167 __io_ar(val); 168 return val; 169 } 170 #endif 171 172 #ifndef readl 173 #define readl readl 174 static inline u32 readl(const volatile void __iomem *addr) 175 { 176 u32 val; 177 178 __io_br(); 179 val = __le32_to_cpu((__le32 __force)__raw_readl(addr)); 180 __io_ar(val); 181 return val; 182 } 183 #endif 184 185 #ifdef CONFIG_64BIT 186 #ifndef readq 187 #define readq readq 188 static inline u64 readq(const volatile void __iomem *addr) 189 { 190 u64 val; 191 192 __io_br(); 193 val = __le64_to_cpu(__raw_readq(addr)); 194 __io_ar(val); 195 return val; 196 } 197 #endif 198 #endif /* CONFIG_64BIT */ 199 200 #ifndef writeb 201 #define writeb writeb 202 static inline void writeb(u8 value, volatile void __iomem *addr) 203 { 204 __io_bw(); 205 __raw_writeb(value, addr); 206 __io_aw(); 207 } 208 #endif 209 210 #ifndef writew 211 #define writew writew 212 static inline void writew(u16 value, volatile void __iomem *addr) 213 { 214 __io_bw(); 215 __raw_writew((u16 __force)cpu_to_le16(value), addr); 216 __io_aw(); 217 } 218 #endif 219 220 #ifndef writel 221 #define writel writel 222 static inline void writel(u32 value, volatile void __iomem *addr) 223 { 224 __io_bw(); 225 __raw_writel((u32 __force)__cpu_to_le32(value), addr); 226 __io_aw(); 227 } 228 #endif 229 230 #ifdef CONFIG_64BIT 231 #ifndef writeq 232 #define writeq writeq 233 static inline void writeq(u64 value, volatile void __iomem *addr) 234 { 235 __io_bw(); 236 __raw_writeq(__cpu_to_le64(value), addr); 237 __io_aw(); 238 } 239 #endif 240 #endif /* CONFIG_64BIT */ 241 242 /* 243 * 
{read,write}{b,w,l,q}_relaxed() are like the regular version, but 244 * are not guaranteed to provide ordering against spinlocks or memory 245 * accesses. 246 */ 247 #ifndef readb_relaxed 248 #define readb_relaxed readb_relaxed 249 static inline u8 readb_relaxed(const volatile void __iomem *addr) 250 { 251 return __raw_readb(addr); 252 } 253 #endif 254 255 #ifndef readw_relaxed 256 #define readw_relaxed readw_relaxed 257 static inline u16 readw_relaxed(const volatile void __iomem *addr) 258 { 259 return __le16_to_cpu(__raw_readw(addr)); 260 } 261 #endif 262 263 #ifndef readl_relaxed 264 #define readl_relaxed readl_relaxed 265 static inline u32 readl_relaxed(const volatile void __iomem *addr) 266 { 267 return __le32_to_cpu(__raw_readl(addr)); 268 } 269 #endif 270 271 #if defined(readq) && !defined(readq_relaxed) 272 #define readq_relaxed readq_relaxed 273 static inline u64 readq_relaxed(const volatile void __iomem *addr) 274 { 275 return __le64_to_cpu(__raw_readq(addr)); 276 } 277 #endif 278 279 #ifndef writeb_relaxed 280 #define writeb_relaxed writeb_relaxed 281 static inline void writeb_relaxed(u8 value, volatile void __iomem *addr) 282 { 283 __raw_writeb(value, addr); 284 } 285 #endif 286 287 #ifndef writew_relaxed 288 #define writew_relaxed writew_relaxed 289 static inline void writew_relaxed(u16 value, volatile void __iomem *addr) 290 { 291 __raw_writew(cpu_to_le16(value), addr); 292 } 293 #endif 294 295 #ifndef writel_relaxed 296 #define writel_relaxed writel_relaxed 297 static inline void writel_relaxed(u32 value, volatile void __iomem *addr) 298 { 299 __raw_writel(__cpu_to_le32(value), addr); 300 } 301 #endif 302 303 #if defined(writeq) && !defined(writeq_relaxed) 304 #define writeq_relaxed writeq_relaxed 305 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr) 306 { 307 __raw_writeq(__cpu_to_le64(value), addr); 308 } 309 #endif 310 311 /* 312 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in 313 * native endianness 
 * in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	/* count == 0 is a no-op; the do/while form avoids a redundant test */
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

/*
 * Base of the memory-mapped PCI I/O port window; port-I/O accessors below
 * add the port number to this.
 */
#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

/*
 * Included here (not at the top) so that logic_pio can itself override
 * inb()/outb() and friends before the fallback defines below.
 */
#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

/*
 * The _p ("pause") variants default to the plain accessors; architectures
 * that need an extra I/O delay provide their own.
 */
#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

/* _p string variants default to the non-delaying versions. */
#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

/*
 * ioreadN/iowriteN: generic fallbacks used only when CONFIG_GENERIC_IOMAP
 * is not selected (otherwise asm-generic/iomap.h provides them).
 */
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * Big-endian variants: the device register is little-endian as far as
 * readw()/writew() are concerned, so swapping the LE result yields the
 * big-endian value.
 */
#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, void volatile __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

/* _rep variants: repeat the access @count times into/out of @buffer. */
#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
/* strip the __iomem address space for the mem* helpers below */
#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP.
 * For NOMMU architectures we provide
 * a default no-op implementation that expects that the physical addresses
 * used for MMIO are already marked as uncached, and can be used as kernel
 * virtual addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap. Conversely, ioremap_np()
 * can provide stricter non-posted write semantics if the architecture
 * implements them.
 */
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	/* NOMMU: the physical address doubles as the kernel virtual address */
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(void __iomem *addr)
{
	/* nothing to undo for the identity mapping above */
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);

static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

/* fall back to plain (uncached) ioremap when not implemented by the arch */
#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation.  In general you do not want to use this function in a
 * driver and use plain ioremap, which is uncached by default.  Similarly
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	/* no arch implementation: callers must handle the NULL return */
	return NULL;
}
#endif

/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap(). Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus. Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available.
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	/* NOTE(review): MMIO_UPPER_LIMIT presumably comes from
	 * <linux/logic_pio.h> included above — confirm when changing. */
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define __pci_ioport_unmap __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p)
{
	uintptr_t start = (uintptr_t) PCI_IOBASE;
	uintptr_t addr = (uintptr_t) p;

	/* pointers inside the fixed port window were never vmapped */
	if (addr >= start && addr < start + IO_SPACE_LIMIT)
		return;
	iounmap(p);
}
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
	/* the generic ioport_map() above allocates nothing to release */
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
struct pci_dev;
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);

/* no-op fallback when ioport_map() above was not defined */
#ifndef __pci_ioport_unmap
static inline void __pci_ioport_unmap(void __iomem *p) {}
#endif

#ifndef pci_iounmap
#define pci_iounmap pci_iounmap
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
{
	__pci_ioport_unmap(p);
}
#endif
#endif /* CONFIG_GENERIC_IOMAP */

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#ifndef xlate_dev_kmem_ptr
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
static inline void *xlate_dev_kmem_ptr(void *addr)
{
	return addr;
}
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	/* nothing to undo for the trivial __va() translation above */
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif

#ifndef CONFIG_GENERIC_DEVMEM_IS_ALLOWED
extern int devmem_is_allowed(unsigned long pfn);
#endif

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */