/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/instruction_pointer.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif

/*
 * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
 * specific kernel drivers in case of excessive/unwanted logging.
 *
 * Usage: Add a #define flag at the beginning of the driver file.
 * Ex: #define __DISABLE_TRACE_MMIO__
 *     #include <...>
 *     ...
 */
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);

void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
		    unsigned long caller_addr, unsigned long caller_addr0);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
			 unsigned long caller_addr, unsigned long caller_addr0);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
		   unsigned long caller_addr, unsigned long caller_addr0);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
			unsigned long caller_addr, unsigned long caller_addr0);

#else

static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				  unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				       unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
				 unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
				      unsigned long caller_addr, unsigned long caller_addr0) {}

#endif /* CONFIG_TRACE_MMIO_ACCESS */

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
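
/*
 * Example (editor's illustrative sketch, not part of the kernel API):
 * a raw access does no byte swapping and inserts no barriers, so it is
 * only appropriate when the caller handles both itself. The names "regs"
 * and EX_DESC_LO below are hypothetical.
 *
 *	static u32 ex_read_desc_lo(const void __iomem *regs)
 *	{
 *		// native-endian, unordered read of a made-up register
 *		return __raw_readl(regs + EX_DESC_LO);
 *	}
 */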

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	__io_ar(val);
	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	__io_aw();
	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
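
/*
 * Example (editor's sketch, hypothetical device): a doorbell write followed
 * by a status poll. writel() orders the store after prior writes to coherent
 * DMA memory (__io_bw), and readl() prevents later accesses to DMA data from
 * being speculated ahead of the read (__io_ar). "ring", EX_DB, EX_STAT and
 * EX_STAT_DONE are made-up names.
 *
 *	writel(ring->tail, ring->regs + EX_DB);
 *	while (!(readl(ring->regs + EX_STAT) & EX_STAT_DONE))
 *		cpu_relax();
 */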

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	val = __raw_readb(addr);
	log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__raw_writeb(value, addr);
	log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
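
/*
 * Example (editor's sketch): acknowledging a hypothetical write-1-to-clear
 * interrupt status register. No DMA buffer is touched, so the ordering
 * guarantees of readl()/writel() are not needed and the cheaper relaxed
 * accessors suffice. EX_ISR is a made-up offset.
 *
 *	u32 pending = readl_relaxed(regs + EX_ISR);
 *	writel_relaxed(pending, regs + EX_ISR);
 */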

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */
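
/*
 * Example (editor's sketch): draining a hypothetical 32-bit data FIFO that
 * is exposed as a single register. Every iteration hits the same address,
 * and no byte swapping is done because FIFOs carry byte streams in bus
 * order. "EX_FIFO" is a made-up offset.
 *
 *	u32 buf[16];
 *	readsl(regs + EX_FIFO, buf, ARRAY_SIZE(buf));
 */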

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
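
/*
 * Example (editor's sketch): legacy port I/O against a 16550-style UART at
 * the conventional COM1 base. On architectures without real port I/O
 * instructions the port number becomes an offset into the PCI_IOBASE
 * window. The names below are illustrative only.
 *
 *	#define EX_UART_BASE	0x3f8
 *	outb('A', EX_UART_BASE);		// transmit holding register
 *	u8 lsr = inb(EX_UART_BASE + 5);		// line status register
 */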

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
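
/*
 * Example (editor's sketch): reading one 512-byte sector from a
 * hypothetical legacy IDE-style data port, 16 bits at a time from the same
 * port address. EX_IDE_DATA is a made-up port number.
 *
 *	u16 sector[256];
 *	insw(EX_IDE_DATA, sector, ARRAY_SIZE(sector));
 */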

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */
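
/*
 * Example (editor's sketch): a device with big-endian registers.
 * ioread32be() treats the register as big-endian and returns the value in
 * native CPU order, whatever the host endianness. EX_CTRL and EX_CTRL_EN
 * are made-up names.
 *
 *	u32 ctrl = ioread32be(regs + EX_CTRL);
 *	iowrite32be(ctrl | EX_CTRL_EN, regs + EX_CTRL);
 */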

#ifdef __KERNEL__

#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
 * a default no-op implementation that expects that the physical addresses
 * used for MMIO are already marked as uncached, and can be used as kernel
 * virtual addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap(). Conversely, ioremap_np()
 * can provide stricter non-posted write semantics if the architecture
 * implements them.
 */
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot);

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot);
void iounmap(volatile void __iomem *addr);
void generic_iounmap(volatile void __iomem *addr);

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation. In general you do not want to use this function in a
 * driver and use plain ioremap, which is uncached by default. Similarly
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
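
/*
 * Example (editor's sketch): mapping a device's register window in a
 * hypothetical probe routine. Real drivers should normally prefer managed
 * helpers such as devm_ioremap_resource(), which pick the right variant
 * and handle cleanup automatically.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */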

/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap(). Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus. Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available.
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
#ifndef pci_iounmap
#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
#endif
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif

extern int devmem_is_allowed(unsigned long pfn);

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */