/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/instruction_pointer.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif
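
/*
 * Illustrative sketch (not part of this header): an architecture that needs
 * stronger I/O ordering can define the hooks above in its own <asm/io.h>
 * before including this file; anything left undefined falls back to the
 * defaults. The arch_io_*_barrier() names below are hypothetical.
 *
 * Ex:	#define __io_br()	arch_io_read_barrier()
 *	#define __io_ar(v)	arch_io_read_barrier()
 *	#define __io_bw()	arch_io_write_barrier()
 *	#include <asm-generic/io.h>
 */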

/*
 * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
 * specific kernel drivers in case of excessive/unwanted logging.
 *
 * Usage: Add a #define flag at the beginning of the driver file.
 * Ex: #define __DISABLE_TRACE_MMIO__
 *     #include <...>
 *     ...
 */
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>

#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);

void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
		    unsigned long caller_addr, unsigned long caller_addr0);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
			 unsigned long caller_addr, unsigned long caller_addr0);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
		   unsigned long caller_addr, unsigned long caller_addr0);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
			unsigned long caller_addr, unsigned long caller_addr0);

#else

#define rwmmio_tracepoint_enabled(tracepoint) false
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				  unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				       unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
				 unsigned long caller_addr, unsigned long caller_addr0) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
				      unsigned long caller_addr, unsigned long caller_addr0) {}

#endif /* CONFIG_TRACE_MMIO_ACCESS */

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */
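
/*
 * Illustrative sketch (not part of this header): __raw_readl() is a
 * native-endian access with no barriers, while readl() below byte-swaps
 * from little endian and brackets the access with __io_br()/__io_ar().
 * Portable drivers should normally prefer readl(); base is an __iomem
 * pointer from ioremap() and REG_OFF is a hypothetical register offset.
 *
 * Ex:	u32 raw = __raw_readl(base + REG_OFF);	// no ordering, no swap
 *	u32 val = readl(base + REG_OFF);	// ordered, little endian
 */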

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	__io_br();
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	__io_ar(val);
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__io_bw();
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	__io_aw();
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
#endif /* CONFIG_64BIT */
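
/*
 * Illustrative sketch (not part of this header): the __io_bw() barrier in
 * the write accessors is what makes the classic descriptor-then-doorbell
 * pattern safe, ensuring stores to coherent DMA memory are visible to the
 * device before the doorbell write can trigger a DMA read. The desc,
 * dma_addr, len and DOORBELL_REG names are hypothetical.
 *
 * Ex:	desc->addr = cpu_to_le64(dma_addr);	// plain store to DMA memory
 *	desc->len  = cpu_to_le32(len);
 *	writel(1, base + DOORBELL_REG);		// __io_bw() orders the above
 */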

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	u8 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
	val = __raw_readb(addr);
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	u16 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	u32 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	u64 val;

	if (rwmmio_tracepoint_enabled(rwmmio_read))
		log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	if (rwmmio_tracepoint_enabled(rwmmio_post_read))
		log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
	return val;
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
	__raw_writeb(value, addr);
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	if (rwmmio_tracepoint_enabled(rwmmio_write))
		log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	if (rwmmio_tracepoint_enabled(rwmmio_post_write))
		log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
}
#endif
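
/*
 * Illustrative sketch (not part of this header): a busy-wait loop that only
 * inspects a status register can use the cheaper relaxed form, switching to
 * the fully ordered readl() once it must observe data the device DMA'd to
 * memory. STATUS_REG, STATUS_DONE and DATA_REG are hypothetical.
 *
 * Ex:	while (!(readl_relaxed(base + STATUS_REG) & STATUS_DONE))
 *		cpu_relax();
 *	val = readl(base + DATA_REG);	// ordered against the DMA'd data
 */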

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif
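
/*
 * Illustrative sketch (not part of this header): readsl() reads the same
 * MMIO address repeatedly, which matches hardware FIFOs where every read
 * pops one word; note the data is not byte-swapped. RX_FIFO_REG and the
 * 16-word count are hypothetical.
 *
 * Ex:	u32 buf[16];
 *
 *	readsl(base + RX_FIFO_REG, buf, 16);	// pop 16 words from the FIFO
 */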

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */

#if !defined(inb) && !defined(_inb)
#define _inb _inb
#ifdef CONFIG_HAS_IOPORT
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#else
u8 _inb(unsigned long addr)
	__compiletime_error("inb() requires CONFIG_HAS_IOPORT");
#endif
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
#ifdef CONFIG_HAS_IOPORT
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#else
u16 _inw(unsigned long addr)
	__compiletime_error("inw() requires CONFIG_HAS_IOPORT");
#endif
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
#ifdef CONFIG_HAS_IOPORT
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#else
u32 _inl(unsigned long addr)
	__compiletime_error("inl() requires CONFIG_HAS_IOPORT");
#endif
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
#ifdef CONFIG_HAS_IOPORT
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#else
void _outb(u8 value, unsigned long addr)
	__compiletime_error("outb() requires CONFIG_HAS_IOPORT");
#endif
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
#ifdef CONFIG_HAS_IOPORT
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#else
void _outw(u16 value, unsigned long addr)
	__compiletime_error("outw() requires CONFIG_HAS_IOPORT");
#endif
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
#ifdef CONFIG_HAS_IOPORT
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#else
void _outl(u32 value, unsigned long addr)
	__compiletime_error("outl() requires CONFIG_HAS_IOPORT");
#endif
#endif

#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif
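
/*
 * Illustrative sketch (not part of this header): port I/O addresses index
 * the I/O space below IO_SPACE_LIMIT rather than being pointers. A minimal
 * exchange with an index/data style legacy device might look as follows;
 * the port numbers are hypothetical.
 *
 * Ex:	outb(0x12, 0x70);	// select a register via the index port
 *	val = inb(0x71);	// read it back through the data port
 */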

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port.
 */

#ifndef insb
#define insb insb
#ifdef CONFIG_HAS_IOPORT
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#else
void insb(unsigned long addr, void *buffer, unsigned int count)
	__compiletime_error("insb() requires CONFIG_HAS_IOPORT");
#endif
#endif

#ifndef insw
#define insw insw
#ifdef CONFIG_HAS_IOPORT
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#else
void insw(unsigned long addr, void *buffer, unsigned int count)
	__compiletime_error("insw() requires CONFIG_HAS_IOPORT");
#endif
#endif

#ifndef insl
#define insl insl
#ifdef CONFIG_HAS_IOPORT
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#else
void insl(unsigned long addr, void *buffer, unsigned int count)
	__compiletime_error("insl() requires CONFIG_HAS_IOPORT");
#endif
#endif

#ifndef outsb
#define outsb outsb
#ifdef CONFIG_HAS_IOPORT
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#else
void outsb(unsigned long addr, const void *buffer, unsigned int count)
	__compiletime_error("outsb() requires CONFIG_HAS_IOPORT");
#endif
#endif

#ifndef outsw
#define outsw outsw
#ifdef CONFIG_HAS_IOPORT
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#else
void outsw(unsigned long addr, const void *buffer, unsigned int count)
	__compiletime_error("outsw() requires CONFIG_HAS_IOPORT");
#endif
#endif

#ifndef outsl
#define outsl outsl
#ifdef CONFIG_HAS_IOPORT
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#else
void outsl(unsigned long addr, const void *buffer, unsigned int count)
	__compiletime_error("outsl() requires CONFIG_HAS_IOPORT");
#endif
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif
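
/*
 * Illustrative sketch (not part of this header): outsw() pushes a buffer
 * one 16-bit word at a time through a single data port, the classic PIO
 * block-transfer pattern. DATA_PORT and the 256-word count are
 * hypothetical.
 *
 * Ex:	u16 sector[256];	// filled elsewhere
 *
 *	outsw(DATA_PORT, sector, 256);	// 256 words to one port
 */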

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */
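
/*
 * Illustrative sketch (not part of this header): ioread32be() is for
 * devices whose registers are big endian; it byte-swaps on little-endian
 * CPUs and is effectively a plain ordered load on big-endian ones.
 * CTRL_REG and the enable bit are hypothetical.
 *
 * Ex:	u32 ctrl = ioread32be(base + CTRL_REG);
 *	iowrite32be(ctrl | 0x1, base + CTRL_REG);
 */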

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
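
/*
 * Illustrative sketch (not part of this header): virt_to_phys() is only
 * meaningful for addresses in the kernel's linear mapping, such as
 * kmalloc() memory; it must not be used on vmalloc() or ioremap()
 * addresses, and phys_to_virt() is its inverse under the same constraint.
 *
 * Ex:	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);	// valid: linear mapping
 */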

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
 * a default no-op implementation that expects the physical addresses used
 * for MMIO to already be marked as uncached and usable as kernel virtual
 * addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap(). Conversely, ioremap_np()
 * can provide stricter non-posted write semantics if the architecture
 * implements them.
 */
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
				   pgprot_t prot);

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   pgprot_t prot);
void iounmap(volatile void __iomem *addr);
void generic_iounmap(volatile void __iomem *addr);

#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, __pgprot(_PAGE_IOREMAP));
}
#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation. In general you do not want to use this function in a
 * driver and use plain ioremap, which is uncached by default. Similarly
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap(). Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus. Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available (see the sketch below).
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
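
/*
 * Illustrative sketch (not part of this header): the fallback pattern
 * described above, for a driver that prefers non-posted writes but must
 * keep working where ioremap_np() returns NULL. res is a hypothetical
 * struct resource describing the device's register window.
 *
 * Ex:	regs = ioremap_np(res->start, resource_size(res));
 *	if (!regs)
 *		regs = ioremap(res->start, resource_size(res));
 */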

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
#ifndef pci_iounmap
#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
#endif
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifndef memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @val: The value to set the memory to
 * @count: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
void memset_io(volatile void __iomem *addr, int val, size_t count);
#endif

#ifndef memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @dst: The (RAM) destination for the copy
 * @src: The (I/O memory) source for the data
 * @count: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count);
#endif

#ifndef memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @dst: The (I/O memory) destination for the copy
 * @src: The (RAM) source for the data
 * @count: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count);
#endif

extern int devmem_is_allowed(unsigned long pfn);

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */