#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <asm-generic/int-ll64.h>

/*
 * build_mmio_read()/build_mmio_write() generate the MMIO accessors as a
 * single mov instruction of the given size.  Passing :"memory" as the
 * "barrier" argument adds a compiler barrier so the access cannot be
 * reordered against other memory operations at compile time; the
 * leading-underscore variants omit it.
 */
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )

#define readb_relaxed(a)	__readb(a)
#define readw_relaxed(a)	__readw(a)
#define readl_relaxed(a)	__readl(a)
#define __raw_readb		__readb
#define __raw_readw		__readw
#define __raw_readl		__readl

#define __raw_writeb		__writeb
#define __raw_writew		__writew
#define __raw_writel		__writel

#define mmiowb()		barrier()

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
build_mmio_write(writeq, "q", unsigned long, "r", :"memory")

#else

/*
 * On 32-bit kernels a 64-bit MMIO access is emulated with two 32-bit
 * accesses (low word first), so it is not atomic as seen by the device.
 */
static inline __u64 readq(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);
	high = readl(p + 1);

	return low + ((u64)high << 32);
}

static inline void writeq(__u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr+4);
}

#endif

#define readq_relaxed(a)	readq(a)

#define __raw_readq(a)		readq(a)
#define __raw_writeq(val, addr)	writeq(val, addr)

/* Let people know that we have them */
#define readq			readq
#define writeq			writeq

#ifdef CONFIG_X86_32
# include "io_32.h"
#else
# include "io_64.h"
#endif

extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				unsigned long prot_val);
extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);

/*
 * early_ioremap() and early_iounmap() are for temporary early boot-time
 * mappings, before the real ioremap() is functional.
 * A boot-time mapping is currently limited to at most 16 pages.
 */
extern void early_ioremap_init(void);
extern void early_ioremap_reset(void);
extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
extern void early_iounmap(void __iomem *addr, unsigned long size);
extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);


#endif /* _ASM_X86_IO_H */
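/*
 * Usage sketch (illustrative only, not part of this header): a PCI
 * driver would typically ioremap() a BAR (ioremap() itself comes from
 * io_32.h / io_64.h included above) and then go through the accessors
 * defined here.  readl()/writel() carry a compiler barrier; the
 * *_relaxed() and __raw_*() variants do not.  The register offsets
 * REG_CTRL, REG_STATUS and the CTRL_ENABLE bit below are hypothetical.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(CTRL_ENABLE, regs + REG_CTRL);
 *	status = readl(regs + REG_STATUS);
 *	status = readl_relaxed(regs + REG_STATUS);
 *
 *	iounmap(regs);
 */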