#ifndef IO_H
#define IO_H


/* Amount of relocation etherboot is experiencing */
extern unsigned long virt_offset;

/* Don't require identity-mapped physical memory;
 * osloader.c is the only valid user at the moment.
 */
unsigned long virt_to_phys(volatile const void *virt_addr);
void *phys_to_virt(unsigned long phys_addr);

/* virt_to_bus converts an address inside of etherboot [_start, _end]
 * into an address that cards can use for memory accesses.
 */
#define virt_to_bus virt_to_phys


/* bus_to_virt reverses virt_to_bus; the address must be output
 * from virt_to_bus to be valid. This function does not work on
 * all bus addresses.
 */
#define bus_to_virt phys_to_virt

/* ioremap converts a random 32-bit bus address into something
 * etherboot can access.
 */
static inline void *ioremap(unsigned long bus_addr, unsigned long length __unused)
{
	return bus_to_virt(bus_addr);
}

/* iounmap cleans up anything ioremap had to set up */
static inline void iounmap(void *virt_addr __unused)
{
	return;
}
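/* Illustrative sketch (not part of the original header): a NIC driver would
 * typically map the card's register window with ioremap() and hand the card
 * a DMA-able buffer address obtained with virt_to_bus().  The register
 * layout below is hypothetical.
 */
static inline void example_dma_setup(unsigned long bar_addr, void *rx_buffer)
{
	volatile unsigned long *regs = ioremap(bar_addr, 4096);

	/* Tell the (hypothetical) card where to DMA received frames;
	 * register 0 is assumed to hold the RX buffer base address.
	 */
	regs[0] = virt_to_bus(rx_buffer);

	iounmap((void *)regs);
}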
/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
#else
#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
#endif

#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))

#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))

#define memcpy_fromio(a,b,c)	memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c)	memcpy((void *)(a),(b),(c))
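/* Illustrative sketch (not part of the original header): polling a
 * memory-mapped status register and acknowledging an interrupt through a
 * memory-mapped control register.  The offsets and bit value are
 * hypothetical; a real driver would take them from the device datasheet.
 */
static inline void example_ack_irq(void *mmio_base)
{
	unsigned int status;

	status = readl((char *)mmio_base + 0x10);	/* hypothetical status register */
	if (status & 0x1)
		writel(0x1, (char *)mmio_base + 0x14);	/* hypothetical ack register */
}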
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
 * nop for these.
 */

#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	mb()
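/* Illustrative sketch (not part of the original header): a driver fills in
 * a descriptor that the card will fetch via DMA, then notifies the card
 * through a memory-mapped "doorbell" register.  The wmb() ensures the
 * descriptor contents are visible before the doorbell write.  The
 * descriptor layout and doorbell register are hypothetical.
 */
static inline void example_post_descriptor(volatile unsigned int *desc,
					   void *doorbell,
					   unsigned long buffer_bus_addr)
{
	desc[0] = buffer_bus_addr;	/* hypothetical buffer address field */
	desc[1] = 1;			/* hypothetical "owned by card" flag */
	wmb();				/* descriptor must be visible first */
	writel(1, doorbell);		/* then ring the doorbell */
}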


/*
 * Talk about misusing macros..
 */

#define __OUT1(s,x) \
extern void __out##s(unsigned x value, unsigned short port); \
extern inline void __out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }

#define __IN1(s,x) \
extern unsigned x __in##s(unsigned short port); \
extern inline unsigned x __in##s(unsigned short port) { unsigned x _v;

#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s,s1,x,i...) \
__IN1(s,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
__IN1(s##c,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
__IN1(s##_p,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
__IN1(s##c_p,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }

#define __INS(s) \
extern void ins##s(unsigned short port, void * addr, unsigned long count); \
extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define __OUTS(s) \
extern void outs##s(unsigned short port, const void * addr, unsigned long count); \
extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

__IN(b,"", char)
__IN(w,"",short)
__IN(l,"", long)

__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)

__INS(b)
__INS(w)
__INS(l)

__OUTS(b)
__OUTS(w)
__OUTS(l)
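/* Illustrative sketch (not part of the original header): the macros above
 * expand into inline functions such as __inb(), __outb() and insw().  For
 * example, an ATA-style driver could pull one 512-byte sector out of a
 * 16-bit data port like this.  The port number is an assumed parameter,
 * not something defined by this header.
 */
static inline void example_read_sector(unsigned short data_port,
				       unsigned short *buf)
{
	/* 256 words of 16 bits each = 512 bytes */
	insw(data_port, buf, 256);
}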

/*
 * Note that due to the way __builtin_constant_p() works, you
 *  - can't use it inside an inline function (it will never be true)
 *  - you don't have to worry about side effects within the __builtin..
 */
#define outb(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outbc((val),(port)) : \
	__outb((val),(port)))

#define inb(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inbc(port) : \
	__inb(port))

#define outb_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outbc_p((val),(port)) : \
	__outb_p((val),(port)))

#define inb_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inbc_p(port) : \
	__inb_p(port))

#define outw(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outwc((val),(port)) : \
	__outw((val),(port)))

#define inw(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inwc(port) : \
	__inw(port))

#define outw_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outwc_p((val),(port)) : \
	__outw_p((val),(port)))

#define inw_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inwc_p(port) : \
	__inw_p(port))

#define outl(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outlc((val),(port)) : \
	__outl((val),(port)))

#define inl(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inlc(port) : \
	__inl(port))

#define outl_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outlc_p((val),(port)) : \
	__outl_p((val),(port)))

#define inl_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inlc_p(port) : \
	__inl_p(port))

#endif /* IO_H */