#ifndef ETHERBOOT_IO_H
#define ETHERBOOT_IO_H


/* Amount of relocation etherboot is experiencing */
extern unsigned long virt_offset;

/* Don't require identity mapped physical memory,
 * osloader.c is the only valid user at the moment.
 */
unsigned long virt_to_phys(volatile const void *virt_addr);
void *phys_to_virt(unsigned long phys_addr);

/* virt_to_bus converts an address inside of etherboot [_start, _end]
 * into an address that cards can use for memory accesses.
 */
#define virt_to_bus virt_to_phys


/* bus_to_virt reverses virt_to_bus; the address must have been returned
 * by virt_to_bus to be valid. This function does not work on
 * all bus addresses.
 */
#define bus_to_virt phys_to_virt
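
/* Illustrative sketch, not part of the original interface: a driver hands a
 * transmit buffer that lives inside etherboot to a card by converting its
 * virtual address with virt_to_bus, and recovers the virtual address from
 * the descriptor with bus_to_virt. The descriptor layout is a hypothetical
 * assumption made only for this example.
 */
struct example_tx_desc {
	unsigned long buf_addr;		/* bus address the card will DMA from */
	unsigned long length;
};

static inline void example_fill_desc(struct example_tx_desc *desc,
				     void *buffer, unsigned long len)
{
	/* Give the card an address it can use for its DMA engine. */
	desc->buf_addr = virt_to_bus(buffer);
	desc->length = len;
}

static inline void *example_desc_buffer(struct example_tx_desc *desc)
{
	/* Only valid because buf_addr was produced by virt_to_bus. */
	return bus_to_virt(desc->buf_addr);
}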

/* ioremap converts a random 32bit bus address into something
 * etherboot can access.
 */
static inline void *ioremap(unsigned long bus_addr, unsigned long length __unused)
{
	return bus_to_virt(bus_addr);
}

/* iounmap cleans up anything ioremap had to setup */
static inline void iounmap(void *virt_addr __unused)
{
	return;
}
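
/* Illustrative sketch, not part of the original interface: mapping one byte
 * of device memory, peeking at it, and unmapping again. The bus address is
 * whatever the caller obtained (e.g. from PCI config space); the direct
 * volatile dereference stands in for readb(), which is defined further
 * below in this file.
 */
static inline unsigned char example_peek_byte(unsigned long bus_addr)
{
	void *p = ioremap(bus_addr, 1);
	unsigned char v = *(volatile unsigned char *)p;

	iounmap(p);	/* currently a no-op, but keep the calls paired */
	return v;
}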

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

#ifdef SLOW_IO_BY_JUMPING
#define __SLOW_DOWN_IO __asm__ __volatile__("jmp 1f\n1:\tjmp 1f\n1:")
#else
#define __SLOW_DOWN_IO __asm__ __volatile__("outb %al,$0x80")
#endif

#ifdef REALLY_SLOW_IO
#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
#else
#define SLOW_DOWN_IO __SLOW_DOWN_IO
#endif

/*
 * readX/writeX() are used to access memory mapped devices. On some
 * architectures the memory mapped IO stuff needs to be accessed
 * differently. On the x86 architecture, we just read/write the
 * memory location directly.
 */
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))

#define writeb(b,addr) ((*(volatile unsigned char *) (addr)) = (b))
#define writew(b,addr) ((*(volatile unsigned short *) (addr)) = (b))
#define writel(b,addr) ((*(volatile unsigned int *) (addr)) = (b))

#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
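
/* Illustrative sketch, not part of the original interface: typical use of
 * readl/writel on a memory mapped register block. The base address comes
 * from ioremap(); the 0x10 offset and the bit layout are hypothetical
 * assumptions made only for this example.
 */
static inline void example_enable_irq(unsigned long regs)
{
	unsigned int mask;

	mask = readl(regs + 0x10);		/* hypothetical IRQ mask register */
	writel(mask | 0x1, regs + 0x10);	/* unmask interrupt source 0 */
	(void) readl(regs + 0x10);		/* flush the posted write */
}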

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
 * nop for these.
 */

#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb() mb()
#define wmb() mb()


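/* Illustrative sketch, not part of the original interface: a write barrier
 * keeps the descriptor contents ordered before the "owned by card" flag
 * that hands it over. The two-word descriptor layout and the ownership bit
 * are hypothetical assumptions made only for this example.
 */
static inline void example_hand_to_card(volatile unsigned long *desc,
					unsigned long len)
{
	desc[0] = len;			/* fill in the payload length first */
	wmb();				/* ensure it is visible before the handover */
	desc[1] = 0x80000000UL;		/* hypothetical "owned by card" bit */
}
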
/*
 * Talk about misusing macros..
 */

#define __OUT1(s,x) \
extern void __out##s(unsigned x value, unsigned short port); \
extern inline void __out##s(unsigned x value, unsigned short port) {

#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"

#define __OUT(s,s1,x) \
__OUT1(s,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); } \
__OUT1(s##c,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); } \
__OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }

#define __IN1(s,x) \
extern unsigned x __in##s(unsigned short port); \
extern inline unsigned x __in##s(unsigned short port) { unsigned x _v;

#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"

#define __IN(s,s1,x,i...) \
__IN1(s,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); return _v; } \
__IN1(s##c,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); return _v; } \
__IN1(s##_p,x) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; return _v; } \
__IN1(s##c_p,x) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }

#define __INS(s) \
extern void ins##s(unsigned short port, void * addr, unsigned long count); \
extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

#define __OUTS(s) \
extern void outs##s(unsigned short port, const void * addr, unsigned long count); \
extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }

__IN(b,"", char)
__IN(w,"",short)
__IN(l,"", long)

__OUT(b,"b",char)
__OUT(w,"w",short)
__OUT(l,,int)

__INS(b)
__INS(w)
__INS(l)

__OUTS(b)
__OUTS(w)
__OUTS(l)
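
/* Illustrative sketch, not part of the original interface: pulling received
 * data out of a NIC through its data port with the string form, one 16-bit
 * word per transfer. The port and the word-aligned length are hypothetical
 * assumptions made only for this example.
 */
static inline void example_drain_rx_fifo(unsigned short data_port,
					 void *buf, unsigned long nwords)
{
	/* rep insw: reads nwords 16-bit words from data_port into buf */
	insw(data_port, buf, nwords);
}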

/*
 * Note that due to the way __builtin_constant_p() works, you
 * - can't use it inside an inline function (it will never be true)
 * - don't have to worry about side effects within the __builtin..
 */
#define outb(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outbc((val),(port)) : \
	__outb((val),(port)))

#define inb(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inbc(port) : \
	__inb(port))

#define outb_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outbc_p((val),(port)) : \
	__outb_p((val),(port)))

#define inb_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inbc_p(port) : \
	__inb_p(port))

#define outw(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outwc((val),(port)) : \
	__outw((val),(port)))

#define inw(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inwc(port) : \
	__inw(port))

#define outw_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outwc_p((val),(port)) : \
	__outw_p((val),(port)))

#define inw_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inwc_p(port) : \
	__inw_p(port))

#define outl(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outlc((val),(port)) : \
	__outl((val),(port)))

#define inl(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inlc(port) : \
	__inl(port))

#define outl_p(val,port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__outlc_p((val),(port)) : \
	__outl_p((val),(port)))

#define inl_p(port) \
((__builtin_constant_p((port)) && (port) < 256) ? \
	__inlc_p(port) : \
	__inl_p(port))

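/* Illustrative sketch, not part of the original interface: probing a device
 * behind a fixed I/O base. Because the port is a compile-time constant below
 * 256, the macros above select the __outbc_p/__inbc forms, which encode the
 * port directly in the instruction. The base address and the command/status
 * semantics are hypothetical assumptions made only for this example.
 */
#define EXAMPLE_IO_BASE 0xe0		/* hypothetical base, constant and < 256 */

static inline unsigned char example_reset_and_status(void)
{
	outb_p(0xff, EXAMPLE_IO_BASE);	/* hypothetical reset command, with delay */
	return inb(EXAMPLE_IO_BASE);	/* hypothetical status register */
}
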
#endif /* ETHERBOOT_IO_H */