/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) avoid writing the same thing over and
 * over again with slight variations and possibly making a mistake
 * somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
 *
 * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
 * isa_read[wl] and isa_write[wl] fixed
 * - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/cc_platform.h>
#include <asm/page.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable_types.h>
#include <asm/shared/io.h>
#include <asm/special_insns.h>

#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }

#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }

build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
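
/*
 * Illustration (added note, not part of the original header): for
 * example, readl() above expands to roughly
 *
 *	static inline unsigned int readl(const volatile void __iomem *addr)
 *	{
 *		unsigned int ret;
 *		asm volatile("movl %1,%0"
 *			     : "=r" (ret)
 *			     : "m" (*(volatile unsigned int __force *)addr)
 *			     : "memory");
 *		return ret;
 *	}
 *
 * i.e. a single MOV from the MMIO address, with a "memory" clobber so
 * the compiler does not reorder or cache the access. The __read*() and
 * __write*() variants omit the clobber and back the *_relaxed() and
 * __raw_*() accessors defined below.
 */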

#define readb readb
#define readw readw
#define readl readl
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define writeb writeb
#define writew writew
#define writel writel
#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel

#ifdef CONFIG_X86_64

build_mmio_read(readq, "q", u64, "=r", :"memory")
build_mmio_read(__readq, "q", u64, "=r", )
build_mmio_write(writeq, "q", u64, "r", :"memory")
build_mmio_write(__writeq, "q", u64, "r", )

#define readq_relaxed(a)	__readq(a)
#define writeq_relaxed(v, a)	__writeq(v, a)

#define __raw_readq		__readq
#define __raw_writeq		__writeq

/* Let people know that we have them */
#define readq			readq
#define writeq			writeq

#endif

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

/**
 *	virt_to_phys	-	map virtual addresses to physical
 *	@address: address to remap
 *
 *	The returned physical address is the physical (CPU) mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses directly mapped or allocated via kmalloc.
 *
 *	This function does not give bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function.
 */

static inline phys_addr_t virt_to_phys(volatile void *address)
{
	return __pa(address);
}
#define virt_to_phys virt_to_phys

/**
 *	phys_to_virt	-	map physical address to virtual
 *	@address: address to remap
 *
 *	The returned virtual address is a current CPU mapping for
 *	the memory address given. It is only valid to use this function on
 *	addresses that have a kernel mapping.
 *
 *	This function does not handle bus mappings for DMA transfers. In
 *	almost all conceivable cases a device driver should not be using
 *	this function.
 */

static inline void *phys_to_virt(phys_addr_t address)
{
	return __va(address);
}
#define phys_to_virt phys_to_virt
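
/*
 * Illustrative sketch (added note): for memory in the kernel direct
 * mapping, e.g. a kmalloc() buffer, the two helpers above are inverses
 * of each other:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	phys_addr_t phys = virt_to_phys(buf);
 *
 *	WARN_ON(phys_to_virt(phys) != buf);
 *
 * Neither helper is valid for vmalloc() or ioremap() addresses, and
 * neither produces DMA addresses; use the DMA API for device transfers.
 */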

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 * However, we truncate the address to unsigned int to avoid undesirable
 * promotions in legacy drivers.
 */
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
	return (unsigned int)virt_to_phys(address);
}
#define isa_bus_to_virt		phys_to_virt

/*
 * The default ioremap() behavior is non-cached; if you need something
 * else, you probably want one of the following.
 */
extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
#define ioremap_uc ioremap_uc
extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
#define ioremap_cache ioremap_cache
extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
#define ioremap_prot ioremap_prot
extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
#define ioremap_encrypted ioremap_encrypted
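
/*
 * Illustrative sketch (added note, names are hypothetical): device
 * register windows normally use the default uncached ioremap(), while a
 * RAM-backed region such as a firmware table may be mapped cacheable:
 *
 *	regs = ioremap(bar_phys, bar_len);		uncached MMIO
 *	tbl  = ioremap_cache(table_phys, table_len);	cacheable, RAM-like
 */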

/**
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * If the area you are trying to map is a PCI BAR you should have a
 * look at pci_iomap().
 */
void __iomem *ioremap(resource_size_t offset, unsigned long size);
#define ioremap ioremap

extern void iounmap(volatile void __iomem *addr);
#define iounmap iounmap
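
/*
 * Typical usage sketch (added note; the REG_*/CTRL_ENABLE names and
 * "res" are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(CTRL_ENABLE, regs + REG_CTRL);
 *	status = readl(regs + REG_STATUS);
 *	...
 *	iounmap(regs);
 *
 * For PCI BARs, pci_iomap()/pci_iounmap() or their devm_* variants are
 * usually the better interface, as the kernel-doc above notes.
 */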

#ifdef __KERNEL__

void memcpy_fromio(void *, const volatile void __iomem *, size_t);
void memcpy_toio(volatile void __iomem *, const void *, size_t);
void memset_io(volatile void __iomem *, int, size_t);

#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio
#define memset_io memset_io

#ifdef CONFIG_X86_64
/*
 * Commit 0f07496144c2 ("[PATCH] Add faster __iowrite32_copy routine for
 * x86_64") says that circa 2006 rep movsl is noticeably faster than a copy
 * loop.
 */
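/*
 * Added note: as with the generic __iowrite32_copy() that this
 * replaces, @count is the number of 32-bit words to copy, not a byte
 * count, and @to is an MMIO destination that should be 32-bit aligned.
 */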
static inline void __iowrite32_copy(void __iomem *to, const void *from,
				    size_t count)
{
	asm volatile("rep ; movsl"
		     : "=&c"(count), "=&D"(to), "=&S"(from)
		     : "0"(count), "1"(to), "2"(from)
		     : "memory");
}
#define __iowrite32_copy __iowrite32_copy
#endif

/*
 * ISA space is 'always mapped' on a typical x86 system, so there is no
 * need to explicitly ioremap() it. The fact that the ISA IO space is
 * mapped to PAGE_OFFSET is pure coincidence - it does not mean ISA
 * values are physical addresses. The following constant pointer can be
 * used as the IO-area pointer (it can be iounmapped as well, so the
 * analogy with PCI mappings is quite close):
 */
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

#endif /* __KERNEL__ */

extern void native_io_delay(void);

extern int io_delay_type;
extern void io_delay_init(void);

#if defined(CONFIG_PARAVIRT)
#include <asm/paravirt.h>
#else

static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}

#endif

#define BUILDIO(bwl, type)						\
static inline void out##bwl##_p(type value, u16 port)			\
{									\
	out##bwl(value, port);						\
	slow_down_io();							\
}									\
									\
static inline type in##bwl##_p(u16 port)				\
{									\
	type value = in##bwl(port);					\
	slow_down_io();							\
	return value;							\
}									\
									\
static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \
{									\
	if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) {		\
		type *value = (type *)addr;				\
		while (count) {						\
			out##bwl(*value, port);				\
			value++;					\
			count--;					\
		}							\
	} else {							\
		asm volatile("rep; outs" #bwl				\
			     : "+S"(addr), "+c"(count)			\
			     : "d"(port) : "memory");			\
	}								\
}									\
									\
static inline void ins##bwl(u16 port, void *addr, unsigned long count)	\
{									\
	if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) {		\
		type *value = (type *)addr;				\
		while (count) {						\
			*value = in##bwl(port);				\
			value++;					\
			count--;					\
		}							\
	} else {							\
		asm volatile("rep; ins" #bwl				\
			     : "+D"(addr), "+c"(count)			\
			     : "d"(port) : "memory");			\
	}								\
}

BUILDIO(b, u8)
BUILDIO(w, u16)
BUILDIO(l, u32)
#undef BUILDIO
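
/*
 * Added note: the BUILDIO() expansions above generate the "pausing"
 * port accessors (inb_p/inw_p/inl_p and outb_p/outw_p/outl_p), which
 * issue slow_down_io() after each access, plus the string accessors
 * (insb/insw/insl and outsb/outsw/outsl). When the confidential
 * computing platform requests it (CC_ATTR_GUEST_UNROLL_STRING_IO), the
 * string forms avoid "rep ins/outs" and fall back to an unrolled loop
 * of single-port accesses; roughly, insl(port, buf, n) then behaves
 * like:
 *
 *	u32 *p = buf;
 *
 *	while (n--)
 *		*p++ = inl(port);
 */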

#define inb_p inb_p
#define inw_p inw_p
#define inl_p inl_p
#define insb insb
#define insw insw
#define insl insl

#define outb_p outb_p
#define outw_p outw_p
#define outl_p outl_p
#define outsb outsb
#define outsw outsw
#define outsl outsl

extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
				enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
#define ioremap_wc ioremap_wc
extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
#define ioremap_wt ioremap_wt

extern bool is_early_ioremap_ptep(pte_t *ptep);

#define IO_SPACE_LIMIT 0xffff

#include <asm-generic/io.h>
#undef PCI_IOBASE

#ifdef CONFIG_MTRR
extern int __must_check arch_phys_wc_index(int handle);
#define arch_phys_wc_index arch_phys_wc_index

extern int __must_check arch_phys_wc_add(unsigned long base,
					 unsigned long size);
extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif

#ifdef CONFIG_X86_PAT
extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
extern bool arch_memremap_can_ram_remap(resource_size_t offset,
					unsigned long size,
					unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap

extern bool phys_mem_access_encrypted(unsigned long phys_addr,
				      unsigned long size);
#else
static inline bool phys_mem_access_encrypted(unsigned long phys_addr,
					     unsigned long size)
{
	return true;
}
#endif

/**
 * iosubmit_cmds512 - copy data to a single MMIO location, in 512-bit units
 * @dst: destination, in MMIO space (must be 512-bit aligned)
 * @src: source
 * @count: number of 512-bit quantities to submit
 *
 * Submit data from kernel space to MMIO space, in units of 512 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 *
 * Warning: Do not use this helper unless your driver has checked that the CPU
 * instruction is supported on the platform.
 */
static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
				    size_t count)
{
	const u8 *from = src;
	const u8 *end = from + count * 64;

	while (from < end) {
		movdir64b_io(dst, from);
		from += 64;
	}
}
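
/*
 * Illustrative usage sketch (added note; "portal" and "desc" are
 * hypothetical): a driver submitting one 64-byte descriptor to a device
 * portal should first confirm that MOVDIR64B is available:
 *
 *	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B))
 *		return -EOPNOTSUPP;
 *
 *	iosubmit_cmds512(portal, &desc, 1);
 *
 * where "portal" is a 512-bit aligned __iomem pointer obtained from the
 * device, for example via ioremap() of one of its BARs.
 */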

#endif /* _ASM_X86_IO_H */