/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;

/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}

/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * virt_to_phys	-	map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}

/*
 * phys_to_virt	-	map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
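/*
 * A minimal usage sketch, not part of this header: round-tripping a
 * kmalloc()ed buffer through virt_to_phys()/phys_to_virt().  The buffer
 * and surrounding driver context are hypothetical, for illustration only.
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long pa;
 *
 *	if (buf) {
 *		pa = virt_to_phys(buf);
 *		WARN_ON(phys_to_virt(pa) != buf);
 *	}
 *
 * For actual DMA the dma_map_*() / dma_alloc_coherent() APIs must be
 * used instead, as the comments above point out.
 */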
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
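/*
 * A minimal usage sketch (hypothetical device, not part of this header):
 * map a register window described by a platform resource and read a
 * 32-bit status register at offset 0x10.  The resource "res" and the
 * register offset are illustrative assumptions; error handling is
 * abbreviated.
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);
 *	...
 *	iounmap(regs);
 */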
/*
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable -	map bus memory into CPU space
 * @offset:	    bus address of the memory
 * @size:	    size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable

/*
 * ioremap_wc     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where it may vastly
 * improve communications performance.  If it is determined at boot time
 * that the CPU CCA doesn't support UCA, this falls back to the
 * _CACHE_UNCACHED option (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
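/*
 * A minimal usage sketch (hypothetical PCI device): map a prefetchable
 * BAR write-combined, falling back to an uncached mapping.  The "pdev"
 * pointer and BAR number 2 are illustrative assumptions only.
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_wc(pci_resource_start(pdev, 2),
 *			pci_resource_len(pdev, 2));
 *	if (!fb)
 *		fb = ioremap_nocache(pci_resource_start(pdev, 2),
 *				     pci_resource_len(pdev, 2));
 */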
static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_io_reorder_wmb();						\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32	%L0, %L0, 0"	"\n\t"			\
			"dsrl32	%L0, %L0, 0"	"\n\t"			\
			"dsll32	%M0, %M0, 0"	"\n\t"			\
			"or	%L0, %L0, %M0"	"\n\t"			\
			"sd	%L0, %2"	"\n\t"			\
			".set	mips0"		"\n"			\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld	%L0, %1"	"\n\t"			\
			"dsra32	%M0, %L0, 0"	"\n\t"			\
			"sll	%L0, %L0, 0"	"\n\t"			\
			".set	mips0"		"\n"			\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_io_reorder_wmb();						\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__addr, __val);			\
}

#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
__BUILD_MEMORY_PFX(__raw_, bwlq, type)					\
__BUILD_MEMORY_PFX(, bwlq, type)					\
__BUILD_MEMORY_PFX(__mem_, bwlq, type)					\

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
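/*
 * A sketch of what the expansion above provides, for reference only
 * (these are not additional definitions); e.g. BUILDIO_MEM(l, u32)
 * generates the static inline accessors
 *
 *	u32  __raw_readl(const volatile void __iomem *mem);
 *	void __raw_writel(u32 val, volatile void __iomem *mem);
 *	u32  readl(const volatile void __iomem *mem);
 *	void writel(u32 val, volatile void __iomem *mem);
 *	u32  __mem_readl(const volatile void __iomem *mem);
 *	void __mem_writel(u32 val, volatile void __iomem *mem);
 *
 * with the b/w/q variants generated analogously.
 */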
#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type,)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif

#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)

#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

#define writeb_relaxed			writeb
#define writew_relaxed			writew
#define writel_relaxed			writel
#define writeq_relaxed			writeq

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))

/*
 * Some code tests for these symbols
 */
#define readq				readq
#define writeq				writeq

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
	__BUILD_MEMORY_STRING(bwlq, type)				\
	__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif


#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif
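/*
 * A minimal port-I/O usage sketch (hypothetical ISA-style device): the
 * base port 0x3f8, the register layout, and the 16-bit data FIFO are
 * illustrative assumptions only.
 *
 *	u16 buf[64];
 *
 *	outb(0x01, 0x3f8 + 2);			// enable, hypothetical register
 *	insw(0x3f8 + 4, buf, ARRAY_SIZE(buf));	// drain a 16-bit data FIFO
 *
 * On MIPS these expand to ordinary loads/stores relative to
 * mips_io_port_base, as built by the macros above.
 */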
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}
static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}
static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}

/*
 * The caches on some architectures aren't dma-coherent, so this has to
 * be handled in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent
 *    by writing the contents of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the contents of the caches back to memory, if necessary.
 *    Unlike the previous operation it does not need to invalidate the
 *    affected lines.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before DMA transfers
 *    from a device into memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */