1 /*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013-2015 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * 29 * $FreeBSD$ 30 */ 31 #ifndef _LINUX_IO_H_ 32 #define _LINUX_IO_H_ 33 34 #include <sys/endian.h> 35 #include <sys/types.h> 36 37 #include <machine/vm.h> 38 39 #include <linux/compiler.h> 40 #include <linux/types.h> 41 42 /* 43 * XXX This is all x86 specific. It should be bus space access. 44 */ 45 46 47 /* rmb and wmb are declared in machine/atomic.h, so should be included first. 
/*
 * rmb and wmb are declared in machine/atomic.h, so that header should be
 * included before this point.
 */
#ifndef __io_br
#define __io_br() __compiler_membar()
#endif

#ifndef __io_ar
#ifdef rmb
#define __io_ar() rmb()
#else
#define __io_ar() __compiler_membar()
#endif
#endif

#ifndef __io_bw
#ifdef wmb
#define __io_bw() wmb()
#else
#define __io_bw() __compiler_membar()
#endif
#endif

#ifndef __io_aw
#define __io_aw() __compiler_membar()
#endif

/*
 * Raw MMIO accessors: single, naturally-aligned loads and stores with no
 * ordering guarantees and no byte swapping.  Each function is shadowed by
 * a same-named macro, apparently so its presence can be detected with
 * #ifdef by other compat headers.
 */

static inline uint8_t
__raw_readb(const volatile void *addr)
{
	const volatile uint8_t *p = addr;

	return (*p);
}
#define __raw_readb(addr) __raw_readb(addr)

static inline void
__raw_writeb(uint8_t v, volatile void *addr)
{
	volatile uint8_t *p = addr;

	*p = v;
}
#define __raw_writeb(v, addr) __raw_writeb(v, addr)

static inline uint16_t
__raw_readw(const volatile void *addr)
{
	const volatile uint16_t *p = addr;

	return (*p);
}
#define __raw_readw(addr) __raw_readw(addr)

static inline void
__raw_writew(uint16_t v, volatile void *addr)
{
	volatile uint16_t *p = addr;

	*p = v;
}
#define __raw_writew(v, addr) __raw_writew(v, addr)

static inline uint32_t
__raw_readl(const volatile void *addr)
{
	const volatile uint32_t *p = addr;

	return (*p);
}
#define __raw_readl(addr) __raw_readl(addr)

static inline void
__raw_writel(uint32_t v, volatile void *addr)
{
	volatile uint32_t *p = addr;

	*p = v;
}
#define __raw_writel(v, addr) __raw_writel(v, addr)

#ifdef __LP64__
/* 64-bit raw accessors only exist where a single 64-bit access is atomic. */
static inline uint64_t
__raw_readq(const volatile void *addr)
{
	const volatile uint64_t *p = addr;

	return (*p);
}
#define __raw_readq(addr) __raw_readq(addr)

static inline void
__raw_writeq(uint64_t v, volatile void *addr)
{
	volatile uint64_t *p = addr;

	*p = v;
}
#define __raw_writeq(v, addr) __raw_writeq(v, addr)
#endif

#define mmiowb() barrier()
/*
 * Access little-endian MMIO registers atomically with memory barriers:
 * __io_br()/__io_ar() bracket reads and __io_bw()/__io_aw() bracket
 * writes.  Multi-byte values are converted between the register's
 * little-endian layout and host byte order.
 *
 * Consistency fix: readb()/writeb() previously open-coded the volatile
 * dereference; they now go through __raw_readb()/__raw_writeb() like
 * every other accessor in this section (behavior unchanged).
 */

#undef readb
static inline uint8_t
readb(const volatile void *addr)
{
	uint8_t v;

	__io_br();
	/* Byte-wide: no endian conversion needed. */
	v = __raw_readb(addr);
	__io_ar();
	return (v);
}
#define readb(addr) readb(addr)

#undef writeb
static inline void
writeb(uint8_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeb(v, addr);
	__io_aw();
}
#define writeb(v, addr) writeb(v, addr)

#undef readw
static inline uint16_t
readw(const volatile void *addr)
{
	uint16_t v;

	__io_br();
	v = le16toh(__raw_readw(addr));
	__io_ar();
	return (v);
}
#define readw(addr) readw(addr)

#undef writew
static inline void
writew(uint16_t v, volatile void *addr)
{
	__io_bw();
	__raw_writew(htole16(v), addr);
	__io_aw();
}
#define writew(v, addr) writew(v, addr)

#undef readl
static inline uint32_t
readl(const volatile void *addr)
{
	uint32_t v;

	__io_br();
	v = le32toh(__raw_readl(addr));
	__io_ar();
	return (v);
}
#define readl(addr) readl(addr)

#undef writel
static inline void
writel(uint32_t v, volatile void *addr)
{
	__io_bw();
	__raw_writel(htole32(v), addr);
	__io_aw();
}
#define writel(v, addr) writel(v, addr)

#undef readq
#undef writeq
#ifdef __LP64__
static inline uint64_t
readq(const volatile void *addr)
{
	uint64_t v;

	__io_br();
	v = le64toh(__raw_readq(addr));
	__io_ar();
	return (v);
}
#define readq(addr) readq(addr)

static inline void
writeq(uint64_t v, volatile void *addr)
{
	__io_bw();
	__raw_writeq(htole64(v), addr);
	__io_aw();
}
#define writeq(v, addr) writeq(v, addr)
#endif
/*
 * "Relaxed" little-endian MMIO accessors: same atomic access and byte
 * swapping as readX()/writeX() above, but with no memory barriers.
 */

#undef readb_relaxed
static inline uint8_t
readb_relaxed(const volatile void *addr)
{
	uint8_t v;

	v = __raw_readb(addr);
	return (v);
}
#define readb_relaxed(addr) readb_relaxed(addr)

#undef writeb_relaxed
static inline void
writeb_relaxed(uint8_t v, volatile void *addr)
{
	__raw_writeb(v, addr);
}
#define writeb_relaxed(v, addr) writeb_relaxed(v, addr)

#undef readw_relaxed
static inline uint16_t
readw_relaxed(const volatile void *addr)
{
	uint16_t raw;

	raw = __raw_readw(addr);
	return (le16toh(raw));
}
#define readw_relaxed(addr) readw_relaxed(addr)

#undef writew_relaxed
static inline void
writew_relaxed(uint16_t v, volatile void *addr)
{
	uint16_t le;

	le = htole16(v);
	__raw_writew(le, addr);
}
#define writew_relaxed(v, addr) writew_relaxed(v, addr)

#undef readl_relaxed
static inline uint32_t
readl_relaxed(const volatile void *addr)
{
	uint32_t raw;

	raw = __raw_readl(addr);
	return (le32toh(raw));
}
#define readl_relaxed(addr) readl_relaxed(addr)

#undef writel_relaxed
static inline void
writel_relaxed(uint32_t v, volatile void *addr)
{
	uint32_t le;

	le = htole32(v);
	__raw_writel(le, addr);
}
#define writel_relaxed(v, addr) writel_relaxed(v, addr)

#undef readq_relaxed
#undef writeq_relaxed
#ifdef __LP64__
static inline uint64_t
readq_relaxed(const volatile void *addr)
{
	uint64_t raw;

	raw = __raw_readq(addr);
	return (le64toh(raw));
}
#define readq_relaxed(addr) readq_relaxed(addr)

static inline void
writeq_relaxed(uint64_t v, volatile void *addr)
{
	uint64_t le;

	le = htole64(v);
	__raw_writeq(le, addr);
}
#define writeq_relaxed(v, addr) writeq_relaxed(v, addr)
#endif
*/ 299 300 #undef ioread8 301 static inline uint8_t 302 ioread8(const volatile void *addr) 303 { 304 return (readb(addr)); 305 } 306 #define ioread8(addr) ioread8(addr) 307 308 #undef ioread16 309 static inline uint16_t 310 ioread16(const volatile void *addr) 311 { 312 return (readw(addr)); 313 } 314 #define ioread16(addr) ioread16(addr) 315 316 #undef ioread16be 317 static inline uint16_t 318 ioread16be(const volatile void *addr) 319 { 320 uint16_t v; 321 322 __io_br(); 323 v = (be16toh(__raw_readw(addr))); 324 __io_ar(); 325 326 return (v); 327 } 328 #define ioread16be(addr) ioread16be(addr) 329 330 #undef ioread32 331 static inline uint32_t 332 ioread32(const volatile void *addr) 333 { 334 return (readl(addr)); 335 } 336 #define ioread32(addr) ioread32(addr) 337 338 #undef ioread32be 339 static inline uint32_t 340 ioread32be(const volatile void *addr) 341 { 342 uint32_t v; 343 344 __io_br(); 345 v = (be32toh(__raw_readl(addr))); 346 __io_ar(); 347 348 return (v); 349 } 350 #define ioread32be(addr) ioread32be(addr) 351 352 #undef iowrite8 353 static inline void 354 iowrite8(uint8_t v, volatile void *addr) 355 { 356 writeb(v, addr); 357 } 358 #define iowrite8(v, addr) iowrite8(v, addr) 359 360 #undef iowrite16 361 static inline void 362 iowrite16(uint16_t v, volatile void *addr) 363 { 364 writew(v, addr); 365 } 366 #define iowrite16 iowrite16 367 368 #undef iowrite32 369 static inline void 370 iowrite32(uint32_t v, volatile void *addr) 371 { 372 writel(v, addr); 373 } 374 #define iowrite32(v, addr) iowrite32(v, addr) 375 376 #undef iowrite32be 377 static inline void 378 iowrite32be(uint32_t v, volatile void *addr) 379 { 380 __io_bw(); 381 __raw_writel(htobe32(v), addr); 382 __io_aw(); 383 } 384 #define iowrite32be(v, addr) iowrite32be(v, addr) 385 386 #if defined(__i386__) || defined(__amd64__) 387 static inline void 388 _outb(u_char data, u_int port) 389 { 390 __asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port)); 391 } 392 #endif 393 394 #if 
defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__) 395 void *_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr); 396 #else 397 #define _ioremap_attr(...) NULL 398 #endif 399 400 #ifdef VM_MEMATTR_DEVICE 401 #define ioremap_nocache(addr, size) \ 402 _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE) 403 #define ioremap_wt(addr, size) \ 404 _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE) 405 #define ioremap(addr, size) \ 406 _ioremap_attr((addr), (size), VM_MEMATTR_DEVICE) 407 #else 408 #define ioremap_nocache(addr, size) \ 409 _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE) 410 #define ioremap_wt(addr, size) \ 411 _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_THROUGH) 412 #define ioremap(addr, size) \ 413 _ioremap_attr((addr), (size), VM_MEMATTR_UNCACHEABLE) 414 #endif 415 #define ioremap_wc(addr, size) \ 416 _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_COMBINING) 417 #define ioremap_wb(addr, size) \ 418 _ioremap_attr((addr), (size), VM_MEMATTR_WRITE_BACK) 419 void iounmap(void *addr); 420 421 #define memset_io(a, b, c) memset((a), (b), (c)) 422 #define memcpy_fromio(a, b, c) memcpy((a), (b), (c)) 423 #define memcpy_toio(a, b, c) memcpy((a), (b), (c)) 424 425 static inline void 426 __iowrite32_copy(void *to, void *from, size_t count) 427 { 428 uint32_t *src; 429 uint32_t *dst; 430 int i; 431 432 for (i = 0, src = from, dst = to; i < count; i++, src++, dst++) 433 __raw_writel(*src, dst); 434 } 435 436 static inline void 437 __iowrite64_copy(void *to, void *from, size_t count) 438 { 439 #ifdef __LP64__ 440 uint64_t *src; 441 uint64_t *dst; 442 int i; 443 444 for (i = 0, src = from, dst = to; i < count; i++, src++, dst++) 445 __raw_writeq(*src, dst); 446 #else 447 __iowrite32_copy(to, from, count * 2); 448 #endif 449 } 450 451 enum { 452 MEMREMAP_WB = 1 << 0, 453 MEMREMAP_WT = 1 << 1, 454 MEMREMAP_WC = 1 << 2, 455 }; 456 457 static inline void * 458 memremap(resource_size_t offset, size_t size, unsigned long 
flags) 459 { 460 void *addr = NULL; 461 462 if ((flags & MEMREMAP_WB) && 463 (addr = ioremap_wb(offset, size)) != NULL) 464 goto done; 465 if ((flags & MEMREMAP_WT) && 466 (addr = ioremap_wt(offset, size)) != NULL) 467 goto done; 468 if ((flags & MEMREMAP_WC) && 469 (addr = ioremap_wc(offset, size)) != NULL) 470 goto done; 471 done: 472 return (addr); 473 } 474 475 static inline void 476 memunmap(void *addr) 477 { 478 /* XXX May need to check if this is RAM */ 479 iounmap(addr); 480 } 481 482 #endif /* _LINUX_IO_H_ */ 483