1 /** 2 * \file drm_os_freebsd.h 3 * OS abstraction macros. 4 */ 5 6 #include <sys/cdefs.h> 7 __FBSDID("$FreeBSD$"); 8 9 #ifndef _DRM_OS_FREEBSD_H_ 10 #define _DRM_OS_FREEBSD_H_ 11 12 #include <sys/fbio.h> 13 #include <sys/smp.h> 14 15 #if _BYTE_ORDER == _BIG_ENDIAN 16 #define __BIG_ENDIAN 4321 17 #else 18 #define __LITTLE_ENDIAN 1234 19 #endif 20 21 #ifdef __LP64__ 22 #define BITS_PER_LONG 64 23 #else 24 #define BITS_PER_LONG 32 25 #endif 26 27 #ifndef __user 28 #define __user 29 #endif 30 #ifndef __iomem 31 #define __iomem 32 #endif 33 #ifndef __always_unused 34 #define __always_unused 35 #endif 36 #ifndef __must_check 37 #define __must_check 38 #endif 39 #ifndef __force 40 #define __force 41 #endif 42 #ifndef uninitialized_var 43 #define uninitialized_var(x) x 44 #endif 45 46 #define cpu_to_le16(x) htole16(x) 47 #define le16_to_cpu(x) le16toh(x) 48 #define cpu_to_le32(x) htole32(x) 49 #define le32_to_cpu(x) le32toh(x) 50 51 #define cpu_to_be16(x) htobe16(x) 52 #define be16_to_cpu(x) be16toh(x) 53 #define cpu_to_be32(x) htobe32(x) 54 #define be32_to_cpu(x) be32toh(x) 55 #define be32_to_cpup(x) be32toh(*x) 56 57 typedef vm_paddr_t dma_addr_t; 58 typedef vm_paddr_t resource_size_t; 59 #define wait_queue_head_t atomic_t 60 61 typedef uint64_t u64; 62 typedef uint32_t u32; 63 typedef uint16_t u16; 64 typedef uint8_t u8; 65 typedef int64_t s64; 66 typedef int32_t s32; 67 typedef int16_t s16; 68 typedef int8_t s8; 69 typedef uint16_t __le16; 70 typedef uint32_t __le32; 71 typedef uint64_t __le64; 72 typedef uint16_t __be16; 73 typedef uint32_t __be32; 74 typedef uint64_t __be64; 75 76 #define DRM_IRQ_ARGS void *arg 77 typedef void irqreturn_t; 78 #define IRQ_HANDLED /* nothing */ 79 #define IRQ_NONE /* nothing */ 80 81 #define __init 82 #define __exit 83 84 #define BUILD_BUG_ON(x) CTASSERT(!(x)) 85 #define BUILD_BUG_ON_NOT_POWER_OF_2(x) 86 87 #ifndef WARN 88 #define WARN(condition, format, ...) 
({ \ 89 int __ret_warn_on = !!(condition); \ 90 if (unlikely(__ret_warn_on)) \ 91 DRM_ERROR(format, ##__VA_ARGS__); \ 92 unlikely(__ret_warn_on); \ 93 }) 94 #endif 95 #define WARN_ONCE(condition, format, ...) \ 96 WARN(condition, format, ##__VA_ARGS__) 97 #define WARN_ON(cond) WARN(cond, "WARN ON: " #cond) 98 #define WARN_ON_SMP(cond) WARN_ON(cond) 99 #define BUG() panic("BUG") 100 #define BUG_ON(cond) KASSERT(!(cond), ("BUG ON: " #cond " -> 0x%jx", (uintmax_t)(cond))) 101 #define unlikely(x) __builtin_expect(!!(x), 0) 102 #define likely(x) __builtin_expect(!!(x), 1) 103 #define container_of(ptr, type, member) ({ \ 104 __typeof( ((type *)0)->member ) *__mptr = (ptr); \ 105 (type *)( (char *)__mptr - offsetof(type,member) );}) 106 107 #define KHZ2PICOS(a) (1000000000UL/(a)) 108 109 #define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) 110 111 #define HZ hz 112 #define DRM_HZ hz 113 #define DRM_CURRENTPID curthread->td_proc->p_pid 114 #define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0) 115 #define udelay(usecs) DELAY(usecs) 116 #define mdelay(msecs) do { int loops = (msecs); \ 117 while (loops--) DELAY(1000); \ 118 } while (0) 119 #define DRM_UDELAY(udelay) DELAY(udelay) 120 #define drm_msleep(x, msg) pause((msg), ((int64_t)(x)) * hz / 1000) 121 #define DRM_MSLEEP(msecs) drm_msleep((msecs), "drm_msleep") 122 #define get_seconds() time_second 123 124 #define ioread8(addr) *(volatile uint8_t *)((char *)addr) 125 #define ioread16(addr) *(volatile uint16_t *)((char *)addr) 126 #define ioread32(addr) *(volatile uint32_t *)((char *)addr) 127 128 #define iowrite8(data, addr) *(volatile uint8_t *)((char *)addr) = data; 129 #define iowrite16(data, addr) *(volatile uint16_t *)((char *)addr) = data; 130 #define iowrite32(data, addr) *(volatile uint32_t *)((char *)addr) = data; 131 132 #define DRM_READ8(map, offset) \ 133 *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ 134 (vm_offset_t)(offset)) 135 #define DRM_READ16(map, offset) \ 136 le16toh(*(volatile u_int16_t 
*)(((vm_offset_t)(map)->handle) + \ 137 (vm_offset_t)(offset))) 138 #define DRM_READ32(map, offset) \ 139 le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ 140 (vm_offset_t)(offset))) 141 #define DRM_READ64(map, offset) \ 142 le64toh(*(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ 143 (vm_offset_t)(offset))) 144 #define DRM_WRITE8(map, offset, val) \ 145 *(volatile u_int8_t *)(((vm_offset_t)(map)->handle) + \ 146 (vm_offset_t)(offset)) = val 147 #define DRM_WRITE16(map, offset, val) \ 148 *(volatile u_int16_t *)(((vm_offset_t)(map)->handle) + \ 149 (vm_offset_t)(offset)) = htole16(val) 150 #define DRM_WRITE32(map, offset, val) \ 151 *(volatile u_int32_t *)(((vm_offset_t)(map)->handle) + \ 152 (vm_offset_t)(offset)) = htole32(val) 153 #define DRM_WRITE64(map, offset, val) \ 154 *(volatile u_int64_t *)(((vm_offset_t)(map)->handle) + \ 155 (vm_offset_t)(offset)) = htole64(val) 156 157 #define DRM_PORT "graphics/drm-kmod" 158 159 #define DRM_OBSOLETE(dev) \ 160 do { \ 161 device_printf(dev, "=======================================================\n"); \ 162 device_printf(dev, "This code is deprecated. Install the " DRM_PORT " pkg\n"); \ 163 device_printf(dev, "=======================================================\n"); \ 164 gone_in_dev(dev, 13, "drm2 drivers"); \ 165 } while (0) 166 167 /* DRM_READMEMORYBARRIER() prevents reordering of reads. 168 * DRM_WRITEMEMORYBARRIER() prevents reordering of writes. 169 * DRM_MEMORYBARRIER() prevents reordering of reads and writes. 
170 */ 171 #define DRM_READMEMORYBARRIER() rmb() 172 #define DRM_WRITEMEMORYBARRIER() wmb() 173 #define DRM_MEMORYBARRIER() mb() 174 #define smp_rmb() rmb() 175 #define smp_wmb() wmb() 176 #define smp_mb__before_atomic_inc() mb() 177 #define smp_mb__after_atomic_inc() mb() 178 #define barrier() __compiler_membar() 179 180 #define do_div(a, b) ((a) /= (b)) 181 #define div64_u64(a, b) ((a) / (b)) 182 #define lower_32_bits(n) ((u32)(n)) 183 #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) 184 185 #define __set_bit(n, s) set_bit((n), (s)) 186 #define __clear_bit(n, s) clear_bit((n), (s)) 187 188 #define min_t(type, x, y) ({ \ 189 type __min1 = (x); \ 190 type __min2 = (y); \ 191 __min1 < __min2 ? __min1 : __min2; }) 192 193 #define max_t(type, x, y) ({ \ 194 type __max1 = (x); \ 195 type __max2 = (y); \ 196 __max1 > __max2 ? __max1 : __max2; }) 197 198 #define memset_io(a, b, c) memset((a), (b), (c)) 199 #define memcpy_fromio(a, b, c) memcpy((a), (b), (c)) 200 #define memcpy_toio(a, b, c) memcpy((a), (b), (c)) 201 202 #define VERIFY_READ VM_PROT_READ 203 #define VERIFY_WRITE VM_PROT_WRITE 204 #define access_ok(prot, p, l) useracc((p), (l), (prot)) 205 206 /* XXXKIB what is the right code for the FreeBSD ? */ 207 /* kib@ used ENXIO here -- dumbbell@ */ 208 #define EREMOTEIO EIO 209 #define ERESTARTSYS 512 /* Same value as Linux. 
*/ 210 211 #define KTR_DRM KTR_DEV 212 #define KTR_DRM_REG KTR_SPARE3 213 214 #define DRM_AGP_KERN struct agp_info 215 #define DRM_AGP_MEM void 216 217 #define PCI_VENDOR_ID_APPLE 0x106b 218 #define PCI_VENDOR_ID_ASUSTEK 0x1043 219 #define PCI_VENDOR_ID_ATI 0x1002 220 #define PCI_VENDOR_ID_DELL 0x1028 221 #define PCI_VENDOR_ID_HP 0x103c 222 #define PCI_VENDOR_ID_IBM 0x1014 223 #define PCI_VENDOR_ID_INTEL 0x8086 224 #define PCI_VENDOR_ID_SERVERWORKS 0x1166 225 #define PCI_VENDOR_ID_SONY 0x104d 226 #define PCI_VENDOR_ID_VIA 0x1106 227 228 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) 229 #define DIV_ROUND_CLOSEST(n,d) (((n) + (d) / 2) / (d)) 230 #define div_u64(n, d) ((n) / (d)) 231 #define hweight32(i) bitcount32(i) 232 233 static inline unsigned long 234 roundup_pow_of_two(unsigned long x) 235 { 236 237 return (1UL << flsl(x - 1)); 238 } 239 240 /** 241 * ror32 - rotate a 32-bit value right 242 * @word: value to rotate 243 * @shift: bits to roll 244 * 245 * Source: include/linux/bitops.h 246 */ 247 static inline uint32_t 248 ror32(uint32_t word, unsigned int shift) 249 { 250 251 return (word >> shift) | (word << (32 - shift)); 252 } 253 254 #define IS_ALIGNED(x, y) (((x) & ((y) - 1)) == 0) 255 #define round_down(x, y) rounddown2((x), (y)) 256 #define round_up(x, y) roundup2((x), (y)) 257 #define get_unaligned(ptr) \ 258 ({ __typeof__(*(ptr)) __tmp; \ 259 memcpy(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) 260 261 #if _BYTE_ORDER == _LITTLE_ENDIAN 262 /* Taken from linux/include/linux/unaligned/le_struct.h. */ 263 struct __una_u32 { u32 x; } __packed; 264 265 static inline u32 266 __get_unaligned_cpu32(const void *p) 267 { 268 const struct __una_u32 *ptr = (const struct __una_u32 *)p; 269 270 return (ptr->x); 271 } 272 273 static inline u32 274 get_unaligned_le32(const void *p) 275 { 276 277 return (__get_unaligned_cpu32((const u8 *)p)); 278 } 279 #else 280 /* Taken from linux/include/linux/unaligned/le_byteshift.h. 
 */
/* Assemble a little-endian 32-bit value byte by byte (big-endian hosts). */
static inline u32
__get_unaligned_le32(const u8 *p)
{

	return (p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24);
}

static inline u32
get_unaligned_le32(const void *p)
{

	return (__get_unaligned_le32((const u8 *)p));
}
#endif

/* Floor of log2(x); meaningless for x == 0. */
static inline unsigned long
ilog2(unsigned long x)
{

	return (flsl(x) - 1);
}

/* 64-bit absolute value. */
static inline int64_t
abs64(int64_t x)
{

	return (x < 0 ? -x : x);
}

int64_t		timeval_to_ns(const struct timeval *tv);
struct timeval	ns_to_timeval(const int64_t nsec);

#define PAGE_ALIGN(addr)	round_page(addr)
#define page_to_phys(x)		VM_PAGE_TO_PHYS(x)
#define offset_in_page(x)	((x) & PAGE_MASK)

#define drm_get_device_from_kdev(_kdev)	(((struct drm_minor *)(_kdev)->si_drv1)->dev)

/* ioctl direction flags expressed with FreeBSD's IOC_* encoding. */
#define DRM_IOC_VOID		IOC_VOID
#define DRM_IOC_READ		IOC_OUT
#define DRM_IOC_WRITE		IOC_IN
#define DRM_IOC_READWRITE	IOC_INOUT
#define DRM_IOC(dir, group, nr, size)	_IOC(dir, group, nr, size)

/*
 * Linux-style user-copy helpers.  Following the Linux convention they
 * return the number of bytes NOT copied: 0 on success, n on fault.
 */
static inline long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return (copyout(from, to, n) != 0 ? n : 0);
}
#define copy_to_user(to, from, n) __copy_to_user((to), (from), (n))

/* Returns 0 on success or -EFAULT; "size" doubles as the failure flag. */
static inline int
__put_user(size_t size, void *ptr, void *x)
{

	size = copy_to_user(ptr, x, size);

	return (size ? -EFAULT : size);
}
#define put_user(x, ptr) __put_user(sizeof(*ptr), (ptr), &(x))

static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return ((copyin(__DECONST(void *, from), to, n) != 0 ? n : 0));
}
#define copy_from_user(to, from, n) __copy_from_user((to), (from), (n))

static inline int
__get_user(size_t size, const void *ptr, void *x)
{

	size = copy_from_user(x, ptr, size);

	return (size ? -EFAULT : size);
}
#define get_user(x, ptr) __get_user(sizeof(*ptr), (ptr), &(x))

/* Fault-intolerant variant (copyout_nofault never sleeps on a fault). */
static inline int
__copy_to_user_inatomic(void __user *to, const void *from, unsigned n)
{

	return (copyout_nofault(from, to, n) != 0 ? n : 0);
}
#define __copy_to_user_inatomic_nocache(to, from, n)	\
	__copy_to_user_inatomic((to), (from), (n))

static inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from,
    unsigned long n)
{

	/*
	 * XXXKIB. Equivalent Linux function is implemented using
	 * MOVNTI for aligned moves. For unaligned head and tail,
	 * normal move is performed. As such, it is not incorrect, if
	 * only somewhat slower, to use normal copyin. All uses
	 * except shmem_pwrite_fast() have the destination mapped WC.
	 */
	return ((copyin_nofault(__DECONST(void *, from), to, n) != 0 ? n : 0));
}
#define __copy_from_user_inatomic_nocache(to, from, n)	\
	__copy_from_user_inatomic((to), (from), (n))

/*
 * Touch one byte per page of [uaddr, uaddr + size) so the range is
 * faulted in.  Returns 0 on success or -EFAULT.
 */
static inline int
fault_in_multipages_readable(const char __user *uaddr, int size)
{
	char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = -copyin(uaddr, &c, 1);
		if (ret != 0)
			return -EFAULT;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & ~PAGE_MASK) ==
	    ((unsigned long)end & ~PAGE_MASK)) {
		ret = -copyin(end, &c, 1);
	}

	return ret;
}

/* Writable counterpart: probes each page by storing a zero byte. */
static inline int
fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = subyte(uaddr, 0);
		if (ret != 0)
			return -EFAULT;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & ~PAGE_MASK) ==
	    ((unsigned long)end & ~PAGE_MASK))
		ret = subyte(end, 0);

	return ret;
}

/* The only Linux capability the DRM code ever asks about. */
enum __drm_capabilities {
	CAP_SYS_ADMIN
};

/* Linux capable() mapped onto the PRIV_DRIVER privilege check. */
static inline bool
capable(enum __drm_capabilities cap)
{

	switch (cap) {
	case CAP_SYS_ADMIN:
		return DRM_SUSER(curthread);
	default:
		panic("%s: unhandled capability: %0x", __func__, cap);
		return (false);
	}
}

#define to_user_ptr(x)		((void *)(uintptr_t)(x))
#define sigemptyset(set)	SIGEMPTYSET(set)
#define sigaddset(set, sig)	SIGADDSET(set, sig)

#define DRM_LOCK(dev)	sx_xlock(&(dev)->dev_struct_lock)
#define DRM_UNLOCK(dev)	sx_xunlock(&(dev)->dev_struct_lock)

extern unsigned long drm_linux_timer_hz_mask;
/* jiffies == FreeBSD "ticks"; conversions widen to int64_t to avoid overflow. */
#define jiffies			ticks
#define jiffies_to_msecs(x)	(((int64_t)(x)) * 1000 / hz)
#define msecs_to_jiffies(x)	(((int64_t)(x)) * hz / 1000)
#define timespec_to_jiffies(x)	(((x)->tv_sec * 1000000 + (x)->tv_nsec) * hz / 1000000)
/* Wrap-safe tick comparisons via signed difference. */
#define time_after(a,b)		((long)(b) - (long)(a) < 0)
#define time_after_eq(a,b)	((long)(b) - (long)(a) <= 0)
#define round_jiffies(j)	((unsigned long)(((j) + drm_linux_timer_hz_mask) & ~drm_linux_timer_hz_mask))
#define round_jiffies_up(j)		round_jiffies(j) /* TODO */
#define round_jiffies_up_relative(j)	round_jiffies_up(j) /* TODO */

#define getrawmonotonic(ts)	getnanouptime(ts)

/* Wait-queue wakeups collapse onto wakeup(9)/wakeup_one(9). */
#define wake_up(queue)			wakeup_one((void *)queue)
#define wake_up_interruptible(queue)	wakeup_one((void *)queue)
#define wake_up_all(queue)		wakeup((void *)queue)
#define wake_up_interruptible_all(queue) wakeup((void *)queue)

/* Minimal Linux "completion" built on a mutex plus sleep/wakeup. */
struct completion {
	unsigned int done;	/* incremented by complete_all() */
	struct mtx lock;
};

#define INIT_COMPLETION(c) ((c).done = 0);

static inline void
init_completion(struct completion *c)
{

	mtx_init(&c->lock, "drmcompl", NULL, MTX_DEF);
	c->done = 0;
}

static inline void
free_completion(struct completion *c)
{

	mtx_destroy(&c->lock);
}

/* Mark the completion done and wake every sleeper. */
static inline void
complete_all(struct completion *c)
{

	mtx_lock(&c->lock);
	c->done++;
	mtx_unlock(&c->lock);
	wakeup(c);
}

/*
 * Sleep until the completion fires, the timeout (in ticks) expires, or a
 * signal arrives.  Mirrors the Linux return convention: remaining ticks
 * (clamped to >= 1) on wakeup, 0 on timeout, -ERESTARTSYS if interrupted.
 */
static inline long
wait_for_completion_interruptible_timeout(struct completion *c,
    unsigned long timeout)
{
	unsigned long start_jiffies, elapsed_jiffies;
	bool timeout_expired = false, awakened = false;
	long ret = timeout;

	start_jiffies = ticks;

	mtx_lock(&c->lock);
	while (c->done == 0 && !timeout_expired) {
		/* msleep returns positive errno; negate toward Linux style. */
		ret = -msleep(c, &c->lock, PCATCH, "drmwco", timeout);
		switch(ret) {
		case -EWOULDBLOCK:
			timeout_expired = true;
			ret = 0;
			break;
		case -EINTR:
		case -ERESTART:
			ret = -ERESTARTSYS;
			break;
		case 0:
			awakened = true;
			break;
		}
	}
	mtx_unlock(&c->lock);

	if (awakened) {
		elapsed_jiffies = ticks - start_jiffies;
		ret = timeout > elapsed_jiffies ?
		    timeout - elapsed_jiffies : 1;
	}

	return (ret);
}

/* Per-subsystem malloc(9) types used for DRM allocations. */
MALLOC_DECLARE(DRM_MEM_DMA);
MALLOC_DECLARE(DRM_MEM_SAREA);
MALLOC_DECLARE(DRM_MEM_DRIVER);
MALLOC_DECLARE(DRM_MEM_MAGIC);
MALLOC_DECLARE(DRM_MEM_MINOR);
MALLOC_DECLARE(DRM_MEM_IOCTLS);
MALLOC_DECLARE(DRM_MEM_MAPS);
MALLOC_DECLARE(DRM_MEM_BUFS);
MALLOC_DECLARE(DRM_MEM_SEGS);
MALLOC_DECLARE(DRM_MEM_PAGES);
MALLOC_DECLARE(DRM_MEM_FILES);
MALLOC_DECLARE(DRM_MEM_QUEUES);
MALLOC_DECLARE(DRM_MEM_CMDS);
MALLOC_DECLARE(DRM_MEM_MAPPINGS);
MALLOC_DECLARE(DRM_MEM_BUFLISTS);
MALLOC_DECLARE(DRM_MEM_AGPLISTS);
MALLOC_DECLARE(DRM_MEM_CTXBITMAP);
MALLOC_DECLARE(DRM_MEM_SGLISTS);
MALLOC_DECLARE(DRM_MEM_MM);
MALLOC_DECLARE(DRM_MEM_HASHTAB);
MALLOC_DECLARE(DRM_MEM_KMS);
MALLOC_DECLARE(DRM_MEM_VBLANK);

#define simple_strtol(a, b, c)	strtol((a), (b), (c))

/* One row of a driver's PCI probe table. */
typedef struct drm_pci_id_list
{
	int vendor;
	int device;
	long driver_private;
	char *name;
} drm_pci_id_list_t;

/* Linux CONFIG_* switches synthesized from FreeBSD machine/option macros. */
#ifdef __i386__
#define CONFIG_X86	1
#endif
#ifdef __amd64__
#define CONFIG_X86	1
#define CONFIG_X86_64	1
#endif
#ifdef __ia64__
#define CONFIG_IA64	1
#endif

#if defined(__i386__) || defined(__amd64__)
#define CONFIG_ACPI
#define CONFIG_DRM_I915_KMS
#undef CONFIG_INTEL_IOMMU
#endif

#ifdef COMPAT_FREEBSD32
#define CONFIG_COMPAT
#endif

#ifndef __arm__
#define CONFIG_AGP	1
#define CONFIG_MTRR	1
#endif

#define CONFIG_FB	1
extern const char *fb_mode_option;

#undef CONFIG_DEBUG_FS
#undef CONFIG_VGA_CONSOLE

/* Linux module boilerplate: all no-ops on FreeBSD. */
#define EXPORT_SYMBOL(x)
#define EXPORT_SYMBOL_GPL(x)
#define MODULE_AUTHOR(author)
#define MODULE_DESCRIPTION(desc)
#define MODULE_LICENSE(license)
#define MODULE_PARM_DESC(name, desc)
#define MODULE_DEVICE_TABLE(name, list)
#define module_param_named(name, var, type, perm)
/* printf-style logging shims. */
#define printk		printf
#define pr_err		DRM_ERROR
#define pr_warn		DRM_WARNING
/* NOTE(review): expands to a plain warning, not a once-only one. */
#define pr_warn_once	DRM_WARNING
#define KERN_DEBUG	""

/* I2C compatibility. */
#define I2C_M_RD	IIC_M_RD
#define I2C_M_WR	IIC_M_WR
#define I2C_M_NOSTART	IIC_M_NOSTART

struct fb_info *	framebuffer_alloc(void);
void	framebuffer_release(struct fb_info *info);

/* No console locking is performed here; trylock always succeeds. */
#define console_lock()
#define console_unlock()
#define console_trylock()	true

/* Linux power-management event codes used by suspend/resume paths. */
#define PM_EVENT_SUSPEND	0x0002
#define PM_EVENT_QUIESCE	0x0008
#define PM_EVENT_PRETHAW	PM_EVENT_QUIESCE

typedef struct pm_message {
	int event;
} pm_message_t;

/*
 * Linux-style PCI config-space accessors layered over pci_read_config(9)
 * and pci_write_config(9).  They always report success (return 0).
 */
static inline int
pci_read_config_byte(device_t kdev, int where, u8 *val)
{

	*val = (u8)pci_read_config(kdev, where, 1);
	return (0);
}

static inline int
pci_write_config_byte(device_t kdev, int where, u8 val)
{

	pci_write_config(kdev, where, val, 1);
	return (0);
}

static inline int
pci_read_config_word(device_t kdev, int where, uint16_t *val)
{

	*val = (uint16_t)pci_read_config(kdev, where, 2);
	return (0);
}

static inline int
pci_write_config_word(device_t kdev, int where, uint16_t val)
{

	pci_write_config(kdev, where, val, 2);
	return (0);
}

static inline int
pci_read_config_dword(device_t kdev, int where, uint32_t *val)
{

	*val = (uint32_t)pci_read_config(kdev, where, 4);
	return (0);
}

static inline int
pci_write_config_dword(device_t kdev, int where, uint32_t val)
{

	pci_write_config(kdev, where, val, 4);
	return (0);
}

/*
 * Run callback(data) on every CPU via smp_rendezvous(9).
 * NOTE(review): the "wait" argument is not used by this implementation.
 */
static inline void
on_each_cpu(void callback(void *data), void *data, int wait)
{

	smp_rendezvous(NULL, callback, NULL, data);
}

void	hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
	    int groupsize, char *linebuf, size_t linebuflen, bool ascii);

/* Trace unimplemented paths when both drm_debug and drm_notyet are set. */
#define KIB_NOTYET()							\
do {									\
	if (drm_debug && drm_notyet)					\
		printf("NOTYET: %s at %s:%d\n", __func__, __FILE__, __LINE__); \
} while (0)

#endif /* _DRM_OS_FREEBSD_H_ */