1 /* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
2 /******************************************************************************
3 *
4 * Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
5 *
6 * Copyright (C) 2000 - 2023, Intel Corp.
7 *
8 *****************************************************************************/
9
10 #ifndef __ACLINUXEX_H__
11 #define __ACLINUXEX_H__
12
13 #ifdef __KERNEL__
14
15 #ifndef ACPI_USE_NATIVE_DIVIDE
16
17 #ifndef ACPI_DIV_64_BY_32
18 #define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) \
19 do { \
20 u64 (__n) = ((u64) n_hi) << 32 | (n_lo); \
21 (r32) = do_div ((__n), (d32)); \
22 (q32) = (u32) (__n); \
23 } while (0)
24 #endif
25
26 #ifndef ACPI_SHIFT_RIGHT_64
27 #define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) \
28 do { \
29 (n_lo) >>= 1; \
30 (n_lo) |= (((n_hi) & 1) << 31); \
31 (n_hi) >>= 1; \
32 } while (0)
33 #endif
34
35 #endif
36
/*
 * Overrides for in-kernel ACPICA
 */

/*
 * OSL startup hook.  ACPI_INIT_FUNCTION presumably places this in init
 * text so it can be discarded after boot — TODO confirm its definition.
 */
acpi_status ACPI_INIT_FUNCTION acpi_os_initialize(void);

/* OSL shutdown counterpart to acpi_os_initialize(). */
acpi_status acpi_os_terminate(void);
43
44 /*
45 * The irqs_disabled() check is for resume from RAM.
46 * Interrupts are off during resume, just like they are for boot.
47 * However, boot has (system_state != SYSTEM_RUNNING)
48 * to quiet __might_sleep() in kmalloc() and resume does not.
49 *
50 * These specialized allocators have to be macros for their allocations to be
51 * accounted separately (to have separate alloc_tag).
52 */
/* Allocate uninitialized memory; atomic allocation when IRQs are off. */
#define acpi_os_allocate(_size) \
	kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)

/* As acpi_os_allocate(), but the returned memory is zero-filled. */
#define acpi_os_allocate_zeroed(_size) \
	kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)

/* Take a zeroed object from a kmem_cache, using the same GFP selection. */
#define acpi_os_acquire_object(_cache) \
	kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL)
61
/* Free memory obtained from acpi_os_allocate()/acpi_os_allocate_zeroed(). */
static inline void acpi_os_free(void *memory)
{
	kfree(memory);
}
66
/*
 * Use the current task pointer as a unique thread identifier for ACPICA.
 * The pointer value itself serves as the ID; it is never dereferenced.
 */
static inline acpi_thread_id acpi_os_get_thread_id(void)
{
	return (acpi_thread_id) (unsigned long)current;
}
71
/*
 * When lockdep is enabled, the spin_lock_init() macro stringifies its
 * argument and uses that as a name for the lock in debugging.
 * By executing spin_lock_init() in a macro the key changes from "lock" for
 * all locks to the name of the argument of acpi_os_create_lock(), which
 * prevents lockdep from reporting false positives for ACPICA locks.
 */
/*
 * Allocate and initialize a spinlock, storing it through __handle.
 * Evaluates to AE_OK on success or AE_NO_MEMORY if allocation failed.
 * Kept as a macro so lockdep's lock-class name comes from the caller's
 * argument rather than a single shared name.
 */
#define acpi_os_create_lock(__handle) \
	({ \
		spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})
88
89
/*
 * Raw-spinlock variant of acpi_os_create_lock(): allocates and
 * initializes a raw_spinlock_t, storing it through __handle.
 * Evaluates to AE_OK on success or AE_NO_MEMORY on allocation failure.
 */
#define acpi_os_create_raw_lock(__handle) \
	({ \
		raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
		if (lock) { \
			*(__handle) = lock; \
			raw_spin_lock_init(*(__handle)); \
		} \
		lock ? AE_OK : AE_NO_MEMORY; \
	})
99
/*
 * Acquire a raw spinlock while disabling local interrupts.  The returned
 * flags value must be handed back to acpi_os_release_raw_lock().
 */
static inline acpi_cpu_flags acpi_os_acquire_raw_lock(acpi_raw_spinlock lockp)
{
	acpi_cpu_flags flags;

	raw_spin_lock_irqsave(lockp, flags);
	return flags;
}
107
/*
 * Release a raw spinlock and restore the interrupt state that was saved
 * by the matching acpi_os_acquire_raw_lock() call.
 */
static inline void acpi_os_release_raw_lock(acpi_raw_spinlock lockp,
					    acpi_cpu_flags flags)
{
	raw_spin_unlock_irqrestore(lockp, flags);
}
113
/* Free a lock created by acpi_os_create_raw_lock(). */
static inline void acpi_os_delete_raw_lock(acpi_raw_spinlock handle)
{
	ACPI_FREE(handle);
}
118
acpi_os_readable(void * pointer,acpi_size length)119 static inline u8 acpi_os_readable(void *pointer, acpi_size length)
120 {
121 return TRUE;
122 }
123
/* Debugger stub: nothing to set up in-kernel, so always succeed. */
static inline acpi_status acpi_os_initialize_debugger(void)
{
	return AE_OK;
}
128
/*
 * Debugger stub: nothing to tear down in-kernel.  The body is
 * intentionally empty (the previous bare "return;" was redundant
 * in a void function).
 */
static inline void acpi_os_terminate_debugger(void)
{
}
133
134 /*
135 * OSL interfaces added by Linux
136 */
137
138 #endif /* __KERNEL__ */
139
140 #endif /* __ACLINUXEX_H__ */
141