1 #ifndef _LINUX_MM_H
2 #define _LINUX_MM_H
3
4 #include <assert.h>
5 #include <string.h>
6 #include <stdlib.h>
7 #include <errno.h>
8 #include <limits.h>
9 #include <stdio.h>
10
/* DMA addresses are plain integers in this userspace test shim. */
typedef unsigned long dma_addr_t;

/*
 * Kernel branch-prediction hint.  Defined to nothing here, so a use such
 * as unlikely(x) expands to just (x) in the macros below.
 */
#define unlikely

#define BUG_ON(x) assert(!(x))

/* Evaluates to the normalized (0/1) condition, like the kernel's WARN_ON(). */
#define WARN_ON(condition) ({ \
	int __ret_warn_on = !!(condition); \
	unlikely(__ret_warn_on); \
})

/*
 * NOTE(review): unlike the kernel version this aborts on EVERY hit, not
 * only the first -- acceptable in tests, where any warning is fatal.
 */
#define WARN_ON_ONCE(condition) ({ \
	int __ret_warn_on = !!(condition); \
	if (unlikely(__ret_warn_on)) \
		assert(0); \
	unlikely(__ret_warn_on); \
})

/* Fixed 4 KiB pages, the most common kernel configuration. */
#define PAGE_SIZE (4096)
#define PAGE_SHIFT (12)
#define PAGE_MASK (~(PAGE_SIZE-1))
32
/* Round x up to the next multiple of a (a must be a power of two). */
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
/* Round x down to the previous multiple of a (power of two). */
#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))

#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* Byte offset of pointer p within its (4 KiB) page. */
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
41
/*
 * Identity page <-> virtual-address mapping for userspace tests: a
 * "struct page *" is simply the buffer address itself.
 * The argument is fully parenthesized so that an expression argument,
 * e.g. virt_to_page(p + 1), casts the whole expression rather than
 * binding the cast to its first operand only.
 */
#define virt_to_page(x) ((void *)(x))
#define page_address(x) ((void *)(x))
44
/*
 * Physical addresses do not exist in this userspace shim; any caller
 * that reaches this is a test bug, so abort loudly.
 */
static inline unsigned long page_to_phys(struct page *page)
{
	assert(0);

	return 0;
}
51
/*
 * With the identity page mapping above, a pfn is simply the address
 * divided by the page size.  Both expansions carry full outer
 * parentheses so they stay well-formed inside larger expressions
 * (macro hygiene; the original pfn_to_page exposed a bare cast).
 */
#define page_to_pfn(page) ((unsigned long)(page) / PAGE_SIZE)
#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
54
/*
 * Core of min()/min_t(): a GNU statement expression that evaluates each
 * argument exactly once.  The (void) address comparison provokes a
 * compiler warning when the two operands have incompatible types.
 */
#define __min(t1, t2, min1, min2, x, y) ({ \
	t1 min1 = (x); \
	t2 min2 = (y); \
	(void) (&min1 == &min2); \
	min1 < min2 ? min1 : min2; })

/* Token pasting with one extra level of argument expansion. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)

/* Fresh identifier per use (via __COUNTER__), so nested uses don't shadow. */
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

#define min(x, y) \
	__min(typeof(x), typeof(y), \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
	      x, y)

/* min() with both operands first converted to an explicit type. */
#define min_t(type, x, y) \
	__min(type, type, \
	      __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \
	      x, y)

/* Page faults are never "disabled" in userspace. */
#define pagefault_disabled() (0)
77
/* Highmem mapping has no userspace equivalent; unimplemented, abort if hit. */
static inline void *kmap(struct page *page)
{
	assert(0);

	return NULL;
}
84
/* Atomic highmem mapping: likewise unimplemented, abort if hit. */
static inline void *kmap_atomic(struct page *page)
{
	assert(0);

	return NULL;
}
91
/* Counterpart of kmap(); unreachable here, abort if hit. */
static inline void kunmap(void *addr)
{
	assert(0);
}
96
/* Counterpart of kmap_atomic(); unreachable here, abort if hit. */
static inline void kunmap_atomic(void *addr)
{
	assert(0);
}
101
__get_free_page(unsigned int flags)102 static inline unsigned long __get_free_page(unsigned int flags)
103 {
104 return (unsigned long)malloc(PAGE_SIZE);
105 }
106
/* Release a page previously obtained from __get_free_page(). */
static inline void free_page(unsigned long page)
{
	free((void *)page);
}
111
/*
 * Minimal kmalloc() stand-in: gfp flags carry no meaning in userspace,
 * so this is plain malloc().  Returns NULL on allocation failure.
 */
static inline void *kmalloc(unsigned int size, unsigned int flags)
{
	void *mem;

	mem = malloc(size);
	return mem;
}
116
/*
 * Allocate an array of @n elements of @size bytes each.
 * The multiplication is overflow-checked: on unsigned wrap-around the
 * kernel's kmalloc_array() returns NULL rather than silently
 * allocating a too-small buffer, and this shim now mirrors that.
 * @flags: gfp flags, ignored in userspace.
 */
static inline void *
kmalloc_array(unsigned int n, unsigned int size, unsigned int flags)
{
	if (size != 0 && n > UINT_MAX / size)
		return NULL;	/* n * size would overflow */

	return malloc((size_t)n * size);
}
122
#define kfree(x) free(x)

/* kmemleak instrumentation is compiled out in userspace. */
#define kmemleak_alloc(a, b, c, d)
#define kmemleak_free(a)

/* No slab allocator here; no page is ever a slab page. */
#define PageSlab(p) (0)
/* Cache flushing is unnecessary for ordinary user memory. */
#define flush_dcache_page(p)

/* Largest errno value encodable in an ERR_PTR(). */
#define MAX_ERRNO 4095

/* True when x falls in the top MAX_ERRNO values of the address space. */
#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
134
/*
 * Encode a negative errno value as a pointer, kernel-style.
 * NOTE(review): __must_check / __force presumably come from a
 * compiler-attribute shim header not visible in this file -- confirm
 * it is included before this one.
 */
static inline void * __must_check ERR_PTR(long error)
{
	return (void *) error;
}
139
/* Decode an ERR_PTR() back into its (negative) errno value. */
static inline long __must_check PTR_ERR(__force const void *ptr)
{
	return (long) ptr;
}
144
/*
 * Whether ptr is an encoded errno rather than a valid pointer.
 * NOTE(review): uses bool without <stdbool.h> in this file -- presumably
 * supplied by an earlier include; verify.
 */
static inline bool __must_check IS_ERR(__force const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
149
/* The errno encoded in ptr, or 0 when ptr is a valid pointer. */
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}
157
/* Every CONFIG_* option reads as disabled in this test build. */
#define IS_ENABLED(x) (0)
159
160 #endif
161