/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

extern void prep_compound_page(struct page *page, unsigned long order);
extern void prep_compound_gigantic_page(struct page *page, unsigned long order);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into refcounted with
 * a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);

/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already acquired when we use these,
 * so we don't need atomic page->flags operations here.
 */
static inline unsigned long page_order(struct page *page)
{
	VM_BUG_ON(!PageBuddy(page));
	return page_private(page);
}

extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * unevictable_migrate_page() is called only from migrate_page_copy() to
 * migrate the unevictable flag to the new page.
 * Note that the old page has been isolated from the LRU lists at this
 * point, so we don't need to worry about LRU statistics.
 */
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
	if (TestClearPageUnevictable(old))
		SetPageUnevictable(new);
}
#else
static inline void unevictable_migrate_page(struct page *new, struct page *old)
{
}
#endif

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine if it's being mapped into a LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}
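/*
 * Illustrative sketch only, not part of this header: roughly how a
 * fault-path caller would combine is_mlocked_vma() with the LRU
 * helpers when adding a freshly faulted anonymous page.
 * example_add_new_page() is a hypothetical name; cf. page_evictable()
 * in mm/vmscan.c and lru_cache_add_active_or_unevictable().  The LRU
 * helpers are assumed to come from linux/swap.h.
 */
#if 0	/* example, never compiled */
static void example_add_new_page(struct page *page,
				 struct vm_area_struct *vma)
{
	if (is_mlocked_vma(vma, page))
		/* now PageMlocked: keep it off the normal LRU lists */
		add_page_to_unevictable_list(page);
	else
		lru_cache_add_active_anon(page);
}
#endif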
/*
 * Must be called with the vma's mmap_sem held for read, and the page locked.
 */
extern void mlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

/*
 * free_page_mlock() -- clean up attempts to free an mlocked() page.
 * The page should not be on the lru, so there is no need to fix that up.
 * free_pages_check() will verify...
 */
static inline void free_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page))) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		__count_vm_event(UNEVICTABLE_MLOCKFREED);
		local_irq_restore(flags);
	}
}

#else /* CONFIG_UNEVICTABLE_LRU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }
static inline void free_page_mlock(struct page *page) { }

#endif /* CONFIG_UNEVICTABLE_LRU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
					struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
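/*
 * Illustrative sketch only, not part of this header: how the pair
 * above is meant to be used.  example_clear_huge_page() is a
 * hypothetical name, modelled on the clear/copy loops in mm/hugetlb.c;
 * it visits every subpage of a huge page without assuming the mem_map
 * is contiguous past a MAX_ORDER_NR_PAGES boundary.
 * clear_user_highpage() is assumed from linux/highmem.h.
 */
#if 0	/* example, never compiled */
static void example_clear_huge_page(struct page *base, unsigned long addr,
				    unsigned int order)
{
	int i;
	struct page *p = base;

	for (i = 0; i < (1 << order); i++, p = mem_map_next(p, base, i)) {
		cond_resched();
		clear_user_highpage(p, addr + i * PAGE_SIZE);
	}
}
#endif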
/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases.  SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

#define GUP_FLAGS_WRITE 0x1
#define GUP_FLAGS_FORCE 0x2
#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4

int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas);

#endif
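/*
 * Illustrative sketch only, not part of this header: a minimal
 * __get_user_pages() call in the style of the get_user_pages()
 * wrappers.  example_pin_one_page() is a hypothetical name; the
 * caller is assumed to hold mm->mmap_sem for read.
 */
#if 0	/* example, never compiled */
static struct page *example_pin_one_page(struct task_struct *tsk,
					 struct mm_struct *mm,
					 unsigned long addr, int write)
{
	struct page *page;
	int flags = write ? GUP_FLAGS_WRITE : 0;

	/* returns the number of pages pinned; 1 on success here */
	if (__get_user_pages(tsk, mm, addr & PAGE_MASK, 1, flags,
			     &page, NULL) != 1)
		return NULL;
	return page;	/* caller releases with page_cache_release() */
}
#endif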