/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT = 0,
	DRM_MM_SEARCH_BEST = 1 << 0,
	DRM_MM_SEARCH_BELOW = 1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT = 0,
	DRM_MM_CREATE_TOP = 1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_color;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	unsigned scanned_blocks;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
			     u64 *start, u64 *end);
};
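/*
 * Usage sketch (illustrative only, not part of this header's API): a driver
 * embeds a struct drm_mm_node in its own buffer object and lets a struct
 * drm_mm manage an address range. The my_dev/my_bo names below are
 * hypothetical.
 *
 *	struct my_bo {
 *		struct drm_mm_node vma_node;
 *		...
 *	};
 *
 *	// Manage a 256 MiB range starting at offset 0.
 *	drm_mm_init(&my_dev->vram_mm, 0, 256 * 1024 * 1024);
 *
 *	// The node must be cleared to 0 before insertion.
 *	err = drm_mm_insert_node(&my_dev->vram_mm, &bo->vma_node, bo->size,
 *				 0, DRM_MM_SEARCH_DEFAULT);
 *	if (err)	// -ENOSPC if no hole is big enough
 *		return err;
 *
 *	// ... and on teardown:
 *	drm_mm_remove_node(&bo->vma_node);
 *	drm_mm_takedown(&my_dev->vram_mm);
 */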
/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each, so it is not safe against removal of elements. @entry is
 * used internally and will not reflect a real drm_mm_node for the very first
 * hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)
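/*
 * Usage sketch (illustrative only): dumping allocator state with the
 * iterators above, e.g. from a hypothetical driver debug helper. Neither
 * iterator is safe against removal of nodes while walking.
 *
 *	struct drm_mm_node *entry;
 *	u64 hole_start, hole_end;
 *
 *	drm_mm_for_each_node(entry, &my_dev->vram_mm)
 *		pr_info("node [%llx + %llx]\n", entry->start, entry->size);
 *
 *	drm_mm_for_each_hole(entry, &my_dev->vram_mm, hole_start, hole_end)
 *		pr_info("hole [%llx, %llx)\n", hole_start, hole_end);
 */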
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}

int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					unsigned alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      unsigned alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
		 u64 start,
		 u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

struct drm_mm_node *
drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);

struct drm_mm_node *
drm_mm_interval_next(struct drm_mm_node *node, u64 start, u64 last);

void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);
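/*
 * Usage sketch for the eviction scan helpers above (illustrative only; the
 * candidate-list handling and my_evict() are hypothetical driver code):
 *
 *	LIST_HEAD(scan_list);
 *
 *	drm_mm_init_scan(&my_dev->vram_mm, size, alignment, 0);
 *
 *	// Feed eviction candidates until removing them would open a hole
 *	// large enough for the new allocation.
 *	list_for_each_entry_safe(bo, tmp, &lru, lru_link) {
 *		list_move(&bo->lru_link, &scan_list);
 *		if (drm_mm_scan_add_block(&bo->vma_node))
 *			break;
 *	}
 *
 *	// Every block added to the scan must be removed again, in reverse
 *	// order; nodes for which this returns true form the hole and must
 *	// actually be evicted before the new node can be inserted.
 *	list_for_each_entry_safe_reverse(bo, tmp, &scan_list, lru_link) {
 *		bool evict = drm_mm_scan_remove_block(&bo->vma_node);
 *		list_move(&bo->lru_link, &lru);
 *		if (evict)
 *			my_evict(bo);
 *	}
 */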
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

#endif