/**************************************************************************
 *
 * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <dev/drm2/drm_mm.h>

/**
 * In this port the range-manager lock is a default (sleep) mutex
 * (MTX_DEF), not a spinlock as in the upstream note; this should keep
 * scheduling latency low if the range manager ends up with very
 * fragmented allocation patterns.
42 */ 43 44 struct ttm_range_manager { 45 struct drm_mm mm; 46 struct mtx lock; 47 }; 48 49 MALLOC_DEFINE(M_TTM_RMAN, "ttm_rman", "TTM Range Manager"); 50 51 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, 52 struct ttm_buffer_object *bo, 53 struct ttm_placement *placement, 54 struct ttm_mem_reg *mem) 55 { 56 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; 57 struct drm_mm *mm = &rman->mm; 58 struct drm_mm_node *node = NULL; 59 unsigned long lpfn; 60 int ret; 61 62 lpfn = placement->lpfn; 63 if (!lpfn) 64 lpfn = man->size; 65 do { 66 ret = drm_mm_pre_get(mm); 67 if (unlikely(ret)) 68 return ret; 69 70 mtx_lock(&rman->lock); 71 node = drm_mm_search_free_in_range(mm, 72 mem->num_pages, mem->page_alignment, 73 placement->fpfn, lpfn, 1); 74 if (unlikely(node == NULL)) { 75 mtx_unlock(&rman->lock); 76 return 0; 77 } 78 node = drm_mm_get_block_atomic_range(node, mem->num_pages, 79 mem->page_alignment, 80 placement->fpfn, 81 lpfn); 82 mtx_unlock(&rman->lock); 83 } while (node == NULL); 84 85 mem->mm_node = node; 86 mem->start = node->start; 87 return 0; 88 } 89 90 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, 91 struct ttm_mem_reg *mem) 92 { 93 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; 94 95 if (mem->mm_node) { 96 mtx_lock(&rman->lock); 97 drm_mm_put_block(mem->mm_node); 98 mtx_unlock(&rman->lock); 99 mem->mm_node = NULL; 100 } 101 } 102 103 static int ttm_bo_man_init(struct ttm_mem_type_manager *man, 104 unsigned long p_size) 105 { 106 struct ttm_range_manager *rman; 107 int ret; 108 109 rman = malloc(sizeof(*rman), M_TTM_RMAN, M_ZERO | M_WAITOK); 110 ret = drm_mm_init(&rman->mm, 0, p_size); 111 if (ret) { 112 free(rman, M_TTM_RMAN); 113 return ret; 114 } 115 116 mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF); 117 man->priv = rman; 118 return 0; 119 } 120 121 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) 122 { 123 struct ttm_range_manager *rman = (struct 
ttm_range_manager *) man->priv; 124 struct drm_mm *mm = &rman->mm; 125 126 mtx_lock(&rman->lock); 127 if (drm_mm_clean(mm)) { 128 drm_mm_takedown(mm); 129 mtx_unlock(&rman->lock); 130 mtx_destroy(&rman->lock); 131 free(rman, M_TTM_RMAN); 132 man->priv = NULL; 133 return 0; 134 } 135 mtx_unlock(&rman->lock); 136 return -EBUSY; 137 } 138 139 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, 140 const char *prefix) 141 { 142 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; 143 144 mtx_lock(&rman->lock); 145 drm_mm_debug_table(&rman->mm, prefix); 146 mtx_unlock(&rman->lock); 147 } 148 149 const struct ttm_mem_type_manager_func ttm_bo_manager_func = { 150 ttm_bo_man_init, 151 ttm_bo_man_takedown, 152 ttm_bo_man_get_node, 153 ttm_bo_man_put_node, 154 ttm_bo_man_debug 155 }; 156