/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#include <drm/drmP.h>
#include "amdgpu.h"

struct amdgpu_vram_mgr {
	struct drm_mm mm;
	spinlock_t lock;
};

/**
 * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
 *
 * @man: TTM memory type manager
 * @p_size: maximum size of VRAM
 *
 * Allocate and initialize the VRAM manager.
 */
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
				unsigned long p_size)
{
	struct amdgpu_vram_mgr *mgr;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	drm_mm_init(&mgr->mm, 0, p_size);
	spin_lock_init(&mgr->lock);
	man->priv = mgr;
	return 0;
}

/**
 * amdgpu_vram_mgr_fini - free and destroy VRAM manager
 *
 * @man: TTM memory type manager
 *
 * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
 * allocated inside it.
 */
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_vram_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	if (!drm_mm_clean(&mgr->mm)) {
		spin_unlock(&mgr->lock);
		return -EBUSY;
	}

	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
	man->priv = NULL;
	return 0;
}
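
/*
 * Note on the allocation strategy (explanatory comment; the numbers are
 * examples only, not driver defaults): unless the BO must be physically
 * contiguous or the vram_page_split module parameter is -1,
 * amdgpu_vram_mgr_new() below splits a request into several drm_mm nodes of
 * at most pages_per_node pages each. For instance, with vram_page_split=512
 * and a 2048 page BO whose page_alignment is <= 512, it computes
 * pages_per_node = 512 and num_nodes = DIV_ROUND_UP(2048, 512) = 4, i.e. the
 * BO may be backed by four independent VRAM ranges instead of one contiguous
 * 2048 page block.
 */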

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
	    amdgpu_vram_page_split == -1) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
				     mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; i < num_nodes; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;
		unsigned long start;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		/* Calculate a virtual BO start address to easily check if
		 * everything is CPU accessible.
		 */
		start = nodes[i].start + nodes[i].size;
		if (start > mem->num_pages)
			start -= mem->num_pages;
		else
			start = 0;
		mem->start = max(mem->start, start);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);

	kfree(nodes);
	return r == -ENOSPC ? 0 : r;
}

/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

/**
 * amdgpu_vram_mgr_debug - dump VRAM table
 *
 * @man: TTM memory type manager
 * @prefix: text prefix
 *
 * Dump the table content using printk.
 */
static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
				  const char *prefix)
{
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_printer p = drm_debug_printer(prefix);

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, &p);
	spin_unlock(&mgr->lock);
}

const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
	.init		= amdgpu_vram_mgr_init,
	.takedown	= amdgpu_vram_mgr_fini,
	.get_node	= amdgpu_vram_mgr_new,
	.put_node	= amdgpu_vram_mgr_del,
	.debug		= amdgpu_vram_mgr_debug
};
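
/*
 * Usage sketch (illustrative only; the actual hookup lives in amdgpu_ttm.c
 * and may differ in detail): the function table above is plugged into TTM
 * when the VRAM memory type is registered, roughly as follows:
 *
 *	case TTM_PL_VRAM:
 *		man->func = &amdgpu_vram_mgr_func;
 *		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
 *		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
 *		man->default_caching = TTM_PL_FLAG_WC;
 *		break;
 *
 * ttm_bo_init_mm() then calls back into amdgpu_vram_mgr_init() through the
 * ->init hook with the VRAM size in pages, and every VRAM placement request
 * reaches amdgpu_vram_mgr_new() and amdgpu_vram_mgr_del() through the
 * ->get_node and ->put_node hooks.
 */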