/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

#ifndef _TTM_DEVICE_H_
#define _TTM_DEVICE_H_

#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_pool.h>

#define TTM_NUM_MEM_TYPES 8

struct ttm_device;
struct ttm_placement;
struct ttm_buffer_object;
struct ttm_operation_ctx;

/**
 * struct ttm_global - Buffer object driver global data.
 *
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages. Constant after init.
 * @device_list: List of buffer object devices. Protected by ttm_global_mutex.
 * @bo_count: Number of buffer objects allocated by all devices.
 */
extern struct ttm_global {

	/**
	 * Constant after init.
	 */

	struct page *dummy_read_page;

	/**
	 * Protected by ttm_global_mutex.
	 */
	struct list_head device_list;

	/**
	 * Internal protection.
	 */
	atomic_t bo_count;
} ttm_glob;

struct ttm_device_funcs {
	/**
	 * ttm_tt_create
	 *
	 * @bo: The buffer object to create the ttm for.
	 * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
	 *
	 * Create a struct ttm_tt to back data with system memory pages.
	 * No pages are actually allocated.
	 * Returns:
	 * NULL: Out of memory.
	 */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
					uint32_t page_flags);

	/**
	 * ttm_tt_populate
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 * @ctx: Operation context for the backing page allocation.
	 *
	 * Allocate all backing pages
	 * Returns:
	 * -ENOMEM: Out of memory.
	 */
	int (*ttm_tt_populate)(struct ttm_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx);

	/**
	 * ttm_tt_unpopulate
	 *
	 * @bdev: Pointer to a ttm device.
	 * @ttm: The struct ttm_tt to contain the backing pages.
	 *
	 * Free all backing page
	 */
	void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
				  struct ttm_tt *ttm);

	/**
	 * ttm_tt_destroy
	 *
	 * @bdev: Pointer to a ttm device
	 * @ttm: Pointer to a struct ttm_tt.
	 *
	 * Destroy the backend. This will be call back from ttm_tt_destroy so
	 * don't call ttm_tt_destroy from the callback or infinite loop.
	 */
	void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);

	/**
	 * struct ttm_bo_driver member eviction_valuable
	 *
	 * @bo: the buffer object to be evicted
	 * @place: placement we need room for
	 *
	 * Check with the driver if it is valuable to evict a BO to make room
	 * for a certain placement.
	 */
	bool (*eviction_valuable)(struct ttm_buffer_object *bo,
				  const struct ttm_place *place);
	/**
	 * struct ttm_bo_driver member evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 * This should not cause multihop evictions, and the core will warn
	 * if one is proposed.
	 */

	void (*evict_flags)(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement);

	/**
	 * struct ttm_bo_driver member move:
	 *
	 * @bo: the buffer to move
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 * @ctx: context for this move with parameters
	 * @new_mem: the new memory region receiving the buffer
	 * @hop: placement for driver directed intermediate hop
	 *
	 * Move a buffer between two memory regions.
	 * Returns errno -EMULTIHOP if driver requests a hop
	 */
	int (*move)(struct ttm_buffer_object *bo, bool evict,
		    struct ttm_operation_ctx *ctx,
		    struct ttm_resource *new_mem,
		    struct ttm_place *hop);

	/**
	 * struct ttm_bo_driver_member verify_access
	 *
	 * @bo: Pointer to a buffer object.
	 * @filp: Pointer to a struct file trying to access the object.
	 *
	 * Called from the map / write / read methods to verify that the
	 * caller is permitted to access the buffer object.
	 * This member may be set to NULL, which will refuse this kind of
	 * access for all buffer objects.
	 * This function should return 0 if access is granted, -EPERM otherwise.
	 */
	int (*verify_access)(struct ttm_buffer_object *bo,
			     struct file *filp);

	/**
	 * Hook to notify driver about a resource delete.
	 */
	void (*delete_mem_notify)(struct ttm_buffer_object *bo);

	/**
	 * notify the driver that we're about to swap out this bo
	 */
	void (*swap_notify)(struct ttm_buffer_object *bo);

	/**
	 * Driver callback on when mapping io memory (for bo_move_memcpy
	 * for instance). TTM will take care to call io_mem_free whenever
	 * the mapping is not use anymore. io_mem_reserve & io_mem_free
	 * are balanced.
	 */
	int (*io_mem_reserve)(struct ttm_device *bdev,
			      struct ttm_resource *mem);
	void (*io_mem_free)(struct ttm_device *bdev,
			    struct ttm_resource *mem);

	/**
	 * Return the pfn for a given page_offset inside the BO.
	 *
	 * @bo: the BO to look up the pfn for
	 * @page_offset: the offset to look up
	 */
	unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
				    unsigned long page_offset);

	/**
	 * Read/write memory buffers for ptrace access
	 *
	 * @bo: the BO to access
	 * @offset: the offset from the start of the BO
	 * @buf: pointer to source/destination buffer
	 * @len: number of bytes to copy
	 * @write: whether to read (0) from or write (non-0) to BO
	 *
	 * If successful, this function should return the number of
	 * bytes copied, -EIO otherwise. If the number of bytes
	 * returned is < len, the function may be called again with
	 * the remainder of the buffer to copy.
	 */
	int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
			     void *buf, int len, int write);

	/**
	 * struct ttm_bo_driver member del_from_lru_notify
	 *
	 * @bo: the buffer object deleted from lru
	 *
	 * notify driver that a BO was deleted from LRU.
	 */
	void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

	/**
	 * Notify the driver that we're about to release a BO
	 *
	 * @bo: BO that is about to be released
	 *
	 * Gives the driver a chance to do any cleanup, including
	 * adding fences that may force a delayed delete
	 */
	void (*release_notify)(struct ttm_buffer_object *bo);
};

/**
 * struct ttm_device - Buffer object driver device-specific data.
 *
 * @device_list: Our entry in the global device list.
 * @funcs: Function table for the device.
 * @sysman: Resource manager for the system domain.
 * @man_drv: An array of resource_managers, indexed by memory type.
 * @vma_manager: Address space manager.
 * @pool: page pool for the device.
 * @lru_lock: Protection for the per manager LRU and ddestroy lists.
 * @ddestroy: List of buffer objects with a pending delayed destroy.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 */
struct ttm_device {
	/*
	 * Constant after bo device init
	 */
	struct list_head device_list;
	struct ttm_device_funcs *funcs;

	/*
	 * Access via ttm_manager_type.
	 */
	struct ttm_resource_manager sysman;
	struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];

	/*
	 * Protected by internal locks.
	 */
	struct drm_vma_offset_manager *vma_manager;
	struct ttm_pool pool;

	/*
	 * Protection for the per manager LRU and ddestroy lists.
	 */
	spinlock_t lru_lock;
	struct list_head ddestroy;

	/*
	 * Protected by load / firstopen / lastclose /unload sync.
	 */
	struct address_space *dev_mapping;

	/*
	 * Internal protection.
	 */
	struct delayed_work wq;
};

int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		       gfp_t gfp_flags);

/**
 * ttm_manager_type - Return the resource manager for a memory type.
 *
 * @bdev: Pointer to a ttm device.
 * @mem_type: The memory type; must be < TTM_NUM_MEM_TYPES, the array
 * access is not bounds checked.
 *
 * Returns the manager stored for @mem_type, as installed by
 * ttm_set_driver_manager().
 */
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
	return bdev->man_drv[mem_type];
}

/**
 * ttm_set_driver_manager - Register a resource manager for a memory type.
 *
 * @bdev: Pointer to a ttm device.
 * @type: The memory type; must be < TTM_NUM_MEM_TYPES, the array
 * access is not bounds checked.
 * @manager: The manager to install for @type.
 */
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
					  struct ttm_resource_manager *manager)
{
	bdev->man_drv[type] = manager;
}

int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32);
void ttm_device_fini(struct ttm_device *bdev);

#endif