/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright © 2024 Intel Corporation
 */

#ifndef __DRM_GPUSVM_H__
#define __DRM_GPUSVM_H__

#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>

struct dev_pagemap_ops;
struct drm_device;
struct drm_gpusvm;
struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
struct drm_pagemap;
struct drm_pagemap_addr;

/**
 * struct drm_gpusvm_ops - Operations structure for GPU SVM
 *
 * This structure defines the operations for GPU Shared Virtual Memory (SVM).
 * These operations are provided by the GPU driver to manage SVM ranges and
 * notifiers.
 */
struct drm_gpusvm_ops {
	/**
	 * @notifier_alloc: Allocate a GPU SVM notifier (optional)
	 *
	 * Allocate a GPU SVM notifier.
	 *
	 * Return: Pointer to the allocated GPU SVM notifier on success, NULL on failure.
	 */
	struct drm_gpusvm_notifier *(*notifier_alloc)(void);

	/**
	 * @notifier_free: Free a GPU SVM notifier (optional)
	 * @notifier: Pointer to the GPU SVM notifier to be freed
	 *
	 * Free a GPU SVM notifier.
	 */
	void (*notifier_free)(struct drm_gpusvm_notifier *notifier);

	/**
	 * @range_alloc: Allocate a GPU SVM range (optional)
	 * @gpusvm: Pointer to the GPU SVM
	 *
	 * Allocate a GPU SVM range.
	 *
	 * Return: Pointer to the allocated GPU SVM range on success, NULL on failure.
	 */
	struct drm_gpusvm_range *(*range_alloc)(struct drm_gpusvm *gpusvm);

	/**
	 * @range_free: Free a GPU SVM range (optional)
	 * @range: Pointer to the GPU SVM range to be freed
	 *
	 * Free a GPU SVM range.
	 */
	void (*range_free)(struct drm_gpusvm_range *range);

	/**
	 * @invalidate: Invalidate GPU SVM notifier (required)
	 * @gpusvm: Pointer to the GPU SVM
	 * @notifier: Pointer to the GPU SVM notifier
	 * @mmu_range: Pointer to the mmu_notifier_range structure
	 *
	 * Invalidate the GPU page tables. It can safely walk the notifier range
	 * RB tree/list in this function. Called while holding the notifier lock.
	 */
	void (*invalidate)(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_notifier *notifier,
			   const struct mmu_notifier_range *mmu_range);
};

/**
 * struct drm_gpusvm_notifier - Structure representing a GPU SVM notifier
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: MMU interval notifier
 * @itree: Interval tree node for the notifier (inserted in GPU SVM)
 * @entry: List entry for fast interval tree traversal
 * @root: Cached root node of the RB tree containing ranges
 * @range_list: List head containing ranges in the same order they appear in
 *              interval tree. This is useful to keep iterating ranges while
 *              doing modifications to RB tree.
 * @flags: Flags for notifier
 * @flags.removed: Flag indicating whether the MMU interval notifier has been
 *                 removed
 *
 * This structure represents a GPU SVM notifier.
 */
struct drm_gpusvm_notifier {
	struct drm_gpusvm *gpusvm;
	struct mmu_interval_notifier notifier;
	struct interval_tree_node itree;
	struct list_head entry;
	struct rb_root_cached root;
	struct list_head range_list;
	struct {
		u32 removed : 1;
	} flags;
};

/**
 * struct drm_gpusvm_range_flags - Structure representing a GPU SVM range flags
 *
 * @migrate_devmem: Flag indicating whether the range can be migrated to device memory
 * @unmapped: Flag indicating if the range has been unmapped
 * @partial_unmap: Flag indicating if the range has been partially unmapped
 * @has_devmem_pages: Flag indicating if the range has devmem pages
 * @has_dma_mapping: Flag indicating if the range has a DMA mapping
 * @__flags: Flags for range in u16 form (used for READ_ONCE)
 */
struct drm_gpusvm_range_flags {
	union {
		struct {
			/* All flags below must be set upon creation */
			u16 migrate_devmem : 1;
			/* All flags below must be set / cleared under notifier lock */
			u16 unmapped : 1;
			u16 partial_unmap : 1;
			u16 has_devmem_pages : 1;
			u16 has_dma_mapping : 1;
		};
		u16 __flags;
	};
};

/**
 * struct drm_gpusvm_range - Structure representing a GPU SVM range
 *
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier
 * @refcount: Reference count for the range
 * @itree: Interval tree node for the range (inserted in GPU SVM notifier)
 * @entry: List entry for fast interval tree traversal
 * @notifier_seq: Notifier sequence number of the range's pages
 * @dma_addr: Device address array
 * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
 *            Note this is assuming only one drm_pagemap per range is allowed.
 * @flags: Flags for range
 *
 * This structure represents a GPU SVM range used for tracking memory ranges
 * mapped in a DRM device.
 */
struct drm_gpusvm_range {
	struct drm_gpusvm *gpusvm;
	struct drm_gpusvm_notifier *notifier;
	struct kref refcount;
	struct interval_tree_node itree;
	struct list_head entry;
	unsigned long notifier_seq;
	struct drm_pagemap_addr *dma_addr;
	struct drm_pagemap *dpagemap;
	struct drm_gpusvm_range_flags flags;
};

/**
 * struct drm_gpusvm - GPU SVM structure
 *
 * @name: Name of the GPU SVM
 * @drm: Pointer to the DRM device structure
 * @mm: Pointer to the mm_struct for the address space
 * @device_private_page_owner: Device private pages owner
 * @mm_start: Start address of GPU SVM
 * @mm_range: Range of the GPU SVM
 * @notifier_size: Size of individual notifiers
 * @ops: Pointer to the operations structure for GPU SVM
 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
 *               Entries should be powers of 2 in descending order.
 * @num_chunks: Number of chunks
 * @notifier_lock: Read-write semaphore for protecting notifier operations
 * @root: Cached root node of the Red-Black tree containing GPU SVM notifiers
 * @notifier_list: List head containing notifiers in the same order they
 *                 appear in interval tree. This is useful to keep iterating
 *                 notifiers while doing modifications to RB tree.
 *
 * This structure represents a GPU SVM (Shared Virtual Memory) used for tracking
 * memory ranges mapped in a DRM (Direct Rendering Manager) device.
 *
 * No reference counting is provided, as this is expected to be embedded in the
 * driver VM structure along with the struct drm_gpuvm, which handles reference
 * counting.
 */
struct drm_gpusvm {
	const char *name;
	struct drm_device *drm;
	struct mm_struct *mm;
	void *device_private_page_owner;
	unsigned long mm_start;
	unsigned long mm_range;
	unsigned long notifier_size;
	const struct drm_gpusvm_ops *ops;
	const unsigned long *chunk_sizes;
	int num_chunks;
	struct rw_semaphore notifier_lock;
	struct rb_root_cached root;
	struct list_head notifier_list;
#ifdef CONFIG_LOCKDEP
	/**
	 * @lock_dep_map: Annotates drm_gpusvm_range_find_or_insert and
	 * drm_gpusvm_range_remove with a driver provided lock.
	 */
	struct lockdep_map *lock_dep_map;
#endif
};

/**
 * struct drm_gpusvm_ctx - DRM GPU SVM context
 *
 * @check_pages_threshold: Check CPU pages for present if chunk is less than or
 *                         equal to threshold. If not present, reduce chunk
 *                         size.
 * @timeslice_ms: The timeslice MS which in minimum time a piece of memory
 *                remains with either exclusive GPU or CPU access.
 * @in_notifier: entering from a MMU notifier
 * @read_only: operating on read-only memory
 * @devmem_possible: possible to use device memory
 * @devmem_only: use only device memory
 *
 * Context that DRM GPUSVM is operating in (i.e. user arguments).
 */
struct drm_gpusvm_ctx {
	unsigned long check_pages_threshold;
	unsigned long timeslice_ms;
	unsigned int in_notifier :1;
	unsigned int read_only :1;
	unsigned int devmem_possible :1;
	unsigned int devmem_only :1;
};

int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
		    const char *name, struct drm_device *drm,
		    struct mm_struct *mm, void *device_private_page_owner,
		    unsigned long mm_start, unsigned long mm_range,
		    unsigned long notifier_size,
		    const struct drm_gpusvm_ops *ops,
		    const unsigned long *chunk_sizes, int num_chunks);

void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);

void drm_gpusvm_free(struct drm_gpusvm *gpusvm);

unsigned long
drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
			  unsigned long start,
			  unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
				unsigned long fault_addr,
				unsigned long gpuva_start,
				unsigned long gpuva_end,
				const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_range *range);

int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_range *range);

struct drm_gpusvm_range *
drm_gpusvm_range_get(struct drm_gpusvm_range *range);

void drm_gpusvm_range_put(struct drm_gpusvm_range *range);

bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range);

int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
			       struct drm_gpusvm_range *range,
			       const struct drm_gpusvm_ctx *ctx);

void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range,
				  const struct drm_gpusvm_ctx *ctx);

bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
			    unsigned long end);

struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
			 unsigned long end);

struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
		      unsigned long end);

void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
				   const struct mmu_notifier_range *mmu_range);

#ifdef CONFIG_LOCKDEP
/**
 * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
 * @gpusvm: Pointer to the GPU SVM structure.
 * @lock: the lock used to protect the gpuva list. The locking primitive
 * must contain a dep_map field.
 *
 * Call this to annotate drm_gpusvm_range_find_or_insert and
 * drm_gpusvm_range_remove.
 */
#define drm_gpusvm_driver_set_lock(gpusvm, lock) \
	do { \
		if (!WARN((gpusvm)->lock_dep_map, \
			  "GPUSVM range lock should be set only once."))\
			(gpusvm)->lock_dep_map = &(lock)->dep_map;	\
	} while (0)
#else
#define drm_gpusvm_driver_set_lock(gpusvm, lock) do {} while (0)
#endif

/**
 * drm_gpusvm_notifier_lock() - Lock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstract client usage GPU SVM notifier lock, take lock
 */
#define drm_gpusvm_notifier_lock(gpusvm__)	\
	down_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_notifier_unlock() - Unlock GPU SVM notifier
 * @gpusvm__: Pointer to the GPU SVM structure.
 *
 * Abstract client usage GPU SVM notifier lock, drop lock
 */
#define drm_gpusvm_notifier_unlock(gpusvm__)	\
	up_read(&(gpusvm__)->notifier_lock)

/**
 * drm_gpusvm_range_start() - GPU SVM range start address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range start address
 */
static inline unsigned long
drm_gpusvm_range_start(struct drm_gpusvm_range *range)
{
	return range->itree.start;
}

/**
 * drm_gpusvm_range_end() - GPU SVM range end address
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range end address
 */
static inline unsigned long
drm_gpusvm_range_end(struct drm_gpusvm_range *range)
{
	return range->itree.last + 1;
}

/**
 * drm_gpusvm_range_size() - GPU SVM range size
 * @range: Pointer to the GPU SVM range
 *
 * Return: GPU SVM range size
 */
static inline unsigned long
drm_gpusvm_range_size(struct drm_gpusvm_range *range)
{
	return drm_gpusvm_range_end(range) - drm_gpusvm_range_start(range);
}

/**
 * drm_gpusvm_notifier_start() - GPU SVM notifier start address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier start address
 */
static inline unsigned long
drm_gpusvm_notifier_start(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.start;
}

/**
 * drm_gpusvm_notifier_end() - GPU SVM notifier end address
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier end address
 */
static inline unsigned long
drm_gpusvm_notifier_end(struct drm_gpusvm_notifier *notifier)
{
	return notifier->itree.last + 1;
}

/**
 * drm_gpusvm_notifier_size() - GPU SVM notifier size
 * @notifier: Pointer to the GPU SVM notifier
 *
 * Return: GPU SVM notifier size
 */
static inline unsigned long
drm_gpusvm_notifier_size(struct drm_gpusvm_notifier *notifier)
{
	return drm_gpusvm_notifier_end(notifier) -
		drm_gpusvm_notifier_start(notifier);
}

/**
 * __drm_gpusvm_range_next() - Get the next GPU SVM range in the list
 * @range: a pointer to the current GPU SVM range
 *
 * Return: A pointer to the next drm_gpusvm_range if available, or NULL if the
 *         current range is the last one or if the input range is NULL.
 */
static inline struct drm_gpusvm_range *
__drm_gpusvm_range_next(struct drm_gpusvm_range *range)
{
	if (range && !list_is_last(&range->entry,
				   &range->notifier->range_list))
		return list_next_entry(range, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_range() - Iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges. If set, it indicates the start of
 *	     the iterator. If NULL, call drm_gpusvm_range_find() to get the range.
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier. It is safe
 * to use while holding the driver SVM lock or the notifier lock.
 */
#define drm_gpusvm_for_each_range(range__, notifier__, start__, end__)	\
	for ((range__) = (range__) ?:					\
	     drm_gpusvm_range_find((notifier__), (start__), (end__));	\
	     (range__) && (drm_gpusvm_range_start(range__) < (end__));	\
	     (range__) = __drm_gpusvm_range_next(range__))

/**
 * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges
 * @next__: Iterator variable for the ranges temporary storage
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier while
 * removing ranges from it.
 */
#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__)	\
	for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)),	\
	     (next__) = __drm_gpusvm_range_next(range__);				\
	     (range__) && (drm_gpusvm_range_start(range__) < (end__));			\
	     (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))

/**
 * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
 * @notifier: a pointer to the current drm_gpusvm_notifier
 *
 * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
 *         the current notifier is the last one or if the input notifier is
 *         NULL.
 */
static inline struct drm_gpusvm_notifier *
__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
{
	if (notifier && !list_is_last(&notifier->entry,
				      &notifier->gpusvm->notifier_list))
		return list_next_entry(notifier, entry);

	return NULL;
}

/**
 * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
 */
#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__)		\
	for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__));	\
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
	     (notifier__) = __drm_gpusvm_notifier_next(notifier__))

/**
 * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @next__: Iterator variable for the notifiers temporary storage
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
 * removing notifiers from it.
 */
#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__)	\
	for ((notifier__) = drm_gpusvm_notifier_find((gpusvm__), (start__), (end__)),	\
	     (next__) = __drm_gpusvm_notifier_next(notifier__);				\
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
	     (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))

#endif /* __DRM_GPUSVM_H__ */