/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

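/*
 * Overview (summary of the scheme implemented below): userptr objects wrap
 * anonymous process memory. The backing pages are pinned on demand with
 * pin_user_pages_fast() and tracked via an mmu_interval_notifier. Before
 * each use the interval sequence is sampled with mmu_interval_read_begin(),
 * the pages are (re)pinned, and mmu_interval_read_retry() then confirms that
 * no invalidation (munmap, migration, ...) raced with the pinning; if one
 * did, the caller drops everything and retries with -EAGAIN.
 */
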
/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the range (mm) is about to update
 * @range: details on the invalidation
 * @cur_seq: Value to pass to mmu_interval_set_seq()
 *
 * Bump the interval sequence so that users of the backing pages notice the
 * invalidation and re-pin the pages before their next use.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{
	mmu_interval_set_seq(mni, cur_seq);
	return true;
}

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = {
	.invalidate = i915_gem_userptr_invalidate,
};

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{
	return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
					    obj->userptr.ptr, obj->base.size,
					    &i915_gem_userptr_notifier_ops);
}

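/*
 * obj->userptr.page_ref tracks users of the pinned page array in
 * obj->userptr.pvec: i915_gem_object_userptr_submit_init() installs the
 * array under a temporary reference, i915_gem_userptr_get_pages() takes the
 * long-lived reference that backs the object's sg_table, and the pages are
 * only unpinned and the array freed once the count drops to zero below.
 */
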
static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{
	struct page **pvec = NULL;

	assert_object_held_shared(obj);

	if (!--obj->userptr.page_ref) {
		pvec = obj->userptr.pvec;
		obj->userptr.pvec = NULL;
	}
	GEM_BUG_ON(obj->userptr.page_ref < 0);

	if (pvec) {
		const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(pvec, num_pages);
		kvfree(pvec);
	}
}

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
	struct sg_table *st;
	struct page **pvec;
	unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
	int ret;

	if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
		return -E2BIG;

	num_pages = obj->base.size >> PAGE_SHIFT;
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	if (!obj->userptr.page_ref) {
		ret = -EAGAIN;
		goto err_free;
	}

	obj->userptr.page_ref++;
	pvec = obj->userptr.pvec;

alloc_table:
	ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0,
						num_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (ret)
		goto err;

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		sg_free_table(st);

		if (max_segment > PAGE_SIZE) {
			max_segment = PAGE_SIZE;
			goto alloc_table;
		}

		goto err;
	}

	WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st);

	return 0;

err:
	i915_gem_object_userptr_drop_ref(obj);
err_free:
	kfree(st);
	return ret;
}

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	if (!pages)
		return;

	__i915_gem_object_release_shmem(obj, pages, true);
	i915_gem_gtt_finish_pages(obj, pages);

	/*
	 * We always mark objects as dirty when they are used by the GPU,
	 * just in case. However, if we set the vma as being read-only we know
	 * that the object will never have been written to.
	 */
	if (i915_gem_object_is_readonly(obj))
		obj->mm.dirty = false;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty && trylock_page(page)) {
			/*
			 * As this may not be anonymous memory (e.g. shmem)
			 * but exist on a real mapping, we have to lock
			 * the page in order to dirty it -- holding
			 * the page reference is not sufficient to
			 * prevent the inode from being truncated.
			 * Play safe and take the lock.
			 *
			 * However...!
			 *
			 * The mmu-notifier can be invalidated for a
			 * migrate_folio, that is already holding the lock
			 * on the folio. Such a try_to_unmap() will result
			 * in us calling put_pages() and so recursively try
			 * to lock the page. We avoid that deadlock with
			 * a trylock_page() and in exchange we risk missing
			 * some page dirtying.
			 */
			set_page_dirty(page);
			unlock_page(page);
		}

		mark_page_accessed(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);

	i915_gem_object_userptr_drop_ref(obj);
}

static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
		return -EBUSY;

	assert_object_held(obj);

	pages = __i915_gem_object_unset_pages(obj);
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_userptr_put_pages(obj, pages);

	return err;
}

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{
	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
	struct page **pvec;
	unsigned int gup_flags = 0;
	unsigned long notifier_seq;
	int pinned, ret;

	if (obj->userptr.notifier.mm != current->mm)
		return -EFAULT;

	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		return ret;

	if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
		i915_gem_object_unlock(obj);
		return 0;
	}

	ret = i915_gem_object_userptr_unbind(obj);
	i915_gem_object_unlock(obj);
	if (ret)
		return ret;

	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!i915_gem_object_is_readonly(obj))
		gup_flags |= FOLL_WRITE;

	pinned = 0;
	while (pinned < num_pages) {
		ret = pin_user_pages_fast(obj->userptr.ptr + pinned * PAGE_SIZE,
					  num_pages - pinned, gup_flags,
					  &pvec[pinned]);
		if (ret < 0)
			goto out;

		pinned += ret;
	}

	ret = i915_gem_object_lock_interruptible(obj, NULL);
	if (ret)
		goto out;

	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    !obj->userptr.page_ref ? notifier_seq :
				    obj->userptr.notifier_seq)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	if (!obj->userptr.page_ref++) {
		obj->userptr.pvec = pvec;
		obj->userptr.notifier_seq = notifier_seq;
		pvec = NULL;
		ret = ____i915_gem_object_get_pages(obj);
	}

	obj->userptr.page_ref--;

out_unlock:
	i915_gem_object_unlock(obj);

out:
	if (pvec) {
		unpin_user_pages(pvec, pinned);
		kvfree(pvec);
	}

	return ret;
}

int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{
	if (mmu_interval_read_retry(&obj->userptr.notifier,
				    obj->userptr.notifier_seq)) {
		/* We collided with the mmu notifier, need to retry */

		return -EAGAIN;
	}

	return 0;
}

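/*
 * The two functions above are intended to be used as a pair around request
 * submission (as the execbuffer path does): submit_init() (re)pins the user
 * pages before a request is built, and submit_done() checks, once the request
 * is queued, that no invalidation raced with the submission. -EAGAIN from
 * either means the whole operation should be restarted.
 */
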
int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_userptr_submit_init(obj);
	if (err)
		return err;

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (!err) {
		/*
		 * Since we only check validity, not use the pages,
		 * it doesn't matter if we collide with the mmu notifier,
		 * and -EAGAIN handling is not required.
		 */
		err = i915_gem_object_pin_pages(obj);
		if (!err)
			i915_gem_object_unpin_pages(obj);

		i915_gem_object_unlock(obj);
	}

	return err;
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
	GEM_WARN_ON(obj->userptr.page_ref);

	if (!obj->userptr.notifier.mm)
		return;

	mmu_interval_notifier_remove(&obj->userptr.notifier);
	obj->userptr.notifier.mm = NULL;
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
	drm_dbg(obj->base.dev, "Exporting userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{
	drm_dbg(obj->base.dev, "pwrite to userptr no longer allowed\n");

	return -EINVAL;
}

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{
	drm_dbg(obj->base.dev, "pread from userptr no longer allowed\n");

	return -EINVAL;
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
	.name = "i915_gem_object_userptr",
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
		 I915_GEM_OBJECT_NO_MMAP |
		 I915_GEM_OBJECT_IS_PROXY,
	.get_pages = i915_gem_userptr_get_pages,
	.put_pages = i915_gem_userptr_put_pages,
	.dmabuf_export = i915_gem_userptr_dmabuf_export,
	.pwrite = i915_gem_userptr_pwrite,
	.pread = i915_gem_userptr_pread,
	.release = i915_gem_userptr_release,
};

#endif

static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	VMA_ITERATOR(vmi, mm, addr);
	struct vm_area_struct *vma;
	unsigned long end = addr + len;

	mmap_read_lock(mm);
	for_each_vma_range(vmi, vma, end) {
		/* Check for holes, note that we also update the addr below */
		if (vma->vm_start > addr)
			break;

		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			break;

		addr = vma->vm_end;
	}
	mmap_read_unlock(mm);

	if (vma || addr < end)
		return -EFAULT;
	return 0;
}

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
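/*
 * For illustration only: a minimal userspace caller of this ioctl might look
 * roughly like the sketch below (struct and ioctl number as defined in the
 * uapi header include/uapi/drm/i915_drm.h; error handling omitted):
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int create_userptr_bo(int drm_fd, size_t size, uint32_t *handle)
 *	{
 *		struct drm_i915_gem_userptr arg;
 *		void *ptr;
 *
 *		// Both pointer and size must be page aligned.
 *		if (posix_memalign(&ptr, 4096, size))
 *			return -1;
 *
 *		memset(&arg, 0, sizeof(arg));
 *		arg.user_ptr = (uintptr_t)ptr;
 *		arg.user_size = size;
 *		arg.flags = 0;		// or I915_USERPTR_READ_ONLY, _PROBE
 *
 *		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *			return -1;
 *
 *		*handle = arg.handle;	// GEM handle wrapping ptr
 *		return 0;
 *	}
 */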
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	static struct lock_class_key __maybe_unused lock_class;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object __maybe_unused *obj;
	int __maybe_unused ret;
	u32 __maybe_unused handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw without
		 * LLC and broken snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED |
			    I915_USERPTR_PROBE))
		return -EINVAL;

	if (i915_gem_object_size_2big(args->user_size))
		return -E2BIG;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_UNSYNCHRONIZED)
		return -ENODEV;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		if (!to_gt(dev_priv)->vm->has_read_only)
			return -ENODEV;
	}

	if (args->flags & I915_USERPTR_PROBE) {
		/*
		 * Check that the range pointed to represents real struct
		 * pages and not iomappings (at this moment in time!)
		 */
		ret = probe_range(current->mm, args->user_ptr, args->user_size);
		if (ret)
			return ret;
	}

#ifdef CONFIG_MMU_NOTIFIER
	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
			     I915_BO_ALLOC_USER);
	obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	obj->userptr.notifier_seq = ULONG_MAX;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mmu_notifier(obj);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
#else
	return -ENODEV;
#endif
}