/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"

/*
 * GEM buffer object implementation.
 */

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */

struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/**
	 * # of users of paddr
	 */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};

/* -----------------------------------------------------------------------------
 * Helpers
 */

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}

/* -----------------------------------------------------------------------------
 * Eviction
 */

static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride is greater than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}

/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}

/* -----------------------------------------------------------------------------
 * Page Management
 */

/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}

/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}

/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}

/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}

uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

/** get mmap size */
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}

/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}

/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];	/* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
			% NUM_USERGART_ENTRIES;

	return 0;
}

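/*
 * Informal recap of the fault_2d() math above (derived from the code, for
 * clarity): the buffer has a virtual stride of m pages per row, and a slot
 * is n (== usergart[fmt].height) rows high, so one slot-row spans m * n
 * virtual pages.  A fault at page offset 'pgoff' is rounded down to the
 * start of the enclosing slot-row (base_pgoff = round_down(pgoff,
 * m << n_shift)), the corresponding pages are pinned into one of the
 * NUM_USERGART_ENTRIES reserved tiler blocks (round-robin), and then a
 * PAGE_SIZE-wide, n-row-high window around the faulting address is
 * inserted into the VMA, one vm_insert_mixed() per row.
 */
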
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}

int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

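/*
 * Example (illustrative sketch only, not part of this file): besides the
 * regular mmap() path above, omap_gem_mmap_obj() is split out so that other
 * mapping paths which already hold a GEM object, e.g. a dma-buf mmap
 * handler, can reuse the same fixups of vm_flags and vm_page_prot.  A
 * hypothetical caller would look roughly like:
 *
 *	static int example_dmabuf_mmap(struct dma_buf *buffer,
 *			struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = buffer->priv;
 *		int ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
 *
 *		if (ret < 0)
 *			return ret;
 *
 *		return omap_gem_mmap_obj(obj, vma);
 *	}
 */
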
/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */

/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use as a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}

/**
 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned fake mmap offset for the buffer
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif

/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */

/**
 * shmem buffers that are mapped cached can simulate coherency by using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}

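/*
 * Roughly how the "simulated coherency" above works: for cached shmem
 * buffers, omap_obj->addrs[i] != 0 means page i is currently mapped for
 * the device (and the cache was cleaned when it was mapped).
 * omap_gem_cpu_sync() unmaps a page before CPU access and clears its
 * addrs[] slot, effectively marking it dirty; omap_gem_dma_sync() then
 * re-maps all dirty pages before DMA and shoots down the userspace
 * mappings, so the next CPU access faults and the cycle repeats.
 */
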
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}

/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}

/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}

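/*
 * Typical pin/unpin usage (illustrative sketch only; e.g. the framebuffer
 * pin/unpin code elsewhere in the driver follows this pattern):
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *
 *	if (ret)
 *		return ret;
 *
 *	... program DSS/DMA with 'paddr' and wait for the hw to finish ...
 *
 *	omap_gem_put_paddr(obj);
 *
 * The buffer stays pinned (and, for non-contiguous buffers, mapped in
 * DMM/TILER) until the last omap_gem_put_paddr() drops paddr_cnt to 0.
 */
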
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}

/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}

/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}

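/*
 * Usage sketch (illustrative only): callers that need the backing pages
 * typically do:
 *
 *	struct page **pages;
 *	int ret = omap_gem_get_pages(obj, &pages, true);
 *
 *	if (ret)
 *		return ret;
 *
 *	... use pages[0 .. (obj->size >> PAGE_SHIFT) - 1] ...
 *
 *	omap_gem_put_pages(obj);
 *
 * With remap=false the call does not take the mutex, only succeeds if the
 * pages are already attached, and must not be paired with
 * omap_gem_put_pages(), as noted above.
 */
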
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif

/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);	/* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, " ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);

static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}

/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)


static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}

static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}

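/*
 * Worked example of the counters above (derived from the code, for
 * clarity): suppose a GPU write is in flight, so write_pending == 1 and
 * write_complete == 0.  A CPU reader calling omap_gem_op_sync(obj,
 * OMAP_GEM_READ) creates a waiter with write_target = write_pending = 1;
 * is_waiting() sees write_complete (0) < write_target (1), so the caller
 * blocks.  When the write finishes, omap_gem_op_finish(obj, OMAP_GEM_WRITE)
 * bumps write_complete to 1 and sync_op_update() notifies the waiter.
 * Writers symmetrically wait for read_complete to reach read_target.
 */
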
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * separate 2d hw..
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}

/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}

static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}

int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}

/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}

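/*
 * Usage sketch for the op tracking API above (illustrative only): a path
 * that hands the buffer to hardware brackets the access with
 * op_start/op_finish, and a CPU path waits with op_sync, e.g.:
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);	// hw will write the buffer
 *	... kick the DMA/GPU job ...
 *	... later, from the job-done path: ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 *	// CPU read access elsewhere:
 *	omap_gem_op_sync(obj, OMAP_GEM_READ);	// blocks until writes complete
 *	... read the buffer contents ...
 */
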
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */

/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}

/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);

			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	omap_gem_free_object(obj);
	return NULL;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

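/*
 * Allocation sketch (illustrative only): in-kernel users fill in
 * union omap_gem_size either by byte count or, for tiled buffers, by
 * width/height, e.g.:
 *
 *	union omap_gem_size gsize = { .bytes = PAGE_ALIGN(len) };
 *	struct drm_gem_object *obj;
 *
 *	obj = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
 *	if (!obj)
 *		return -ENOMEM;
 *
 * Userspace instead goes through omap_gem_new_handle() (via the driver's
 * GEM_NEW ioctl or the dumb_create path above) and gets back a handle.
 */
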
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */

/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}

void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}