xref: /linux/drivers/gpu/drm/omapdrm/omap_gem.c (revision a1ff5a7d78a036d6c2178ee5acd6ba4946243800)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
4  * Author: Rob Clark <rob.clark@linaro.org>
5  */
6 
7 #include <linux/dma-mapping.h>
8 #include <linux/seq_file.h>
9 #include <linux/shmem_fs.h>
10 #include <linux/spinlock.h>
11 #include <linux/pfn_t.h>
12 #include <linux/vmalloc.h>
13 
14 #include <drm/drm_prime.h>
15 #include <drm/drm_vma_manager.h>
16 
17 #include "omap_drv.h"
18 #include "omap_dmm_tiler.h"
19 
20 /*
21  * GEM buffer object implementation.
22  */
23 
24 /* note: we use upper 8 bits of flags for driver-internal flags: */
25 #define OMAP_BO_MEM_DMA_API	0x01000000	/* memory allocated with the dma_alloc_* API */
26 #define OMAP_BO_MEM_SHMEM	0x02000000	/* memory allocated through shmem backing */
27 #define OMAP_BO_MEM_DMABUF	0x08000000	/* memory imported from a dmabuf */
28 
29 struct omap_gem_object {
30 	struct drm_gem_object base;
31 
32 	struct list_head mm_list;
33 
34 	u32 flags;
35 
36 	/** width/height for tiled formats (rounded up to slot boundaries) */
37 	u16 width, height;
38 
39 	/** roll applied when mapping to DMM */
40 	u32 roll;
41 
42 	/** protects pin_cnt, block, pages, dma_addrs and vaddr */
43 	struct mutex lock;
44 
45 	/**
46 	 * dma_addr contains the buffer DMA address. It is valid for
47 	 *
48 	 * - buffers allocated through the DMA mapping API (with the
49 	 *   OMAP_BO_MEM_DMA_API flag set)
50 	 *
51 	 * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
52 	 *   if they are physically contiguous
53 	 *
54 	 * - buffers mapped through the TILER when pin_cnt is not zero, in which
55 	 *   case the DMA address points to the TILER aperture
56 	 *
57 	 * Physically contiguous buffers have their DMA address equal to the
58 	 * physical address as we don't remap those buffers through the TILER.
59 	 *
60 	 * Buffers mapped to the TILER have their DMA address pointing to the
61 	 * TILER aperture. As TILER mappings are refcounted (through pin_cnt)
62 	 * the DMA address must be accessed through omap_gem_pin() to ensure
63 	 * that the mapping won't disappear unexpectedly. References must be
64 	 * released with omap_gem_unpin().
65 	 */
66 	dma_addr_t dma_addr;
67 
68 	/**
69 	 * # of users
70 	 */
71 	refcount_t pin_cnt;
72 
73 	/**
74 	 * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
75 	 * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF flag
76 	 */
77 	struct sg_table *sgt;
78 
79 	/**
80 	 * tiler block used when buffer is remapped in DMM/TILER.
81 	 */
82 	struct tiler_block *block;
83 
84 	/**
85 	 * Array of backing pages, if allocated.  Note that pages are never
86 	 * allocated for buffers originally allocated from contiguous memory
87 	 */
88 	struct page **pages;
89 
90 	/** addresses corresponding to pages in above array */
91 	dma_addr_t *dma_addrs;
92 
93 	/**
94 	 * Virtual address, if mapped.
95 	 */
96 	void *vaddr;
97 };
98 
99 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
100 
101 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
102  * not necessarily pinned in TILER all the time, and (b) even when they
103  * are pinned are not necessarily page aligned, we reserve one or more small
104  * regions in each of the 2d containers to use as a user-GART where we
105  * can create a second page-aligned mapping of parts of the buffer
106  * being accessed from userspace.
107  *
108  * Note that we could optimize slightly when we know that multiple
109  * tiler containers are backed by the same PAT.. but I'll leave that
110  * for later..
111  */
112 #define NUM_USERGART_ENTRIES 2
113 struct omap_drm_usergart_entry {
114 	struct tiler_block *block;	/* the reserved tiler block */
115 	dma_addr_t dma_addr;
116 	struct drm_gem_object *obj;	/* the current pinned obj */
117 	pgoff_t obj_pgoff;		/* page offset of obj currently
118 					   mapped in */
119 };
120 
121 struct omap_drm_usergart {
122 	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
123 	int height;				/* height in rows */
124 	int height_shift;		/* ilog2(height in rows) */
125 	int slot_shift;			/* ilog2(width per slot) */
126 	int stride_pfn;			/* stride in pages */
127 	int last;				/* index of last used entry */
128 };
129 
130 /* -----------------------------------------------------------------------------
131  * Helpers
132  */
133 
134 /** get mmap offset */
135 u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
136 {
137 	struct drm_device *dev = obj->dev;
138 	int ret;
139 	size_t size;
140 
141 	/* Make it mmapable */
142 	size = omap_gem_mmap_size(obj);
143 	ret = drm_gem_create_mmap_offset_size(obj, size);
144 	if (ret) {
145 		dev_err(dev->dev, "could not allocate mmap offset\n");
146 		return 0;
147 	}
148 
149 	return drm_vma_node_offset_addr(&obj->vma_node);
150 }
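
/*
 * Example (sketch only, not taken from an in-tree caller): the value
 * returned above is the fake offset that userspace passes to mmap() on
 * the DRM device fd to map the object; drm_fd, size and offset stand
 * for the caller's values here:
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, offset);
 */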
151 
152 static bool omap_gem_sgt_is_contiguous(struct sg_table *sgt, size_t size)
153 {
154 	return !(drm_prime_get_contiguous_size(sgt) < size);
155 }
156 
157 static bool omap_gem_is_contiguous(struct omap_gem_object *omap_obj)
158 {
159 	if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
160 		return true;
161 
162 	if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) &&
163 	    omap_gem_sgt_is_contiguous(omap_obj->sgt, omap_obj->base.size))
164 		return true;
165 
166 	return false;
167 }
168 
169 /* -----------------------------------------------------------------------------
170  * Eviction
171  */
172 
173 static void omap_gem_evict_entry(struct drm_gem_object *obj,
174 		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
175 {
176 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
177 	struct omap_drm_private *priv = obj->dev->dev_private;
178 	int n = priv->usergart[fmt].height;
179 	size_t size = PAGE_SIZE * n;
180 	loff_t off = omap_gem_mmap_offset(obj) +
181 			(entry->obj_pgoff << PAGE_SHIFT);
182 	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
183 
184 	if (m > 1) {
185 		int i;
186 		/* if stride is greater than PAGE_SIZE then sparse mapping: */
187 		for (i = n; i > 0; i--) {
188 			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
189 					    off, PAGE_SIZE, 1);
190 			off += PAGE_SIZE * m;
191 		}
192 	} else {
193 		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
194 				    off, size, 1);
195 	}
196 
197 	entry->obj = NULL;
198 }
199 
200 /* Evict a buffer from usergart, if it is mapped there */
201 static void omap_gem_evict(struct drm_gem_object *obj)
202 {
203 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
204 	struct omap_drm_private *priv = obj->dev->dev_private;
205 
206 	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
207 		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
208 		int i;
209 
210 		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
211 			struct omap_drm_usergart_entry *entry =
212 				&priv->usergart[fmt].entry[i];
213 
214 			if (entry->obj == obj)
215 				omap_gem_evict_entry(obj, fmt, entry);
216 		}
217 	}
218 }
219 
220 /* -----------------------------------------------------------------------------
221  * Page Management
222  */
223 
224 /*
225  * Ensure backing pages are allocated. Must be called with the omap_obj.lock
226  * held.
227  */
228 static int omap_gem_attach_pages(struct drm_gem_object *obj)
229 {
230 	struct drm_device *dev = obj->dev;
231 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
232 	struct page **pages;
233 	int npages = obj->size >> PAGE_SHIFT;
234 	int i, ret;
235 	dma_addr_t *addrs;
236 
237 	lockdep_assert_held(&omap_obj->lock);
238 
239 	/*
240 	 * If not using shmem (in which case backing pages don't need to be
241 	 * allocated) or if pages are already allocated we're done.
242 	 */
243 	if (!(omap_obj->flags & OMAP_BO_MEM_SHMEM) || omap_obj->pages)
244 		return 0;
245 
246 	pages = drm_gem_get_pages(obj);
247 	if (IS_ERR(pages)) {
248 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
249 		return PTR_ERR(pages);
250 	}
251 
252 	/* for non-cached buffers, ensure the new pages are clean because
253 	 * DSS, GPU, etc. are not cache coherent:
254 	 */
255 	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
256 		addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
257 		if (!addrs) {
258 			ret = -ENOMEM;
259 			goto free_pages;
260 		}
261 
262 		for (i = 0; i < npages; i++) {
263 			addrs[i] = dma_map_page(dev->dev, pages[i],
264 					0, PAGE_SIZE, DMA_TO_DEVICE);
265 
266 			if (dma_mapping_error(dev->dev, addrs[i])) {
267 				dev_warn(dev->dev,
268 					"%s: failed to map page\n", __func__);
269 
270 				for (i = i - 1; i >= 0; --i) {
271 					dma_unmap_page(dev->dev, addrs[i],
272 						PAGE_SIZE, DMA_TO_DEVICE);
273 				}
274 
275 				ret = -ENOMEM;
276 				goto free_addrs;
277 			}
278 		}
279 	} else {
280 		addrs = kcalloc(npages, sizeof(*addrs), GFP_KERNEL);
281 		if (!addrs) {
282 			ret = -ENOMEM;
283 			goto free_pages;
284 		}
285 	}
286 
287 	omap_obj->dma_addrs = addrs;
288 	omap_obj->pages = pages;
289 
290 	return 0;
291 
292 free_addrs:
293 	kfree(addrs);
294 free_pages:
295 	drm_gem_put_pages(obj, pages, true, false);
296 
297 	return ret;
298 }
299 
300 /* Release backing pages. Must be called with the omap_obj.lock held. */
301 static void omap_gem_detach_pages(struct drm_gem_object *obj)
302 {
303 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
304 	unsigned int npages = obj->size >> PAGE_SHIFT;
305 	unsigned int i;
306 
307 	lockdep_assert_held(&omap_obj->lock);
308 
309 	for (i = 0; i < npages; i++) {
310 		if (omap_obj->dma_addrs[i])
311 			dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
312 				       PAGE_SIZE, DMA_TO_DEVICE);
313 	}
314 
315 	kfree(omap_obj->dma_addrs);
316 	omap_obj->dma_addrs = NULL;
317 
318 	drm_gem_put_pages(obj, omap_obj->pages, true, false);
319 	omap_obj->pages = NULL;
320 }
321 
322 /* get buffer flags */
323 u32 omap_gem_flags(struct drm_gem_object *obj)
324 {
325 	return to_omap_bo(obj)->flags;
326 }
327 
328 /** get mmap size */
329 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
330 {
331 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
332 	size_t size = obj->size;
333 
334 	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
335 		/* for tiled buffers, the virtual size has stride rounded up
336 		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
337 		 * 32kb later!).  But we don't back the entire buffer with
338 		 * pages, only the valid picture part.. so need to adjust for
339 		 * this in the size used to mmap and generate mmap offset
340 		 */
341 		size = tiler_vsize(gem2fmt(omap_obj->flags),
342 				omap_obj->width, omap_obj->height);
343 	}
344 
345 	return size;
346 }
347 
348 /* -----------------------------------------------------------------------------
349  * Fault Handling
350  */
351 
352 /* Normal handling for the case of faulting in non-tiled buffers */
353 static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj,
354 		struct vm_area_struct *vma, struct vm_fault *vmf)
355 {
356 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
357 	unsigned long pfn;
358 	pgoff_t pgoff;
359 
360 	/* We don't use vmf->pgoff since that has the fake offset: */
361 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
362 
363 	if (omap_obj->pages) {
364 		omap_gem_cpu_sync_page(obj, pgoff);
365 		pfn = page_to_pfn(omap_obj->pages[pgoff]);
366 	} else {
367 		BUG_ON(!omap_gem_is_contiguous(omap_obj));
368 		pfn = (omap_obj->dma_addr >> PAGE_SHIFT) + pgoff;
369 	}
370 
371 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
372 			pfn, pfn << PAGE_SHIFT);
373 
374 	return vmf_insert_mixed(vma, vmf->address,
375 			__pfn_to_pfn_t(pfn, PFN_DEV));
376 }
377 
378 /* Special handling for the case of faulting in 2d tiled buffers */
379 static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
380 		struct vm_area_struct *vma, struct vm_fault *vmf)
381 {
382 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
383 	struct omap_drm_private *priv = obj->dev->dev_private;
384 	struct omap_drm_usergart_entry *entry;
385 	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
386 	struct page *pages[64];  /* XXX is this too much to have on stack? */
387 	unsigned long pfn;
388 	pgoff_t pgoff, base_pgoff;
389 	unsigned long vaddr;
390 	int i, err, slots;
391 	vm_fault_t ret = VM_FAULT_NOPAGE;
392 
393 	/*
394 	 * Note the height of the slot is also equal to the number of pages
395 	 * that need to be mapped in to fill a 4kb-wide CPU page.  If the slot
396 	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
397 	 */
398 	const int n = priv->usergart[fmt].height;
399 	const int n_shift = priv->usergart[fmt].height_shift;
400 
401 	/*
402 	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
403 	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
404 	 * into account in some of the math, so figure out virtual stride
405 	 * in pages
406 	 */
407 	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
408 
409 	/* We don't use vmf->pgoff since that has the fake offset: */
410 	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
411 
412 	/*
413 	 * Actual address we start mapping at is rounded down to previous slot
414 	 * boundary in the y direction:
415 	 */
416 	base_pgoff = round_down(pgoff, m << n_shift);
417 
418 	/* figure out buffer width in slots */
419 	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;
420 
421 	vaddr = vmf->address - ((pgoff - base_pgoff) << PAGE_SHIFT);
422 
423 	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];
424 
425 	/* evict previous buffer using this usergart entry, if any: */
426 	if (entry->obj)
427 		omap_gem_evict_entry(entry->obj, fmt, entry);
428 
429 	entry->obj = obj;
430 	entry->obj_pgoff = base_pgoff;
431 
432 	/* now convert base_pgoff to phys offset from virt offset: */
433 	base_pgoff = (base_pgoff >> n_shift) * slots;
434 
435 	/* for wider-than 4k.. figure out which part of the slot-row we want: */
436 	if (m > 1) {
437 		int off = pgoff % m;
438 		entry->obj_pgoff += off;
439 		base_pgoff /= m;
440 		slots = min(slots - (off << n_shift), n);
441 		base_pgoff += off << n_shift;
442 		vaddr += off << PAGE_SHIFT;
443 	}
444 
445 	/*
446 	 * Map in pages. Beyond the valid pixel part of the buffer, we set
447 	 * pages[i] to NULL to get a dummy page mapped in.. if someone
448 	 * reads/writes it they will get random/undefined content, but at
449 	 * least it won't be corrupting whatever other random page used to
450 	 * be mapped in, or other undefined behavior.
451 	 */
452 	memcpy(pages, &omap_obj->pages[base_pgoff],
453 			sizeof(struct page *) * slots);
454 	memset(pages + slots, 0,
455 			sizeof(struct page *) * (n - slots));
456 
457 	err = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
458 	if (err) {
459 		ret = vmf_error(err);
460 		dev_err(obj->dev->dev, "failed to pin: %d\n", err);
461 		return ret;
462 	}
463 
464 	pfn = entry->dma_addr >> PAGE_SHIFT;
465 
466 	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
467 			pfn, pfn << PAGE_SHIFT);
468 
469 	for (i = n; i > 0; i--) {
470 		ret = vmf_insert_mixed(vma,
471 			vaddr, __pfn_to_pfn_t(pfn, PFN_DEV));
472 		if (ret & VM_FAULT_ERROR)
473 			break;
474 		pfn += priv->usergart[fmt].stride_pfn;
475 		vaddr += PAGE_SIZE * m;
476 	}
477 
478 	/* simple round-robin: */
479 	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
480 				 % NUM_USERGART_ENTRIES;
481 
482 	return ret;
483 }
484 
485 /**
486  * omap_gem_fault		-	pagefault handler for GEM objects
487  * @vmf: fault detail
488  *
489  * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
490  * does most of the work for us including the actual map/unmap calls
491  * but we need to do the actual page work.
492  *
493  * The VMA was set up by GEM. In doing so it also ensured that the
494  * vma->vm_private_data points to the GEM object that is backing this
495  * mapping.
496  */
497 static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
498 {
499 	struct vm_area_struct *vma = vmf->vma;
500 	struct drm_gem_object *obj = vma->vm_private_data;
501 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
502 	int err;
503 	vm_fault_t ret;
504 
505 	/* Make sure we don't parallel update on a fault, nor move or remove
506 	 * something from beneath our feet
507 	 */
508 	mutex_lock(&omap_obj->lock);
509 
510 	/* if a shmem backed object, make sure we have pages attached now */
511 	err = omap_gem_attach_pages(obj);
512 	if (err) {
513 		ret = vmf_error(err);
514 		goto fail;
515 	}
516 
517 	/* where should we do corresponding put_pages().. we are mapping
518 	 * the original page, rather than thru a GART, so we can't rely
519 	 * on eviction to trigger this.  But munmap() or all mappings should
520 	 * probably trigger put_pages()?
521 	 */
522 
523 	if (omap_obj->flags & OMAP_BO_TILED_MASK)
524 		ret = omap_gem_fault_2d(obj, vma, vmf);
525 	else
526 		ret = omap_gem_fault_1d(obj, vma, vmf);
527 
528 
529 fail:
530 	mutex_unlock(&omap_obj->lock);
531 	return ret;
532 }
533 
534 static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
535 {
536 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
537 
538 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_IO | VM_MIXEDMAP);
539 
540 	if (omap_obj->flags & OMAP_BO_WC) {
541 		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
542 	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
543 		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
544 	} else {
545 		/*
546 		 * We do have some private objects, at least for scanout buffers
547 		 * on hardware without DMM/TILER.  But these are allocated write-
548 		 * combine
549 		 */
550 		if (WARN_ON(!obj->filp))
551 			return -EINVAL;
552 
553 		/*
554 		 * Shunt off cached objs to shmem file so they have their own
555 		 * address_space (so unmap_mapping_range does what we want,
556 		 * in particular in the case of mmap'd dmabufs)
557 		 */
558 		vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
559 		vma_set_file(vma, obj->filp);
560 
561 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
562 	}
563 
564 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
565 
566 	return 0;
567 }
568 
569 /* -----------------------------------------------------------------------------
570  * Dumb Buffers
571  */
572 
573 /**
574  * omap_gem_dumb_create	-	create a dumb buffer
575  * @file: our client file
576  * @dev: our device
577  * @args: the requested arguments copied from userspace
578  *
579  * Allocate a buffer suitable for use for a frame buffer of the
580  * form described by user space. Give userspace a handle by which
581  * to reference it.
582  */
583 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
584 		struct drm_mode_create_dumb *args)
585 {
586 	union omap_gem_size gsize;
587 
588 	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
589 
590 	args->size = PAGE_ALIGN(args->pitch * args->height);
591 
592 	gsize = (union omap_gem_size){
593 		.bytes = args->size,
594 	};
595 
596 	return omap_gem_new_handle(dev, file, gsize,
597 			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
598 }
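
/*
 * Example (worked sketch, hypothetical request): for a 1920x1080 dumb
 * buffer with bpp = 32, the computation above gives
 * pitch = DIV_ROUND_UP(1920 * 32, 8) = 7680 bytes and
 * size = PAGE_ALIGN(7680 * 1080) = 8294400 bytes, which is then
 * allocated as a write-combined scanout buffer.
 */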
599 
600 /**
601  * omap_gem_dumb_map_offset - create an offset for a dumb buffer
602  * @file: our drm client file
603  * @dev: drm device
604  * @handle: GEM handle to the object (from dumb_create)
605  * @offset: memory map offset placeholder
606  *
607  * Do the necessary setup to allow the mapping of the frame buffer
608  * into user memory. We don't have to do much here at the moment.
609  */
610 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
611 		u32 handle, u64 *offset)
612 {
613 	struct drm_gem_object *obj;
614 	int ret = 0;
615 
616 	/* GEM does all our handle to object mapping */
617 	obj = drm_gem_object_lookup(file, handle);
618 	if (obj == NULL) {
619 		ret = -ENOENT;
620 		goto fail;
621 	}
622 
623 	*offset = omap_gem_mmap_offset(obj);
624 
625 	drm_gem_object_put(obj);
626 
627 fail:
628 	return ret;
629 }
630 
631 #ifdef CONFIG_DRM_FBDEV_EMULATION
632 /* Set scrolling position.  This allows us to implement fast scrolling
633  * for the console.
634  *
635  * Call only from non-atomic contexts.
636  */
637 int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
638 {
639 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
640 	u32 npages = obj->size >> PAGE_SHIFT;
641 	int ret = 0;
642 
643 	if (roll > npages) {
644 		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
645 		return -EINVAL;
646 	}
647 
648 	omap_obj->roll = roll;
649 
650 	mutex_lock(&omap_obj->lock);
651 
652 	/* if we aren't mapped yet, we don't need to do anything */
653 	if (omap_obj->block) {
654 		ret = omap_gem_attach_pages(obj);
655 		if (ret)
656 			goto fail;
657 
658 		ret = tiler_pin(omap_obj->block, omap_obj->pages, npages,
659 				roll, true);
660 		if (ret)
661 			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
662 	}
663 
664 fail:
665 	mutex_unlock(&omap_obj->lock);
666 
667 	return ret;
668 }
669 #endif
670 
671 /* -----------------------------------------------------------------------------
672  * Memory Management & DMA Sync
673  */
674 
675 /*
676  * shmem buffers that are mapped cached are not coherent.
677  *
678  * We keep track of dirty pages using page faulting to perform cache management.
679  * When a page is mapped to the CPU in read/write mode the device can't access
680  * it and omap_obj->dma_addrs[i] is NULL. When a page is mapped to the device
681  * the omap_obj->dma_addrs[i] is set to the DMA address, and the page is
682  * unmapped from the CPU.
683  */
684 static inline bool omap_gem_is_cached_coherent(struct drm_gem_object *obj)
685 {
686 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
687 
688 	return !((omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
689 		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED));
690 }
691 
692 /* Sync the buffer for CPU access.. note pages should already be
693  * attached, ie. omap_gem_get_pages()
694  */
695 void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
696 {
697 	struct drm_device *dev = obj->dev;
698 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
699 
700 	if (omap_gem_is_cached_coherent(obj))
701 		return;
702 
703 	if (omap_obj->dma_addrs[pgoff]) {
704 		dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
705 				PAGE_SIZE, DMA_TO_DEVICE);
706 		omap_obj->dma_addrs[pgoff] = 0;
707 	}
708 }
709 
710 /* sync the buffer for DMA access */
711 void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
712 		enum dma_data_direction dir)
713 {
714 	struct drm_device *dev = obj->dev;
715 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
716 	int i, npages = obj->size >> PAGE_SHIFT;
717 	struct page **pages = omap_obj->pages;
718 	bool dirty = false;
719 
720 	if (omap_gem_is_cached_coherent(obj))
721 		return;
722 
723 	for (i = 0; i < npages; i++) {
724 		if (!omap_obj->dma_addrs[i]) {
725 			dma_addr_t addr;
726 
727 			addr = dma_map_page(dev->dev, pages[i], 0,
728 					    PAGE_SIZE, dir);
729 			if (dma_mapping_error(dev->dev, addr)) {
730 				dev_warn(dev->dev, "%s: failed to map page\n",
731 					__func__);
732 				break;
733 			}
734 
735 			dirty = true;
736 			omap_obj->dma_addrs[i] = addr;
737 		}
738 	}
739 
740 	if (dirty) {
741 		unmap_mapping_range(obj->filp->f_mapping, 0,
742 				    omap_gem_mmap_size(obj), 1);
743 	}
744 }
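
/*
 * Example (sketch only, assuming a cached shmem buffer): a typical flow
 * is to sync the whole buffer before device access and rely on the fault
 * handler to sync individual pages back for CPU access:
 *
 *	omap_gem_dma_sync_buffer(obj, DMA_TO_DEVICE);
 *	... start DMA / scanout using the buffer ...
 *	// later CPU accesses fault the pages back in, and
 *	// omap_gem_cpu_sync_page() unmaps them from the device
 */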
745 
746 static int omap_gem_pin_tiler(struct drm_gem_object *obj)
747 {
748 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
749 	u32 npages = obj->size >> PAGE_SHIFT;
750 	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
751 	struct tiler_block *block;
752 	int ret;
753 
754 	BUG_ON(omap_obj->block);
755 
756 	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
757 		block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height,
758 					 PAGE_SIZE);
759 	} else {
760 		block = tiler_reserve_1d(obj->size);
761 	}
762 
763 	if (IS_ERR(block)) {
764 		ret = PTR_ERR(block);
765 		dev_err(obj->dev->dev, "could not remap: %d (%d)\n", ret, fmt);
766 		goto fail;
767 	}
768 
769 	/* TODO: enable async refill.. */
770 	ret = tiler_pin(block, omap_obj->pages, npages, omap_obj->roll, true);
771 	if (ret) {
772 		tiler_release(block);
773 		dev_err(obj->dev->dev, "could not pin: %d\n", ret);
774 		goto fail;
775 	}
776 
777 	omap_obj->dma_addr = tiler_ssptr(block);
778 	omap_obj->block = block;
779 
780 	DBG("got dma address: %pad", &omap_obj->dma_addr);
781 
782 fail:
783 	return ret;
784 }
785 
786 /**
787  * omap_gem_pin() - Pin a GEM object in memory
788  * @obj: the GEM object
789  * @dma_addr: the DMA address
790  *
791  * Pin the given GEM object in memory and fill the dma_addr pointer with the
792  * object's DMA address. If the buffer is not physically contiguous it will be
793  * remapped through the TILER to provide a contiguous view.
794  *
795  * Pins are reference-counted, calling this function multiple times is allowed
796  * as long the corresponding omap_gem_unpin() calls are balanced.
797  *
798  * Return 0 on success or a negative error code otherwise.
799  */
800 int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
801 {
802 	struct omap_drm_private *priv = obj->dev->dev_private;
803 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
804 	int ret = 0;
805 
806 	mutex_lock(&omap_obj->lock);
807 
808 	if (!omap_gem_is_contiguous(omap_obj)) {
809 		if (refcount_read(&omap_obj->pin_cnt) == 0) {
810 
811 			refcount_set(&omap_obj->pin_cnt, 1);
812 
813 			ret = omap_gem_attach_pages(obj);
814 			if (ret)
815 				goto fail;
816 
817 			if (omap_obj->flags & OMAP_BO_SCANOUT) {
818 				if (priv->has_dmm) {
819 					ret = omap_gem_pin_tiler(obj);
820 					if (ret)
821 						goto fail;
822 				}
823 			}
824 		} else {
825 			refcount_inc(&omap_obj->pin_cnt);
826 		}
827 	}
828 
829 	if (dma_addr)
830 		*dma_addr = omap_obj->dma_addr;
831 
832 fail:
833 	mutex_unlock(&omap_obj->lock);
834 
835 	return ret;
836 }
837 
838 /**
839  * omap_gem_unpin_locked() - Unpin a GEM object from memory
840  * @obj: the GEM object
841  *
842  * omap_gem_unpin() without locking.
843  */
844 static void omap_gem_unpin_locked(struct drm_gem_object *obj)
845 {
846 	struct omap_drm_private *priv = obj->dev->dev_private;
847 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
848 	int ret;
849 
850 	if (omap_gem_is_contiguous(omap_obj))
851 		return;
852 
853 	if (refcount_dec_and_test(&omap_obj->pin_cnt)) {
854 		if (omap_obj->sgt) {
855 			sg_free_table(omap_obj->sgt);
856 			kfree(omap_obj->sgt);
857 			omap_obj->sgt = NULL;
858 		}
859 		if (!(omap_obj->flags & OMAP_BO_SCANOUT))
860 			return;
861 		if (priv->has_dmm) {
862 			ret = tiler_unpin(omap_obj->block);
863 			if (ret) {
864 				dev_err(obj->dev->dev,
865 					"could not unpin pages: %d\n", ret);
866 			}
867 			ret = tiler_release(omap_obj->block);
868 			if (ret) {
869 				dev_err(obj->dev->dev,
870 					"could not release unmap: %d\n", ret);
871 			}
872 			omap_obj->dma_addr = 0;
873 			omap_obj->block = NULL;
874 		}
875 	}
876 }
877 
878 /**
879  * omap_gem_unpin() - Unpin a GEM object from memory
880  * @obj: the GEM object
881  *
882  * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are
883  * reference-counted, the actual unpin will only be performed when the number
884  * of calls to this function matches the number of calls to omap_gem_pin().
885  */
886 void omap_gem_unpin(struct drm_gem_object *obj)
887 {
888 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
889 
890 	mutex_lock(&omap_obj->lock);
891 	omap_gem_unpin_locked(obj);
892 	mutex_unlock(&omap_obj->lock);
893 }
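
/*
 * Example (sketch only, hypothetical caller): a balanced pin/unpin cycle
 * for a caller that needs a device-visible address:
 *
 *	dma_addr_t dma_addr;
 *	int ret = omap_gem_pin(obj, &dma_addr);
 *	if (ret)
 *		return ret;
 *	... program the display/DMA engine with dma_addr ...
 *	omap_gem_unpin(obj);
 */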
894 
895 /* Get rotated scanout address (only valid if already pinned), at the
896  * specified orientation and x,y offset from top-left corner of buffer
897  * (only valid for tiled 2d buffers)
898  */
899 int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
900 		int x, int y, dma_addr_t *dma_addr)
901 {
902 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
903 	int ret = -EINVAL;
904 
905 	mutex_lock(&omap_obj->lock);
906 
907 	if ((refcount_read(&omap_obj->pin_cnt) > 0) && omap_obj->block &&
908 			(omap_obj->flags & OMAP_BO_TILED_MASK)) {
909 		*dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
910 		ret = 0;
911 	}
912 
913 	mutex_unlock(&omap_obj->lock);
914 
915 	return ret;
916 }
917 
918 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
919 int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
920 {
921 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
922 	int ret = -EINVAL;
923 	if (omap_obj->flags & OMAP_BO_TILED_MASK)
924 		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
925 	return ret;
926 }
927 
928 /* if !remap, and we don't have pages backing, then fail, rather than
929  * increasing the pin count (which we don't really do yet anyways,
930  * because we don't support swapping pages back out).  And 'remap'
931  * might not be quite the right name, but I wanted to keep it working
932  * similarly to omap_gem_pin().  Note though that mutex is not
933  * aquired if !remap (because this can be called in atomic ctxt),
934  * acquired if !remap (because this can be called in atomic ctxt),
935  * same way.  If !remap, a matching omap_gem_put_pages() call is not
936  * required (and should not be made).
937  */
938 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
939 		bool remap)
940 {
941 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
942 	int ret = 0;
943 
944 	mutex_lock(&omap_obj->lock);
945 
946 	if (remap) {
947 		ret = omap_gem_attach_pages(obj);
948 		if (ret)
949 			goto unlock;
950 	}
951 
952 	if (!omap_obj->pages) {
953 		ret = -ENOMEM;
954 		goto unlock;
955 	}
956 
957 	*pages = omap_obj->pages;
958 
959 unlock:
960 	mutex_unlock(&omap_obj->lock);
961 
962 	return ret;
963 }
964 
965 /* release pages when DMA no longer being performed */
966 int omap_gem_put_pages(struct drm_gem_object *obj)
967 {
968 	/* do something here if we dynamically attach/detach pages.. at
969 	 * least they would no longer need to be pinned if everyone has
970 	 * released the pages..
971 	 */
972 	return 0;
973 }
974 
975 struct sg_table *omap_gem_get_sg(struct drm_gem_object *obj,
976 		enum dma_data_direction dir)
977 {
978 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
979 	dma_addr_t addr;
980 	struct sg_table *sgt;
981 	struct scatterlist *sg;
982 	unsigned int count, len, stride, i;
983 	int ret;
984 
985 	ret = omap_gem_pin(obj, &addr);
986 	if (ret)
987 		return ERR_PTR(ret);
988 
989 	mutex_lock(&omap_obj->lock);
990 
991 	sgt = omap_obj->sgt;
992 	if (sgt)
993 		goto out;
994 
995 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
996 	if (!sgt) {
997 		ret = -ENOMEM;
998 		goto err_unpin;
999 	}
1000 
1001 	if (addr) {
1002 		if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1003 			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
1004 
1005 			len = omap_obj->width << (int)fmt;
1006 			count = omap_obj->height;
1007 			stride = tiler_stride(fmt, 0);
1008 		} else {
1009 			len = obj->size;
1010 			count = 1;
1011 			stride = 0;
1012 		}
1013 	} else {
1014 		count = obj->size >> PAGE_SHIFT;
1015 	}
1016 
1017 	ret = sg_alloc_table(sgt, count, GFP_KERNEL);
1018 	if (ret)
1019 		goto err_free;
1020 
1021 	/* this must be after omap_gem_pin() to ensure we have pages attached */
1022 	omap_gem_dma_sync_buffer(obj, dir);
1023 
1024 	if (addr) {
1025 		for_each_sg(sgt->sgl, sg, count, i) {
1026 			sg_set_page(sg, pfn_to_page(__phys_to_pfn(addr)),
1027 				    len, offset_in_page(addr));
1028 			sg_dma_address(sg) = addr;
1029 			sg_dma_len(sg) = len;
1030 
1031 			addr += stride;
1032 		}
1033 	} else {
1034 		for_each_sg(sgt->sgl, sg, count, i) {
1035 			sg_set_page(sg, omap_obj->pages[i], PAGE_SIZE, 0);
1036 			sg_dma_address(sg) = omap_obj->dma_addrs[i];
1037 			sg_dma_len(sg) =  PAGE_SIZE;
1038 		}
1039 	}
1040 
1041 	omap_obj->sgt = sgt;
1042 out:
1043 	mutex_unlock(&omap_obj->lock);
1044 	return sgt;
1045 
1046 err_free:
1047 	kfree(sgt);
1048 err_unpin:
1049 	mutex_unlock(&omap_obj->lock);
1050 	omap_gem_unpin(obj);
1051 	return ERR_PTR(ret);
1052 }
1053 
1054 void omap_gem_put_sg(struct drm_gem_object *obj, struct sg_table *sgt)
1055 {
1056 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1057 
1058 	if (WARN_ON(omap_obj->sgt != sgt))
1059 		return;
1060 
1061 	omap_gem_unpin(obj);
1062 }
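
/*
 * Example (sketch only): omap_gem_get_sg() and omap_gem_put_sg() are
 * expected to be called in balanced pairs, e.g. by a dma-buf exporter:
 *
 *	struct sg_table *sgt = omap_gem_get_sg(obj, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	... map/use the scatterlist ...
 *	omap_gem_put_sg(obj, sgt);
 */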
1063 
1064 #ifdef CONFIG_DRM_FBDEV_EMULATION
1065 /*
1066  * Get kernel virtual address for CPU access.. this more or less only
1067  * exists for omap_fbdev.
1068  */
1069 void *omap_gem_vaddr(struct drm_gem_object *obj)
1070 {
1071 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1072 	void *vaddr;
1073 	int ret;
1074 
1075 	mutex_lock(&omap_obj->lock);
1076 
1077 	if (!omap_obj->vaddr) {
1078 		ret = omap_gem_attach_pages(obj);
1079 		if (ret) {
1080 			vaddr = ERR_PTR(ret);
1081 			goto unlock;
1082 		}
1083 
1084 		omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
1085 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
1086 	}
1087 
1088 	vaddr = omap_obj->vaddr;
1089 
1090 unlock:
1091 	mutex_unlock(&omap_obj->lock);
1092 	return vaddr;
1093 }
1094 #endif
1095 
1096 /* -----------------------------------------------------------------------------
1097  * Power Management
1098  */
1099 
1100 #ifdef CONFIG_PM
1101 /* re-pin objects in DMM in resume path: */
1102 int omap_gem_resume(struct drm_device *dev)
1103 {
1104 	struct omap_drm_private *priv = dev->dev_private;
1105 	struct omap_gem_object *omap_obj;
1106 	int ret = 0;
1107 
1108 	mutex_lock(&priv->list_lock);
1109 	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
1110 		if (omap_obj->block) {
1111 			struct drm_gem_object *obj = &omap_obj->base;
1112 			u32 npages = obj->size >> PAGE_SHIFT;
1113 
1114 			WARN_ON(!omap_obj->pages);  /* this can't happen */
1115 			ret = tiler_pin(omap_obj->block,
1116 					omap_obj->pages, npages,
1117 					omap_obj->roll, true);
1118 			if (ret) {
1119 				dev_err(dev->dev, "could not repin: %d\n", ret);
1120 				goto done;
1121 			}
1122 		}
1123 	}
1124 
1125 done:
1126 	mutex_unlock(&priv->list_lock);
1127 	return ret;
1128 }
1129 #endif
1130 
1131 /* -----------------------------------------------------------------------------
1132  * DebugFS
1133  */
1134 
1135 #ifdef CONFIG_DEBUG_FS
1136 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1137 {
1138 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1139 	u64 off;
1140 
1141 	off = drm_vma_node_start(&obj->vma_node);
1142 
1143 	mutex_lock(&omap_obj->lock);
1144 
1145 	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1146 			omap_obj->flags, obj->name, kref_read(&obj->refcount),
1147 			off, &omap_obj->dma_addr,
1148 			refcount_read(&omap_obj->pin_cnt),
1149 			omap_obj->vaddr, omap_obj->roll);
1150 
1151 	if (omap_obj->flags & OMAP_BO_TILED_MASK) {
1152 		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
1153 		if (omap_obj->block) {
1154 			struct tcm_area *area = &omap_obj->block->area;
1155 			seq_printf(m, " (%dx%d, %dx%d)",
1156 					area->p0.x, area->p0.y,
1157 					area->p1.x, area->p1.y);
1158 		}
1159 	} else {
1160 		seq_printf(m, " %zu", obj->size);
1161 	}
1162 
1163 	mutex_unlock(&omap_obj->lock);
1164 
1165 	seq_printf(m, "\n");
1166 }
1167 
1168 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1169 {
1170 	struct omap_gem_object *omap_obj;
1171 	int count = 0;
1172 	size_t size = 0;
1173 
1174 	list_for_each_entry(omap_obj, list, mm_list) {
1175 		struct drm_gem_object *obj = &omap_obj->base;
1176 		seq_printf(m, "   ");
1177 		omap_gem_describe(obj, m);
1178 		count++;
1179 		size += obj->size;
1180 	}
1181 
1182 	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1183 }
1184 #endif
1185 
1186 /* -----------------------------------------------------------------------------
1187  * Constructor & Destructor
1188  */
1189 
1190 static void omap_gem_free_object(struct drm_gem_object *obj)
1191 {
1192 	struct drm_device *dev = obj->dev;
1193 	struct omap_drm_private *priv = dev->dev_private;
1194 	struct omap_gem_object *omap_obj = to_omap_bo(obj);
1195 
1196 	omap_gem_evict(obj);
1197 
1198 	mutex_lock(&priv->list_lock);
1199 	list_del(&omap_obj->mm_list);
1200 	mutex_unlock(&priv->list_lock);
1201 
1202 	/*
1203 	 * We own the sole reference to the object at this point, but to keep
1204 	 * lockdep happy, we must still take the omap_obj->lock to call
1205 	 * omap_gem_detach_pages(). This should hardly make any difference as
1206 	 * there can't be any lock contention.
1207 	 */
1208 	mutex_lock(&omap_obj->lock);
1209 
1210 	/* The object should not be pinned. */
1211 	WARN_ON(refcount_read(&omap_obj->pin_cnt) > 0);
1212 
1213 	if (omap_obj->pages) {
1214 		if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
1215 			kfree(omap_obj->pages);
1216 		else
1217 			omap_gem_detach_pages(obj);
1218 	}
1219 
1220 	if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
1221 		dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
1222 			    omap_obj->dma_addr);
1223 	} else if (omap_obj->vaddr) {
1224 		vunmap(omap_obj->vaddr);
1225 	} else if (obj->import_attach) {
1226 		drm_prime_gem_destroy(obj, omap_obj->sgt);
1227 	}
1228 
1229 	mutex_unlock(&omap_obj->lock);
1230 
1231 	drm_gem_object_release(obj);
1232 
1233 	mutex_destroy(&omap_obj->lock);
1234 
1235 	kfree(omap_obj);
1236 }
1237 
1238 static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
1239 {
1240 	struct omap_drm_private *priv = dev->dev_private;
1241 
1242 	switch (flags & OMAP_BO_CACHE_MASK) {
1243 	case OMAP_BO_CACHED:
1244 	case OMAP_BO_WC:
1245 	case OMAP_BO_CACHE_MASK:
1246 		break;
1247 
1248 	default:
1249 		return false;
1250 	}
1251 
1252 	if (flags & OMAP_BO_TILED_MASK) {
1253 		if (!priv->usergart)
1254 			return false;
1255 
1256 		switch (flags & OMAP_BO_TILED_MASK) {
1257 		case OMAP_BO_TILED_8:
1258 		case OMAP_BO_TILED_16:
1259 		case OMAP_BO_TILED_32:
1260 			break;
1261 
1262 		default:
1263 			return false;
1264 		}
1265 	}
1266 
1267 	return true;
1268 }
1269 
1270 static const struct vm_operations_struct omap_gem_vm_ops = {
1271 	.fault = omap_gem_fault,
1272 	.open = drm_gem_vm_open,
1273 	.close = drm_gem_vm_close,
1274 };
1275 
1276 static const struct drm_gem_object_funcs omap_gem_object_funcs = {
1277 	.free = omap_gem_free_object,
1278 	.export = omap_gem_prime_export,
1279 	.mmap = omap_gem_object_mmap,
1280 	.vm_ops = &omap_gem_vm_ops,
1281 };
1282 
1283 /* GEM buffer object constructor */
1284 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1285 		union omap_gem_size gsize, u32 flags)
1286 {
1287 	struct omap_drm_private *priv = dev->dev_private;
1288 	struct omap_gem_object *omap_obj;
1289 	struct drm_gem_object *obj;
1290 	struct address_space *mapping;
1291 	size_t size;
1292 	int ret;
1293 
1294 	if (!omap_gem_validate_flags(dev, flags))
1295 		return NULL;
1296 
1297 	/* Validate the flags and compute the memory and cache flags. */
1298 	if (flags & OMAP_BO_TILED_MASK) {
1299 		/*
1300 		 * Tiled buffers are always shmem paged backed. When they are
1301 		 * scanned out, they are remapped into DMM/TILER.
1302 		 */
1303 		flags |= OMAP_BO_MEM_SHMEM;
1304 
1305 		/*
1306 		 * Currently don't allow cached buffers. There is some caching
1307 		 * stuff that needs to be handled better.
1308 		 */
1309 		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
1310 		flags |= tiler_get_cpu_cache_flags();
1311 	} else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1312 		/*
1313 		 * If we don't have DMM, we must allocate scanout buffers
1314 		 * from contiguous DMA memory.
1315 		 */
1316 		flags |= OMAP_BO_MEM_DMA_API;
1317 	} else if (!(flags & OMAP_BO_MEM_DMABUF)) {
1318 		/*
1319 		 * All other buffers not backed by dma_buf are shmem-backed.
1320 		 */
1321 		flags |= OMAP_BO_MEM_SHMEM;
1322 	}
1323 
1324 	/* Allocate and initialize the OMAP GEM object. */
1325 	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1326 	if (!omap_obj)
1327 		return NULL;
1328 
1329 	obj = &omap_obj->base;
1330 	omap_obj->flags = flags;
1331 	mutex_init(&omap_obj->lock);
1332 
1333 	if (flags & OMAP_BO_TILED_MASK) {
1334 		/*
1335 		 * For tiled buffers align dimensions to slot boundaries and
1336 		 * calculate size based on aligned dimensions.
1337 		 */
1338 		tiler_align(gem2fmt(flags), &gsize.tiled.width,
1339 			    &gsize.tiled.height);
1340 
1341 		size = tiler_size(gem2fmt(flags), gsize.tiled.width,
1342 				  gsize.tiled.height);
1343 
1344 		omap_obj->width = gsize.tiled.width;
1345 		omap_obj->height = gsize.tiled.height;
1346 	} else {
1347 		size = PAGE_ALIGN(gsize.bytes);
1348 	}
1349 
1350 	obj->funcs = &omap_gem_object_funcs;
1351 
1352 	/* Initialize the GEM object. */
1353 	if (!(flags & OMAP_BO_MEM_SHMEM)) {
1354 		drm_gem_private_object_init(dev, obj, size);
1355 	} else {
1356 		ret = drm_gem_object_init(dev, obj, size);
1357 		if (ret)
1358 			goto err_free;
1359 
1360 		mapping = obj->filp->f_mapping;
1361 		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
1362 	}
1363 
1364 	/* Allocate memory if needed. */
1365 	if (flags & OMAP_BO_MEM_DMA_API) {
1366 		omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
1367 					       &omap_obj->dma_addr,
1368 					       GFP_KERNEL);
1369 		if (!omap_obj->vaddr)
1370 			goto err_release;
1371 	}
1372 
1373 	mutex_lock(&priv->list_lock);
1374 	list_add(&omap_obj->mm_list, &priv->obj_list);
1375 	mutex_unlock(&priv->list_lock);
1376 
1377 	return obj;
1378 
1379 err_release:
1380 	drm_gem_object_release(obj);
1381 err_free:
1382 	kfree(omap_obj);
1383 	return NULL;
1384 }
1385 
1386 struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1387 					   struct sg_table *sgt)
1388 {
1389 	struct omap_drm_private *priv = dev->dev_private;
1390 	struct omap_gem_object *omap_obj;
1391 	struct drm_gem_object *obj;
1392 	union omap_gem_size gsize;
1393 
1394 	/* Without a DMM only physically contiguous buffers can be supported. */
1395 	if (!omap_gem_sgt_is_contiguous(sgt, size) && !priv->has_dmm)
1396 		return ERR_PTR(-EINVAL);
1397 
1398 	gsize.bytes = PAGE_ALIGN(size);
1399 	obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1400 	if (!obj)
1401 		return ERR_PTR(-ENOMEM);
1402 
1403 	omap_obj = to_omap_bo(obj);
1404 
1405 	mutex_lock(&omap_obj->lock);
1406 
1407 	omap_obj->sgt = sgt;
1408 
1409 	if (omap_gem_sgt_is_contiguous(sgt, size)) {
1410 		omap_obj->dma_addr = sg_dma_address(sgt->sgl);
1411 	} else {
1412 		/* Create pages list from sgt */
1413 		struct page **pages;
1414 		unsigned int npages;
1415 		unsigned int ret;
1416 
1417 		npages = DIV_ROUND_UP(size, PAGE_SIZE);
1418 		pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1419 		if (!pages) {
1420 			omap_gem_free_object(obj);
1421 			obj = ERR_PTR(-ENOMEM);
1422 			goto done;
1423 		}
1424 
1425 		omap_obj->pages = pages;
1426 		ret = drm_prime_sg_to_page_array(sgt, pages, npages);
1427 		if (ret) {
1428 			omap_gem_free_object(obj);
1429 			obj = ERR_PTR(-ENOMEM);
1430 			goto done;
1431 		}
1432 	}
1433 
1434 done:
1435 	mutex_unlock(&omap_obj->lock);
1436 	return obj;
1437 }
1438 
1439 /* convenience method to construct a GEM buffer object, and userspace handle */
1440 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1441 		union omap_gem_size gsize, u32 flags, u32 *handle)
1442 {
1443 	struct drm_gem_object *obj;
1444 	int ret;
1445 
1446 	obj = omap_gem_new(dev, gsize, flags);
1447 	if (!obj)
1448 		return -ENOMEM;
1449 
1450 	ret = drm_gem_handle_create(file, obj, handle);
1451 	if (ret) {
1452 		omap_gem_free_object(obj);
1453 		return ret;
1454 	}
1455 
1456 	/* drop reference from allocate - handle holds it now */
1457 	drm_gem_object_put(obj);
1458 
1459 	return 0;
1460 }
1461 
1462 /* -----------------------------------------------------------------------------
1463  * Init & Cleanup
1464  */
1465 
1466 /* If DMM is used, we need to set some stuff up.. */
1467 void omap_gem_init(struct drm_device *dev)
1468 {
1469 	struct omap_drm_private *priv = dev->dev_private;
1470 	struct omap_drm_usergart *usergart;
1471 	const enum tiler_fmt fmts[] = {
1472 			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1473 	};
1474 	int i, j;
1475 
1476 	if (!dmm_is_available()) {
1477 		/* DMM only supported on OMAP4 and later, so this isn't fatal */
1478 		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
1479 		return;
1480 	}
1481 
1482 	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
1483 	if (!usergart)
1484 		return;
1485 
1486 	/* reserve 4k aligned/wide regions for userspace mappings: */
1487 	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1488 		u16 h = 1, w = PAGE_SIZE >> i;
1489 
1490 		tiler_align(fmts[i], &w, &h);
1491 		/* note: since each region is one 4kb page wide and has the minimum
1492 		 * number of rows, the height ends up being the same as the
1493 		 * # of pages in the region
1494 		 */
1495 		usergart[i].height = h;
1496 		usergart[i].height_shift = ilog2(h);
1497 		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1498 		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1499 		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1500 			struct omap_drm_usergart_entry *entry;
1501 			struct tiler_block *block;
1502 
1503 			entry = &usergart[i].entry[j];
1504 			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
1505 			if (IS_ERR(block)) {
1506 				dev_err(dev->dev,
1507 						"reserve failed: %d, %d, %ld\n",
1508 						i, j, PTR_ERR(block));
1509 				return;
1510 			}
1511 			entry->dma_addr = tiler_ssptr(block);
1512 			entry->block = block;
1513 
1514 			DBG("%d:%d: %dx%d: dma_addr=%pad stride=%d", i, j, w, h,
1515 					&entry->dma_addr,
1516 					usergart[i].stride_pfn << PAGE_SHIFT);
1517 		}
1518 	}
1519 
1520 	priv->usergart = usergart;
1521 	priv->has_dmm = true;
1522 }
1523 
1524 void omap_gem_deinit(struct drm_device *dev)
1525 {
1526 	struct omap_drm_private *priv = dev->dev_private;
1527 
1528 	/* I believe we can rely on there being no more outstanding GEM
1529 	 * objects which could depend on usergart/dmm at this point.
1530 	 */
1531 	kfree(priv->usergart);
1532 }
1533