/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_

#include <drm/drm_gem.h>

#define to_exynos_gem_obj(x)	container_of(x,\
			struct exynos_drm_gem_obj, base)

#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)

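/*
 * Example (sketch): a driver callback that receives a plain
 * struct drm_gem_object pointer can recover the exynos object and check
 * whether the backing storage is non-contiguous. The function name below
 * is hypothetical; only the macros and the flags field come from this
 * header.
 *
 *	static bool example_is_noncontig(struct drm_gem_object *obj)
 *	{
 *		struct exynos_drm_gem_obj *exynos_gem = to_exynos_gem_obj(obj);
 *
 *		return IS_NONCONTIG_BUFFER(exynos_gem->flags);
 *	}
 */
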
/*
 * exynos drm buffer structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object is created
 *	by drm_gem_handle_create().
 * @flags: memory type of the allocated buffer and its cache attributes.
 * @size: size requested from user, in bytes; it is aligned up to
 *	page size.
 * @cookie: cookie returned by dma_alloc_attrs.
 * @kvaddr: kernel virtual address of the allocated memory region.
 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
 *	- this is a physical address without an IOMMU and
 *	a device address with an IOMMU.
 * @dma_attrs: dma attributes used to allocate the buffer.
 * @pages: array of backing pages.
 * @sgt: imported sg_table.
 *
 * P.S. this object is handed to userspace as kms_bo.handle so
 *	user can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem_obj {
	struct drm_gem_object	base;
	unsigned int		flags;
	unsigned long		size;
	void			*cookie;
	void __iomem		*kvaddr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	struct page		**pages;
	struct sg_table		*sgt;
};

struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);

/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);

/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size);

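/*
 * Example (sketch): in-kernel allocation and teardown of a contiguous
 * buffer. EXYNOS_BO_CONTIG comes from the exynos DRM UAPI header and
 * SZ_64K from <linux/sizes.h>; the surrounding context and size are
 * hypothetical.
 *
 *	struct exynos_drm_gem_obj *exynos_gem;
 *
 *	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, SZ_64K);
 *	if (IS_ERR(exynos_gem))
 *		return PTR_ERR(exynos_gem);
 *	...
 *	exynos_drm_gem_destroy(exynos_gem);
 */
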
/*
 * request gem object creation and buffer allocation with the size
 * calculated from framebuffer information such as width, height and bpp.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

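/*
 * Example (sketch): from user space this ioctl is reached through
 * DRM_IOCTL_EXYNOS_GEM_CREATE, assuming the UAPI definitions in
 * <drm/exynos_drm.h>; error handling is omitted. On success, req.handle
 * names the new gem object.
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 */
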
/*
 * get the dma address for a gem handle. this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * calling it increases the gem object's reference count.
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

/*
 * put the dma address taken for a gem handle. this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * calling it decreases the gem object's reference count.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

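/*
 * Example (sketch): a client driver resolving and releasing the dma
 * address of a buffer it was handed a gem handle for. Every successful
 * exynos_drm_gem_get_dma_addr() call should be balanced by a matching
 * exynos_drm_gem_put_dma_addr(); dev, handle and filp are hypothetical.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(dev, handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	... use *addr for dma ...
 *	exynos_drm_gem_put_dma_addr(dev, handle, filp);
 */
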
/* map a user space region allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get buffer information about a memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get the buffer size for a gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv);

/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);

/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args);

/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset);

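/*
 * Example (sketch): the generic dumb-buffer flow that ends up in the two
 * callbacks above, using only standard DRM UAPI; error handling is
 * omitted and fd is a hypothetical open drm device file descriptor.
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb mreq = {0};
 *	void *map;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	mreq.handle = creq.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mreq.offset);
 */
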
/* page fault handler: maps the faulting (virtual) address to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

/* set vm_flags; the vm attributes can be changed to another kind here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

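/*
 * vma_is_io - check whether a vma maps io or pfn-mapped memory
 * (VM_IO or VM_PFNMAP), i.e. memory without struct page backing.
 */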
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);

/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);

/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma);

/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma);

/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

/* low-level interface prime helpers */
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt);
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj);
void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);

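/*
 * Example (sketch): how these helpers are typically plugged into
 * struct drm_driver; the field names follow the drm core of this era and
 * the initializer is abridged.
 *
 *	static struct drm_driver example_drm_driver = {
 *		...
 *		.gem_free_object	= exynos_drm_gem_free_object,
 *		.dumb_create		= exynos_drm_gem_dumb_create,
 *		.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
 *		.gem_prime_get_sg_table	= exynos_drm_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
 *		.gem_prime_vmap		= exynos_drm_gem_prime_vmap,
 *		.gem_prime_vunmap	= exynos_drm_gem_prime_vunmap,
 *		...
 *	};
 */
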
#endif