/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_

#define to_exynos_gem_obj(x)	container_of(x,\
			struct exynos_drm_gem_obj, base)

#define IS_NONCONTIG_BUFFER(f)		(f & EXYNOS_BO_NONCONTIG)

/*
 * exynos drm gem buffer structure.
 *
 * @kvaddr: kernel virtual address of the allocated memory region.
 * @userptr: user space address.
 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
 *	- this is a physical address without IOMMU and
 *	a device address with IOMMU.
 * @dma_attrs: dma attributes used for the allocation.
 * @write: whether pages will be written to by the caller.
 * @pages: array of backing pages.
 * @sgt: sg table to transfer page data.
 * @size: size of the allocated memory region.
 * @pfnmap: indicates whether the memory region from userptr is mapped
 *	with VM_PFNMAP or not.
 */
struct exynos_drm_gem_buf {
	void __iomem		*kvaddr;
	unsigned long		userptr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	unsigned int		write;
	struct page		**pages;
	struct sg_table		*sgt;
	unsigned long		size;
	bool			pfnmap;
};

/*
 * exynos drm gem object structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object is created
 *	by drm_gem_handle_create().
 * @buffer: a pointer to an exynos_drm_gem_buf object.
 *	- contains information about the contiguous memory region
 *	allocated by user request or at framebuffer creation.
 * @size: size requested by user, in bytes; aligned to page size.
 * @vma: a pointer to the vm_area.
 * @flags: memory type of the allocated buffer and its cache attributes.
 *
 * P.S. this object is handed to user space as kms_bo.handle so
 *	user space can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem_obj {
	struct drm_gem_object		base;
	struct exynos_drm_gem_buf	*buffer;
	unsigned long			size;
	struct vm_area_struct		*vma;
	unsigned int			flags;
};

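/*
 * Example (illustrative sketch only, not part of this header's API):
 * converting a core drm_gem_object back to the exynos wrapper with
 * to_exynos_gem_obj() and checking the buffer type with
 * IS_NONCONTIG_BUFFER(). The function name is hypothetical.
 *
 *	static bool example_is_contig(struct drm_gem_object *obj)
 *	{
 *		struct exynos_drm_gem_obj *exynos_gem_obj =
 *						to_exynos_gem_obj(obj);
 *
 *		return !IS_NONCONTIG_BUFFER(exynos_gem_obj->flags);
 *	}
 */
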
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);

/* destroy a gem object along with the buffer it points to. */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);

/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size);

/* create a new buffer together with a gem object. */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size);

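/*
 * Example (illustrative sketch only; assumes the create helper returns an
 * ERR_PTR on failure and that EXYNOS_BO_CONTIG comes from the exynos_drm
 * uapi header): allocating a contiguous buffer from inside the driver and
 * releasing it again.
 *
 *	struct exynos_drm_gem_obj *exynos_gem_obj;
 *
 *	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, SZ_1M);
 *	if (IS_ERR(exynos_gem_obj))
 *		return PTR_ERR(exynos_gem_obj);
 *
 *	... use exynos_gem_obj->buffer ...
 *
 *	exynos_drm_gem_destroy(exynos_gem_obj);
 */
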
/*
 * request gem object creation and buffer allocation with a size
 * calculated from framebuffer information such as width, height
 * and bpp.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

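/*
 * Example (user space sketch only; assumes the drm_exynos_gem_create
 * layout and DRM_IOCTL_EXYNOS_GEM_CREATE definition from the exynos_drm
 * uapi header, error handling omitted):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 1024 * 1024,
 *		.flags = EXYNOS_BO_NONCONTIG,
 *	};
 *
 *	ret = drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *	... on success, req.handle holds the new gem handle.
 */
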
/*
 * get the dma address from a gem handle; this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * calling it increases the gem object's reference count.
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

/*
 * put the dma address obtained from a gem handle; this function can be
 * used by other drivers such as 2d/3d acceleration drivers.
 * calling it decreases the gem object's reference count.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

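/*
 * Example (illustrative sketch for an in-kernel user such as an
 * acceleration driver; assumes an ERR_PTR return on failure, and
 * gem_handle/filp are provided by the caller):
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	... program the device with *addr ...
 *
 *	exynos_drm_gem_put_dma_addr(drm_dev, gem_handle, filp);
 */
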
/* get the buffer offset for mapping it to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);

/*
 * mmap the physically contiguous memory that a gem object contains
 * to user space.
 */
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);

/* map user space memory allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get buffer information for a memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);

/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);

/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args);

/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset);

/*
 * destroy the allocated memory region.
 *	- the gem handle and the physical memory region pointed to by the
 *	gem object are released by drm_gem_handle_delete().
 */
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle);

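/*
 * Example (user space sketch of the standard dumb-buffer flow these
 * callbacks implement, using the generic DRM ioctls; error handling
 * omitted):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024, .height = 768, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	struct drm_mode_destroy_dumb destroy = { 0 };
 *	void *vaddr;
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *	map.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	vaddr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, map.offset);
 *
 *	... draw into vaddr ...
 *
 *	munmap(vaddr, create.size);
 *	destroy.handle = create.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
 */
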
/* page fault handler: map the faulting (virtual) address to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

/* set vm_flags; the vm attributes can be changed here if needed. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);

/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);

/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma);

/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma);

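/*
 * Example (illustrative sketch only; assumes the pages array was sized by
 * the caller and that the helpers are used as a get/put pair, as the
 * comments above suggest):
 *
 *	int ret;
 *
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages,
 *						pages, vma);
 *	if (ret < 0)
 *		return ret;
 *
 *	... access the pages ...
 *
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 */
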
/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

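/*
 * Example (illustrative sketch only; assumes the sg_table is already
 * populated and that the same direction is passed to the map and unmap
 * calls, as the DMA API requires):
 *
 *	int ret;
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_TO_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *
 *	... start dma using the mapped sg_table ...
 *
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_TO_DEVICE);
 */
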

#endif