/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_

#include <drm/drm_gem.h>

#define to_exynos_gem_obj(x)	container_of(x,\
			struct exynos_drm_gem_obj, base)

#define IS_NONCONTIG_BUFFER(f)		((f) & EXYNOS_BO_NONCONTIG)

/*
 * exynos drm gem buffer structure.
 *
 * @cookie: cookie returned by dma_alloc_attrs.
 * @kvaddr: kernel virtual address of the allocated memory region.
 * @userptr: user space address.
 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
 *	- without an IOMMU this is a physical address, with an IOMMU it is
 *	a device address.
 * @dma_attrs: attributes used when the region was allocated with
 *	dma_alloc_attrs.
 * @write: whether pages will be written to by the caller.
 * @pages: array of backing pages.
 * @sgt: sg table used to transfer page data.
 * @size: size of the allocated memory region.
 * @pfnmap: indicates whether the memory region from userptr is mapped
 *	with VM_PFNMAP or not.
 */
struct exynos_drm_gem_buf {
	void			*cookie;
	void __iomem		*kvaddr;
	unsigned long		userptr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	unsigned int		write;
	struct page		**pages;
	struct sg_table		*sgt;
	unsigned long		size;
	bool			pfnmap;
};

/*
 * exynos drm gem object structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object is created
 *	by drm_gem_handle_create().
 * @buffer: a pointer to an exynos_drm_gem_buf object.
 *	- contains the information on the memory region allocated
 *	by user request or at framebuffer creation.
 * @size: size requested by the user, in bytes; this size is aligned
 *	to page granularity.
 * @flags: indicates the memory type of the allocated buffer and its
 *	cache attributes.
 *
 * P.S. this object is handed to user space as kms_bo.handle, so
 *	user space can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem_obj {
	struct drm_gem_object		base;
	struct exynos_drm_gem_buf	*buffer;
	unsigned long			size;
	unsigned int			flags;
};
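
/*
 * Illustrative sketch (not part of the original header): recovering the
 * exynos-specific object from a plain struct drm_gem_object, e.g. inside
 * a GEM callback, using the to_exynos_gem_obj() helper defined above.
 * The variable names are made up for the example.
 *
 *	struct exynos_drm_gem_obj *exynos_gem = to_exynos_gem_obj(obj);
 *
 *	if (IS_NONCONTIG_BUFFER(exynos_gem->flags)) {
 *		... backing storage is a set of scattered pages ...
 *	} else {
 *		... backing storage is physically contiguous ...
 *	}
 */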

struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);

/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);

/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size);

/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size);
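
/*
 * Illustrative sketch (not part of the original header): allocating a
 * contiguous, cachable buffer from kernel code and exposing it to user
 * space through a GEM handle.  'file_priv', 'size' and 'handle' are
 * assumed to come from the calling context, and exynos_drm_gem_create()
 * is assumed to return an ERR_PTR on failure; error handling is
 * abbreviated.
 *
 *	struct exynos_drm_gem_obj *exynos_gem;
 *	unsigned int handle;
 *	int ret;
 *
 *	exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
 *					   EXYNOS_BO_CACHABLE, size);
 *	if (IS_ERR(exynos_gem))
 *		return PTR_ERR(exynos_gem);
 *
 *	ret = drm_gem_handle_create(file_priv, &exynos_gem->base, &handle);
 */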

/*
 * request gem object creation and buffer allocation with a size
 * calculated from framebuffer information such as width, height
 * and bpp.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

/*
 * get the dma address from a gem handle; this function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * calling this function increases the gem object reference count.
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

/*
 * put the dma address obtained from a gem handle; this function can be
 * used by other drivers such as 2d/3d acceleration drivers.
 * calling this function decreases the gem object reference count.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);
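
/*
 * Illustrative sketch (not part of the original header): pairing
 * exynos_drm_gem_get_dma_addr() with exynos_drm_gem_put_dma_addr() from
 * a hypothetical client driver.  It assumes 'dev', 'gem_handle' and
 * 'filp' are valid and that the get call returns an ERR_PTR on failure.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(dev, gem_handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	... program the device with *addr ...
 *
 *	exynos_drm_gem_put_dma_addr(dev, gem_handle, filp);
 */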

/* map a user space region allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get buffer information for a memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get the buffer size for a gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv);

/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);

/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args);

/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset);
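
/*
 * Illustrative sketch (not part of the original header): the two dumb
 * buffer helpers above are meant to be wired into the driver's
 * struct drm_driver, roughly as the exynos driver of this era does.
 * 'example_drm_driver' is a made-up name.
 *
 *	static struct drm_driver example_drm_driver = {
 *		...
 *		.dumb_create		= exynos_drm_gem_dumb_create,
 *		.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
 *		.dumb_destroy		= drm_gem_dumb_destroy,
 *		...
 *	};
 */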

/* page fault handler: maps the faulting (virtual) address to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

/* set vm_flags; vm attributes can also be changed here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
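
/*
 * Illustrative sketch (not part of the original header): vma_is_io() is
 * typically used to decide how a userptr range has to be resolved.
 *
 *	if (vma_is_io(vma)) {
 *		... VM_IO/VM_PFNMAP mapping, look up raw PFNs ...
 *	} else {
 *		... regular pages, pin them with get_user_pages() ...
 *	}
 */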

/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);

/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);

/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma);

/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma);
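
/*
 * Illustrative sketch (not part of the original header): filling a page
 * array from a user pointer and releasing it again.  It assumes 'npages'
 * pages starting at 'userptr' lie within 'vma' and that 'pages' has room
 * for them; error handling is abbreviated.
 *
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
 *	if (ret < 0)
 *		return ret;
 *
 *	... build an sg table from 'pages' and map it for DMA ...
 *
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 */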

/* map sgt with dma region. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

/* unmap sgt from dma region. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);
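
/*
 * Illustrative sketch (not part of the original header): mapping a
 * scatter list for device access and unmapping it once the transfer is
 * done.  DMA_BIDIRECTIONAL is only one possible direction.
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 *	if (ret < 0)
 *		return ret;
 *
 *	... let the device access the buffer ...
 *
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
 */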

#endif