/* exynos_drm_gem.h
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#ifndef _EXYNOS_DRM_GEM_H_
#define _EXYNOS_DRM_GEM_H_

#include <drm/drm_gem.h>

#define to_exynos_gem_obj(x)	container_of(x,\
			struct exynos_drm_gem_obj, base)

#define IS_NONCONTIG_BUFFER(f)		((f) & EXYNOS_BO_NONCONTIG)

/*
 * exynos drm gem buffer structure.
 *
 * @cookie: cookie returned by dma_alloc_attrs.
 * @kvaddr: kernel virtual address of the allocated memory region.
 * @userptr: user space address.
 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
 *	- without IOMMU this is a physical address, with IOMMU it is
 *	a device address.
 * @dma_attrs: dma attributes used for the allocation.
 * @write: whether pages will be written to by the caller.
 * @pages: array of backing pages.
 * @sgt: sg table used to transfer page data.
 * @size: size of the allocated memory region.
 * @pfnmap: indicates whether the memory region from userptr is mapped
 *	with VM_PFNMAP or not.
 */
struct exynos_drm_gem_buf {
	void			*cookie;
	void __iomem		*kvaddr;
	unsigned long		userptr;
	dma_addr_t		dma_addr;
	struct dma_attrs	dma_attrs;
	unsigned int		write;
	struct page		**pages;
	struct sg_table		*sgt;
	unsigned long		size;
	bool			pfnmap;
};

/*
 * exynos drm buffer structure.
 *
 * @base: a gem object.
 *	- a new handle to this gem object is created
 *	by drm_gem_handle_create().
 * @buffer: a pointer to an exynos_drm_gem_buf object.
 *	- contains information about the memory region allocated
 *	by user request or at framebuffer creation.
 * @size: size requested by the user, in bytes, aligned to page size.
 * @vma: a pointer to the vm_area.
 * @flags: indicate the memory type of the allocated buffer and its
 *	cache attributes.
 *
 * P.S. this object is passed to user space as kms_bo.handle so
 *	user space can access the buffer through kms_bo.handle.
 */
struct exynos_drm_gem_obj {
	struct drm_gem_object		base;
	struct exynos_drm_gem_buf	*buffer;
	unsigned long			size;
	struct vm_area_struct		*vma;
	unsigned int			flags;
};

struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);

/* destroy a buffer along with its gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);

/* create a private gem object and initialize it. */
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						      unsigned long size);

/* create a new buffer along with its gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size);

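/*
 * Illustrative sketch (not part of the original header): how a caller
 * inside the driver might allocate and release a buffer with the helpers
 * above. The ERR_PTR-style error handling and the EXYNOS_BO_CONTIG flag
 * (from the exynos uapi header) reflect typical usage and are assumptions
 * rather than guarantees made by this file.
 *
 *	struct exynos_drm_gem_obj *exynos_gem_obj;
 *
 *	exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
 *	if (IS_ERR(exynos_gem_obj))
 *		return PTR_ERR(exynos_gem_obj);
 *
 *	... use exynos_gem_obj->buffer->dma_addr for scanout or dma ...
 *
 *	exynos_drm_gem_destroy(exynos_gem_obj);
 */
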
/*
 * request gem object creation and buffer allocation; the buffer size
 * is calculated from framebuffer information such as width, height
 * and bpp.
 */
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

/*
 * get the dma address for a gem handle. This function can be used by
 * other drivers such as 2d/3d acceleration drivers.
 * Calling it increases the gem object's reference count.
 */
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

/*
 * release the dma address obtained for a gem handle. This function can be
 * used by other drivers such as 2d/3d acceleration drivers.
 * Calling it decreases the gem object's reference count.
 */
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp);

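/*
 * Illustrative sketch (not part of the original header): the typical
 * get/put pairing as a 2d/3d acceleration driver might use it around a
 * job. The IS_ERR() check assumes the helper returns an ERR_PTR value on
 * failure, which is not stated in this header.
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 *	... program the hardware with *addr and run the job ...
 *
 *	exynos_drm_gem_put_dma_addr(drm_dev, handle, filp);
 */
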
/* map user space memory allocated by malloc to pages. */
int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get buffer information about a memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);

/* get the buffer size for a gem handle. */
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
						unsigned int gem_handle,
						struct drm_file *file_priv);

/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);

/* create a memory region for a drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args);

/* map a drm framebuffer memory region to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset);

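/*
 * Illustrative sketch (not part of the original header): the dumb-buffer
 * helpers above are meant to be wired into the driver's struct drm_driver.
 * The exact set of callbacks shown here is an assumption about how the
 * driver hooks them up, not something this header defines.
 *
 *	static struct drm_driver exynos_drm_driver = {
 *		...
 *		.gem_free_object	= exynos_drm_gem_free_object,
 *		.dumb_create		= exynos_drm_gem_dumb_create,
 *		.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
 *		.dumb_destroy		= drm_gem_dumb_destroy,
 *		...
 *	};
 */
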
/* page fault handler: map the faulting (virtual) address to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

/* set vm_flags; the vm attributes can be changed here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);

/* return non-zero if the vma maps io or pfn-mapped memory. */
static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

/* get a copy of a virtual memory region. */
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);

/* release a userspace virtual memory area. */
void exynos_gem_put_vma(struct vm_area_struct *vma);

/* get pages from user space. */
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma);

/* drop the reference to pages. */
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma);

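/*
 * Illustrative sketch (not part of the original header): pinning and
 * releasing userptr pages with the helpers above. The kcalloc()-based
 * array allocation and the error handling are assumptions about how a
 * caller would use these functions.
 *
 *	struct page **pages;
 *	int ret;
 *
 *	pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *
 *	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
 *	if (ret < 0)
 *		goto err_free_pages;
 *
 *	... access the user buffer through pages[] ...
 *
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 */
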
/* map an sg table into the dma address space. */
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

/* unmap an sg table from the dma address space. */
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir);

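/*
 * Illustrative sketch (not part of the original header): mapping an sg
 * table for a memory-to-device transfer and unmapping it afterwards. The
 * surrounding transfer code and the DMA_TO_DEVICE direction are assumed
 * for the example only.
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_TO_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *
 *	... kick off the dma transfer described by sgt ...
 *
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_TO_DEVICE);
 */
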
#endif