xref: /linux/drivers/gpu/drm/msm/msm_gem.h (revision 55223394d56bab42ebac71ba52e0fd8bfdc6fc07)
1 /*
2  * Copyright (C) 2013 Red Hat
3  * Author: Rob Clark <robdclark@gmail.com>
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published by
7  * the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17 
18 #ifndef __MSM_GEM_H__
19 #define __MSM_GEM_H__
20 
21 #include <linux/kref.h>
22 #include <linux/reservation.h>
23 #include "msm_drv.h"
24 
25 /* Additional internal-use only BO flags: */
26 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
27 
/* Per-address-space state: a VA allocator (drm_mm) layered on top of
 * an MMU instance.  Lifetime is managed by refcount (kref).
 */
struct msm_gem_address_space {
	const char *name;
	/* NOTE: mm managed at the page level, size is in # of pages
	 * and position mm_node->start is in # of pages:
	 */
	struct drm_mm mm;
	spinlock_t lock; /* Protects drm_mm node allocation/removal */
	struct msm_mmu *mmu;
	struct kref kref;
};
38 
/* A mapping of a GEM object into a single address space.  An object
 * can have multiple vmas, linked on msm_gem_object::vmas.
 */
struct msm_gem_vma {
	struct drm_mm_node node;  /* VA range allocated from aspace->mm */
	uint64_t iova;            /* device address of the mapping */
	struct msm_gem_address_space *aspace;
	struct list_head list;    /* node in msm_gem_object::vmas */
	bool mapped;              /* pages are mapped in the MMU */
	int inuse;                /* NOTE(review): appears to be a use/pin count — confirm in msm_gem_vma.c */
};
47 
struct msm_gem_object {
	struct drm_gem_object base;

	/* MSM_BO_x flags from the alloc ioctl, plus internal-use flags
	 * like MSM_BO_STOLEN:
	 */
	uint32_t flags;

	/**
	 * Advice: are the backing pages purgeable?
	 * (MSM_MADV_WILLNEED / MSM_MADV_DONTNEED)
	 */
	uint8_t madv;

	/**
	 * count of active vmap'ing
	 */
	uint8_t vmap_count;

	/* An object is either:
	 *  inactive - on priv->inactive_list
	 *  active   - on one of the gpu's active_list..  well, at
	 *     least for now we don't have (I don't think) hw sync between
	 *     the 2d and 3d cores on devices which have both, meaning we
	 *     need to block on submit if a bo is already on another ring
	 *
	 */
	struct list_head mm_list;
	struct msm_gpu *gpu;     /* non-null if active */

	/* Transiently in the process of submit ioctl, objects associated
	 * with the submit are on submit->bo_list.. this only lasts for
	 * the duration of the ioctl, so one bo can never be on multiple
	 * submit lists.
	 */
	struct list_head submit_entry;

	struct page **pages;     /* backing pages, if allocated */
	struct sg_table *sgt;    /* scatterlist of backing pages */
	void *vaddr;             /* kernel vmap address, if vmap'd */

	struct list_head vmas;    /* list of msm_gem_vma */

	/* node for the deferred-free llist (see msm_gem_free_work()) */
	struct llist_node freed;

	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* For physically contiguous buffers.  Used when we don't have
	 * an IOMMU.  Also used for stolen/splashscreen buffer.
	 */
	struct drm_mm_node *vram_node;
	struct mutex lock; /* Protects resources associated with bo */

	char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
102 
103 static inline bool is_active(struct msm_gem_object *msm_obj)
104 {
105 	return msm_obj->gpu != NULL;
106 }
107 
108 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
109 {
110 	WARN_ON(!mutex_is_locked(&msm_obj->base.dev->struct_mutex));
111 	return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
112 			!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
113 }
114 
115 static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
116 {
117 	return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
118 }
119 
/* The shrinker can be triggered while we hold objA->lock, and need
 * to grab objB->lock to purge it.  Lockdep just sees these as a single
 * class of lock, so we use subclasses to teach it the difference.
 *
 * OBJ_LOCK_NORMAL is implicit (ie. normal mutex_lock() call), and
 * OBJ_LOCK_SHRINKER is used by shrinker.
 *
 * It is *essential* that we never go down paths that could trigger the
 * shrinker for a purgeable object.  This is ensured by checking that
 * msm_obj->madv == MSM_MADV_WILLNEED.
 */
enum msm_gem_lock {
	OBJ_LOCK_NORMAL,    /* regular callers */
	OBJ_LOCK_SHRINKER,  /* lock taken from the shrinker callback */
};
135 
/* Release the backing pages of a DONTNEED bo (see is_purgeable()). */
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Tear down the kernel vmap of an idle bo (see is_vunmapable()). */
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
/* Deferred-free worker; processes bo's queued via msm_gem_object::freed. */
void msm_gem_free_work(struct work_struct *work);
139 
140 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
141  * associated with the cmdstream submission for synchronization (and
142  * make it easier to unwind when things go wrong, etc).  This only
143  * lasts for the duration of the submit-ioctl.
144  */
145 struct msm_gem_submit {
146 	struct drm_device *dev;
147 	struct msm_gpu *gpu;
148 	struct list_head node;   /* node in ring submit list */
149 	struct list_head bo_list;
150 	struct ww_acquire_ctx ticket;
151 	uint32_t seqno;		/* Sequence number of the submit on the ring */
152 	struct dma_fence *fence;
153 	struct msm_gpu_submitqueue *queue;
154 	struct pid *pid;    /* submitting process */
155 	bool valid;         /* true if no cmdstream patching needed */
156 	bool in_rb;         /* "sudo" mode, copy cmds into RB */
157 	struct msm_ringbuffer *ring;
158 	unsigned int nr_cmds;
159 	unsigned int nr_bos;
160 	u32 ident;	   /* A "identifier" for the submit for logging */
161 	struct {
162 		uint32_t type;
163 		uint32_t size;  /* in dwords */
164 		uint64_t iova;
165 		uint32_t idx;   /* cmdstream buffer idx in bos[] */
166 	} *cmd;  /* array of size nr_cmds */
167 	struct {
168 		uint32_t flags;
169 		union {
170 			struct msm_gem_object *obj;
171 			uint32_t handle;
172 		};
173 		uint64_t iova;
174 	} bos[0];
175 };
176 
177 #endif /* __MSM_GEM_H__ */
178