// SPDX-License-Identifier: GPL-2.0
/*
 * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
 * Author: James.Qian.Wang <james.qian.wang@arm.com>
 *
 */
#include <linux/component.h>
#include <linux/interrupt.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"
#include "komeda_kms.h"

DEFINE_DRM_GEM_CMA_FOPS(komeda_cma_fops);

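/*
 * Dumb buffer creation: compute a tightly packed pitch from width and bpp,
 * round it up to the bus width required by the display processor, then hand
 * the allocation off to the CMA helper.
 */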
static int komeda_gem_cma_dumb_create(struct drm_file *file,
				      struct drm_device *dev,
				      struct drm_mode_create_dumb *args)
{
	struct komeda_dev *mdev = dev->dev_private;
	u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	args->pitch = ALIGN(pitch, mdev->chip.bus_width);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}

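/*
 * Shared interrupt handler: the chip backend decodes the raw hardware status
 * into a komeda_events bitmask, which is then logged and dispatched to every
 * CRTC so vblank and flip-done events can be signalled.
 */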
static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
{
	struct drm_device *drm = data;
	struct komeda_dev *mdev = drm->dev_private;
	struct komeda_kms_dev *kms = to_kdev(drm);
	struct komeda_events evts;
	irqreturn_t status;
	u32 i;

	/* Call into the CHIP backend to recognize the pending events */
	memset(&evts, 0, sizeof(evts));
	status = mdev->funcs->irq_handler(mdev, &evts);

	komeda_print_events(&evts, drm);

	/* Notify the CRTCs to handle the events */
	for (i = 0; i < kms->n_crtcs; i++)
		komeda_crtc_handle_event(&kms->crtcs[i], &evts);

	return status;
}

static struct drm_driver komeda_kms_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.lastclose			= drm_fb_helper_lastclose,
	.gem_free_object_unlocked	= drm_gem_cma_free_object,
	.gem_vm_ops			= &drm_gem_cma_vm_ops,
	.dumb_create			= komeda_gem_cma_dumb_create,
	.prime_handle_to_fd		= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle		= drm_gem_prime_fd_to_handle,
	.gem_prime_get_sg_table		= drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table	= drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap			= drm_gem_cma_prime_vmap,
	.gem_prime_vunmap		= drm_gem_cma_prime_vunmap,
	.gem_prime_mmap			= drm_gem_cma_prime_mmap,
	.fops = &komeda_cma_fops,
	.name = "komeda",
	.desc = "Arm Komeda Display Processor driver",
	.date = "20181101",
	.major = 0,
	.minor = 1,
};

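/*
 * Custom commit tail: unlike the default helper, flip done is awaited before
 * signalling hw_done, so a commit only completes once the hardware has
 * actually taken the new configuration.
 */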
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;

	drm_atomic_helper_commit_modeset_disables(dev, old_state);

	drm_atomic_helper_commit_planes(dev, old_state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}

static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
	.atomic_commit_tail = komeda_kms_commit_tail,
};

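/*
 * Insert a plane state into @zorder_list, keeping the list sorted by
 * ascending zpos. Returns -EINVAL if two planes on the same CRTC request the
 * same zpos, which the hardware cannot honour.
 */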
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
				       struct list_head *zorder_list)
{
	struct komeda_plane_state *new = to_kplane_st(plane_st);
	struct komeda_plane_state *node, *last;

	last = list_empty(zorder_list) ?
	       NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);

	/* The list is sorted by increasing zpos, so if the list is empty or
	 * the zpos of the new node is bigger than that of the last node,
	 * there is no need to loop: just append the new node to the tail.
	 */
	if (!last || (new->base.zpos > last->base.zpos)) {
		list_add_tail(&new->zlist_node, zorder_list);
		return 0;
	}

	/* Build the list in order of increasing zpos */
	list_for_each_entry(node, zorder_list, zlist_node) {
		if (new->base.zpos < node->base.zpos) {
			list_add_tail(&new->zlist_node, &node->zlist_node);
			break;
		} else if (node->base.zpos == new->base.zpos) {
			struct drm_plane *a = node->base.plane;
			struct drm_plane *b = new->base.plane;

			/* Komeda doesn't support assigning the same zpos to
			 * different planes.
			 */
			DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured with the same zpos: %d.\n",
					 a->name, b->name, node->base.zpos);
			return -EINVAL;
		}
	}

	return 0;
}

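/*
 * Komeda uses its own zpos normalization instead of
 * drm_atomic_normalize_zpos(): every plane attached to the CRTC is pulled
 * into the atomic state, sorted by zpos and assigned a normalized zpos, with
 * an extra slot reserved for planes that are split across two layers. The
 * highest normalized zpos seen on a slave plane is recorded as well.
 */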
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
				      struct drm_crtc_state *crtc_st)
{
	struct drm_atomic_state *state = crtc_st->state;
	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
	struct komeda_plane_state *kplane_st;
	struct drm_plane_state *plane_st;
	struct drm_plane *plane;
	struct list_head zorder_list;
	int order = 0, err;

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
			 crtc->base.id, crtc->name);

	INIT_LIST_HEAD(&zorder_list);

	/* This loop also adds all affected planes to the new state */
	drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
		plane_st = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_st))
			return PTR_ERR(plane_st);

		/* Build a list sorted by increasing zpos */
		err = komeda_plane_state_list_add(plane_st, &zorder_list);
		if (err)
			return err;
	}

	kcrtc_st->max_slave_zorder = 0;

	list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
		plane_st = &kplane_st->base;
		plane = plane_st->plane;

		plane_st->normalized_zpos = order++;
		/* When layer_split is enabled, one plane is handled by two
		 * separate komeda layers (left/right), which may need two
		 * zorders:
		 * - zorder: the left layer, for the left part of the display.
		 * - zorder + 1: reserved for the right layer.
		 */
		if (to_kplane_st(plane_st)->layer_split)
			order++;

		DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
				 plane->base.id, plane->name,
				 plane_st->zpos, plane_st->normalized_zpos);

		/* Track the maximum zorder used by any slave plane */
		if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
			kcrtc_st->max_slave_zorder =
				max(plane_st->normalized_zpos,
				    kcrtc_st->max_slave_zorder);
	}

	crtc_st->zpos_changed = true;

	return 0;
}

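/*
 * Atomic check: on top of the standard modeset/plane checks, every plane
 * attached to an updated CRTC is added to the state (even if unchanged) so
 * the per-commit resource assignment can be recomputed, and the CRTC's plane
 * zpos values are normalized.
 */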
static int komeda_kms_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_crtc_st;
	int i, err;

	err = drm_atomic_helper_check_modeset(dev, state);
	if (err)
		return err;

	/* Komeda needs to re-calculate the resource assignment on every
	 * commit, so all affected planes (even unchanged ones) have to be
	 * added to the drm_atomic_state.
	 */
	for_each_new_crtc_in_state(state, crtc, new_crtc_st, i) {
		err = drm_atomic_add_affected_planes(state, crtc);
		if (err)
			return err;

		err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
		if (err)
			return err;
	}

	err = drm_atomic_helper_check_planes(dev, state);
	if (err)
		return err;

	return 0;
}

static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
	.fb_create		= komeda_fb_create,
	.atomic_check		= komeda_kms_check,
	.atomic_commit		= drm_atomic_helper_commit,
};

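/*
 * Initialize the DRM mode configuration: set up the CRTC pipelines, the
 * supported framebuffer size limits, modifier support, and hook up the
 * komeda mode-config and commit-tail callbacks.
 */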
static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
					struct komeda_dev *mdev)
{
	struct drm_mode_config *config = &kms->base.mode_config;

	drm_mode_config_init(&kms->base);

	komeda_kms_setup_crtcs(kms, mdev);

	/* TODO: Get these values from the device */
	config->min_width	= 0;
	config->min_height	= 0;
	config->max_width	= 4096;
	config->max_height	= 4096;
	config->allow_fb_modifiers = true;

	config->funcs = &komeda_mode_config_funcs;
	config->helper_private = &komeda_mode_config_helpers;
}

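/*
 * Bring up the KMS device: allocate the drm_device, register planes, CRTCs,
 * private objects and writeback connectors, bind the component slaves,
 * request the shared interrupt and finally register the device. On failure,
 * whatever has been set up so far is unwound via the error labels.
 */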
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
	struct komeda_kms_dev *kms;
	struct drm_device *drm;
	int err;

	kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
				 struct komeda_kms_dev, base);
	if (IS_ERR(kms))
		return kms;

	drm = &kms->base;

	drm->dev_private = mdev;

	komeda_kms_mode_config_init(kms, mdev);

	err = komeda_kms_add_private_objs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_planes(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = drm_vblank_init(drm, kms->n_crtcs);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_crtcs(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = komeda_kms_add_wb_connectors(kms, mdev);
	if (err)
		goto cleanup_mode_config;

	err = component_bind_all(mdev->dev, kms);
	if (err)
		goto cleanup_mode_config;

	drm_mode_config_reset(drm);

	err = devm_request_irq(drm->dev, mdev->irq,
			       komeda_kms_irq_handler, IRQF_SHARED,
			       drm->driver->name, drm);
	if (err)
		goto free_component_binding;

	drm->irq_enabled = true;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto free_interrupts;

	return kms;

free_interrupts:
	drm_kms_helper_poll_fini(drm);
	drm->irq_enabled = false;
free_component_binding:
	component_unbind_all(mdev->dev, drm);
cleanup_mode_config:
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
	return ERR_PTR(err);
}

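/*
 * Tear down the KMS device in roughly the reverse order of
 * komeda_kms_attach(): unregister, stop polling, shut down the atomic state,
 * unbind the component slaves and release the mode-config resources.
 */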
void komeda_kms_detach(struct komeda_kms_dev *kms)
{
	struct drm_device *drm = &kms->base;
	struct komeda_dev *mdev = drm->dev_private;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);
	drm->irq_enabled = false;
	component_unbind_all(mdev->dev, drm);
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);
	drm->dev_private = NULL;
}