xref: /titanic_44/usr/src/uts/common/io/drm/drm_irq.c (revision bda1f129971950880940a17bab0bf096d5744b0c)
1 /*
2  * drm_irq.c -- IRQ IOCTL and function support
3  * Created: Fri Oct 18 2003 by anholt@FreeBSD.org
4  */
5 /*
6  * Copyright 2003 Eric Anholt
7  * All Rights Reserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
24  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
25  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  * Authors:
28  *    Eric Anholt <anholt@FreeBSD.org>
29  *
30  */
31 
32 /*
33  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
34  * Use is subject to license terms.
35  */
36 
37 #include "drmP.h"
38 #include "drm.h"
39 #include "drm_io32.h"
40 
41 /*ARGSUSED*/
42 int
43 drm_irq_by_busid(DRM_IOCTL_ARGS)
44 {
45 	DRM_DEVICE;
46 	drm_irq_busid_t irq;
47 
48 	DRM_COPYFROM_WITH_RETURN(&irq, (void *)data, sizeof (irq));
49 
50 	if ((irq.busnum >> 8) != dev->pci_domain ||
51 	    (irq.busnum & 0xff) != dev->pci_bus ||
52 	    irq.devnum != dev->pci_slot ||
53 	    irq.funcnum != dev->pci_func)
54 		return (EINVAL);
55 
56 	irq.irq = dev->irq;
57 
58 	DRM_DEBUG("%d:%d:%d => IRQ %d\n",
59 	    irq.busnum, irq.devnum, irq.funcnum, irq.irq);
60 
61 	DRM_COPYTO_WITH_RETURN((void *)data, &irq, sizeof (irq));
62 
63 	return (0);
64 }
65 
66 
67 static irqreturn_t
68 drm_irq_handler_wrap(DRM_IRQ_ARGS)
69 {
70 	drm_device_t *dev = (void *)arg;
71 	int	ret;
72 
73 	mutex_enter(&dev->irq_lock);
74 	ret = dev->driver->irq_handler(arg);
75 	mutex_exit(&dev->irq_lock);
76 
77 	return (ret);
78 }
79 
80 static void vblank_disable_fn(void *arg)
81 {
82 	struct drm_device *dev = (struct drm_device *)arg;
83 	int i;
84 
85 	if (!dev->vblank_disable_allowed)
86 		return;
87 
88 	for (i = 0; i < dev->num_crtcs; i++) {
89 		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
90 		    atomic_read(&dev->vblank_enabled[i]) == 1) {
91 			dev->last_vblank[i] =
92 			    dev->driver->get_vblank_counter(dev, i);
93 			dev->driver->disable_vblank(dev, i);
94 			atomic_set(&dev->vblank_enabled[i], 0);
95 			DRM_DEBUG("disable vblank");
96 		}
97 	}
98 }
99 
100 void
101 drm_vblank_cleanup(struct drm_device *dev)
102 {
103 
104 	/* Bail if the driver didn't call drm_vblank_init() */
105 	if (dev->num_crtcs == 0)
106 		return;
107 
108 	vblank_disable_fn((void *)dev);
109 
110 	drm_free(dev->vbl_queues, sizeof (wait_queue_head_t) * dev->num_crtcs,
111 	    DRM_MEM_DRIVER);
112 	drm_free(dev->vbl_sigs, sizeof (struct drm_vbl_sig) * dev->num_crtcs,
113 	    DRM_MEM_DRIVER);
114 	drm_free(dev->_vblank_count, sizeof (atomic_t) *
115 	    dev->num_crtcs, DRM_MEM_DRIVER);
116 	drm_free(dev->vblank_refcount, sizeof (atomic_t) *
117 	    dev->num_crtcs, DRM_MEM_DRIVER);
118 	drm_free(dev->vblank_enabled, sizeof (int) *
119 	    dev->num_crtcs, DRM_MEM_DRIVER);
120 	drm_free(dev->last_vblank, sizeof (u32) * dev->num_crtcs,
121 	    DRM_MEM_DRIVER);
122 	dev->num_crtcs = 0;
123 }
124 
125 int
126 drm_vblank_init(struct drm_device *dev, int num_crtcs)
127 {
128 	int i, ret = ENOMEM;
129 
130 	atomic_set(&dev->vbl_signal_pending, 0);
131 	dev->num_crtcs = num_crtcs;
132 
133 
134 	dev->vbl_queues = drm_alloc(sizeof (wait_queue_head_t) * num_crtcs,
135 	    DRM_MEM_DRIVER);
136 	if (!dev->vbl_queues)
137 		goto err;
138 
139 	dev->vbl_sigs = drm_alloc(sizeof (struct drm_vbl_sig) * num_crtcs,
140 	    DRM_MEM_DRIVER);
141 	if (!dev->vbl_sigs)
142 		goto err;
143 
144 	dev->_vblank_count = drm_alloc(sizeof (atomic_t) * num_crtcs,
145 	    DRM_MEM_DRIVER);
146 	if (!dev->_vblank_count)
147 		goto err;
148 
149 	dev->vblank_refcount = drm_alloc(sizeof (atomic_t) * num_crtcs,
150 	    DRM_MEM_DRIVER);
151 	if (!dev->vblank_refcount)
152 		goto err;
153 
154 	dev->vblank_enabled = drm_alloc(num_crtcs * sizeof (int),
155 	    DRM_MEM_DRIVER);
156 	if (!dev->vblank_enabled)
157 		goto err;
158 
159 	dev->last_vblank = drm_alloc(num_crtcs * sizeof (u32), DRM_MEM_DRIVER);
160 	if (!dev->last_vblank)
161 		goto err;
162 	/* Zero per-crtc vblank stuff */
163 	for (i = 0; i < num_crtcs; i++) {
164 		DRM_INIT_WAITQUEUE(&dev->vbl_queues[i], DRM_INTR_PRI(dev));
165 		TAILQ_INIT(&dev->vbl_sigs[i]);
166 		atomic_set(&dev->_vblank_count[i], 0);
167 		atomic_set(&dev->vblank_refcount[i], 0);
168 	}
169 
170 	dev->vblank_disable_allowed = 1;
171 	return (0);
172 
173 err:
174 	DRM_ERROR("drm_vblank_init: alloc error");
175 	drm_vblank_cleanup(dev);
176 	return (ret);
177 }
178 
179 /*ARGSUSED*/
180 static int
181 drm_install_irq_handle(drm_device_t *dev)
182 {
183 	dev_info_t *dip = dev->dip;
184 
185 	if (dip == NULL) {
186 		DRM_ERROR("drm_install_irq_handle: cannot get vgatext's dip");
187 		return (DDI_FAILURE);
188 	}
189 
190 	if (ddi_intr_hilevel(dip, 0) != 0) {
191 		DRM_ERROR("drm_install_irq_handle: "
192 		    "high-level interrupts are not supported");
193 		return (DDI_FAILURE);
194 	}
195 
196 	if (ddi_get_iblock_cookie(dip, (uint_t)0,
197 	    &dev->intr_block) != DDI_SUCCESS) {
198 		DRM_ERROR("drm_install_irq_handle: cannot get iblock cookie");
199 		return (DDI_FAILURE);
200 	}
201 
202 	/* setup the interrupt handler */
203 	if (ddi_add_intr(dip, 0, &dev->intr_block,
204 	    (ddi_idevice_cookie_t *)NULL, drm_irq_handler_wrap,
205 	    (caddr_t)dev) != DDI_SUCCESS) {
206 		DRM_ERROR("drm_install_irq_handle: ddi_add_intr failed");
207 		return (DDI_FAILURE);
208 	}
209 
210 	return (DDI_SUCCESS);
211 }
212 
213 /*ARGSUSED*/
214 int
215 drm_irq_install(drm_device_t *dev)
216 {
217 	int ret;
218 
219 	if (dev->dev_private == NULL) {
220 		DRM_ERROR("drm_irq_install: dev_private is NULL");
221 		return (EINVAL);
222 	}
223 
224 	if (dev->irq_enabled) {
225 		DRM_ERROR("drm_irq_install: irq already enabled");
226 		return (EBUSY);
227 	}
228 
229 	DRM_DEBUG("drm_irq_install irq=%d\n", dev->irq);
230 
231 	/* before installing handler */
232 	ret = dev->driver->irq_preinstall(dev);
233 	if (ret)
234 		return (EINVAL);
235 
236 	/* install handler */
237 	ret  = drm_install_irq_handle(dev);
238 	if (ret != DDI_SUCCESS) {
239 		DRM_ERROR("drm_irq_install: drm_install_irq_handle failed");
240 		return (ret);
241 	}
242 
243 	/* after installing handler */
244 	dev->driver->irq_postinstall(dev);
245 
246 	dev->irq_enabled = 1;
247 	dev->context_flag = 0;
248 
249 	return (0);
250 }
251 
/*
 * Detach the interrupt handler registered by drm_install_irq_handle().
 */
static void
drm_uninstall_irq_handle(drm_device_t *dev)
{
	ASSERT(dev->dip);
	ddi_remove_intr(dev->dip, 0, dev->intr_block);
}
258 
259 
260 /*ARGSUSED*/
261 int
262 drm_irq_uninstall(drm_device_t *dev)
263 {
264 	int i;
265 	if (!dev->irq_enabled) {
266 		return (EINVAL);
267 	}
268 	dev->irq_enabled = 0;
269 
270 	/*
271 	 * Wake up any waiters so they don't hang.
272 	 */
273 	DRM_SPINLOCK(&dev->vbl_lock);
274 	for (i = 0; i < dev->num_crtcs; i++) {
275 		DRM_WAKEUP(&dev->vbl_queues[i]);
276 		dev->vblank_enabled[i] = 0;
277 	}
278 	DRM_SPINUNLOCK(&dev->vbl_lock);
279 
280 	dev->driver->irq_uninstall(dev);
281 	drm_uninstall_irq_handle(dev);
282 	dev->locked_tasklet_func = NULL;
283 
284 	return (DDI_SUCCESS);
285 }
286 
287 /*ARGSUSED*/
288 int
289 drm_control(DRM_IOCTL_ARGS)
290 {
291 	DRM_DEVICE;
292 	drm_control_t ctl;
293 	int err;
294 
295 	DRM_COPYFROM_WITH_RETURN(&ctl, (void *)data, sizeof (ctl));
296 
297 	switch (ctl.func) {
298 	case DRM_INST_HANDLER:
299 		/*
300 		 * Handle drivers whose DRM used to require IRQ setup but the
301 		 * no longer does.
302 		 */
303 		return (drm_irq_install(dev));
304 	case DRM_UNINST_HANDLER:
305 		err = drm_irq_uninstall(dev);
306 		return (err);
307 	default:
308 		return (EINVAL);
309 	}
310 }
311 
312 u32
313 drm_vblank_count(struct drm_device *dev, int crtc)
314 {
315 	return (atomic_read(&dev->_vblank_count[crtc]));
316 }
317 
318 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
319 {
320 	u32 cur_vblank, diff;
321 	/*
322 	 * Interrupts were disabled prior to this call, so deal with counter
323 	 * wrap if needed.
324 	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
325 	 * here if the register is small or we had vblank interrupts off for
326 	 * a long time.
327 	 */
328 	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
329 	diff = cur_vblank - dev->last_vblank[crtc];
330 	if (cur_vblank < dev->last_vblank[crtc]) {
331 		diff += dev->max_vblank_count;
332 	DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
333 	    crtc, dev->last_vblank[crtc], cur_vblank, diff);
334 	}
335 
336 	atomic_add(diff, &dev->_vblank_count[crtc]);
337 }
338 
339 static timeout_id_t timer_id = NULL;
340 
/*
 * Take a reference on vblank delivery for the given CRTC.
 *
 * The 0->1 transition enables the vblank interrupt via the driver and
 * resynchronizes the software counter with the hardware counter.  Any
 * pending deferred-disable timeout is cancelled first so the enable
 * cannot race with vblank_disable_fn().
 *
 * Returns 0 on success, or the driver's enable_vblank() error code, in
 * which case the reference just taken is dropped again.
 */
int
drm_vblank_get(struct drm_device *dev, int crtc)
{
	int ret = 0;

	DRM_SPINLOCK(&dev->vbl_lock);

	/* Cancel a deferred disable armed by drm_vblank_put(). */
	if (timer_id != NULL) {
		(void) untimeout(timer_id);
		timer_id = NULL;
	}

	/* Going from 0->1 means we have to enable interrupts again */
	atomic_add(1, &dev->vblank_refcount[crtc]);
	if (dev->vblank_refcount[crtc] == 1 &&
	    atomic_read(&dev->vblank_enabled[crtc]) == 0) {
		ret = dev->driver->enable_vblank(dev, crtc);
		if (ret)
			/* Enable failed: undo the reference we just took. */
			atomic_dec(&dev->vblank_refcount[crtc]);
		else {
			atomic_set(&dev->vblank_enabled[crtc], 1);
			/* Account for vblanks missed while disabled. */
			drm_update_vblank_count(dev, crtc);
		}
	}
	DRM_SPINUNLOCK(&dev->vbl_lock);

	return (ret);
}
369 
/*
 * Drop a reference on vblank delivery for the given CRTC.
 *
 * When the last reference goes away, the interrupt is not disabled
 * immediately: a 5-second timeout is armed so that rapid get/put
 * cycles do not thrash the hardware (see vblank_disable_fn()).
 */
void
drm_vblank_put(struct drm_device *dev, int crtc)
{
	DRM_SPINLOCK(&dev->vbl_lock);
	/* Last user schedules interrupt disable */
	atomic_dec(&dev->vblank_refcount[crtc]);

	if (dev->vblank_refcount[crtc] == 0)
		timer_id = timeout(vblank_disable_fn, (void *) dev, 5*DRM_HZ);

	DRM_SPINUNLOCK(&dev->vbl_lock);
}
382 
383 /*ARGSUSED*/
384 int
385 drm_wait_vblank(DRM_IOCTL_ARGS)
386 {
387 	DRM_DEVICE;
388 	drm_wait_vblank_t vblwait;
389 	int ret, flags, crtc;
390 	unsigned int	sequence;
391 
392 	if (!dev->irq_enabled) {
393 		DRM_DEBUG("wait vblank, EINVAL");
394 		return (EINVAL);
395 	}
396 #ifdef _MULTI_DATAMODEL
397 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
398 		drm_wait_vblank_32_t vblwait32;
399 		DRM_COPYFROM_WITH_RETURN(&vblwait32, (void *)data,
400 		    sizeof (vblwait32));
401 		vblwait.request.type = vblwait32.request.type;
402 		vblwait.request.sequence = vblwait32.request.sequence;
403 		vblwait.request.signal = vblwait32.request.signal;
404 	} else {
405 #endif
406 		DRM_COPYFROM_WITH_RETURN(&vblwait, (void *)data,
407 		    sizeof (vblwait));
408 #ifdef _MULTI_DATAMODEL
409 	}
410 #endif
411 
412 	if (vblwait.request.type &
413 	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
414 		cmn_err(CE_WARN, "drm_wait_vblank: wrong request type 0x%x",
415 		    vblwait.request.type);
416 		return (EINVAL);
417 	}
418 
419 	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
420 	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
421 	if (crtc >= dev->num_crtcs)
422 		return (ENOTSUP);
423 
424 	ret = drm_vblank_get(dev, crtc);
425 	if (ret) {
426 		DRM_DEBUG("can't get drm vblank");
427 		return (ret);
428 	}
429 	sequence = drm_vblank_count(dev, crtc);
430 
431 	switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
432 	case _DRM_VBLANK_RELATIVE:
433 		vblwait.request.sequence += sequence;
434 		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
435 		/*FALLTHROUGH*/
436 	case _DRM_VBLANK_ABSOLUTE:
437 		break;
438 	default:
439 		DRM_DEBUG("wait vblank return EINVAL");
440 		return (EINVAL);
441 	}
442 
443 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
444 	    (sequence - vblwait.request.sequence) <= (1<<23)) {
445 		vblwait.request.sequence = sequence + 1;
446 	}
447 
448 	if (flags & _DRM_VBLANK_SIGNAL) {
449 		/*
450 		 * Don't block process, send signal when vblank interrupt
451 		 */
452 		DRM_DEBUG("NOT SUPPORT YET, SHOULD BE ADDED");
453 		cmn_err(CE_WARN, "NOT SUPPORT YET, SHOULD BE ADDED");
454 		ret = EINVAL;
455 		goto done;
456 	} else {
457 		/* block until vblank interupt */
458 		/* shared code returns -errno */
459 		DRM_WAIT_ON(ret, &dev->vbl_queues[crtc], 3 * DRM_HZ,
460 		    ((drm_vblank_count(dev, crtc)
461 		    - vblwait.request.sequence) <= (1 << 23)));
462 		if (ret != EINTR) {
463 			struct timeval now;
464 			(void) uniqtime(&now);
465 			vblwait.reply.tval_sec = now.tv_sec;
466 			vblwait.reply.tval_usec = now.tv_usec;
467 			vblwait.reply.sequence = drm_vblank_count(dev, crtc);
468 		}
469 	}
470 
471 done:
472 #ifdef _MULTI_DATAMODEL
473 	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
474 		drm_wait_vblank_32_t vblwait32;
475 		vblwait32.reply.type = vblwait.reply.type;
476 		vblwait32.reply.sequence = vblwait.reply.sequence;
477 		vblwait32.reply.tval_sec = (int32_t)vblwait.reply.tval_sec;
478 		vblwait32.reply.tval_usec = (int32_t)vblwait.reply.tval_usec;
479 		DRM_COPYTO_WITH_RETURN((void *)data, &vblwait32,
480 		    sizeof (vblwait32));
481 	} else {
482 #endif
483 		DRM_COPYTO_WITH_RETURN((void *)data, &vblwait,
484 		    sizeof (vblwait));
485 #ifdef _MULTI_DATAMODEL
486 	}
487 #endif
488 
489 	drm_vblank_put(dev, crtc);
490 	return (ret);
491 }
492 
493 
/*
 * Stub: signal-on-vblank delivery is not implemented on this platform
 * (drm_wait_vblank() rejects _DRM_VBLANK_SIGNAL accordingly); only the
 * debug trace remains.
 */
/*ARGSUSED*/
void
drm_vbl_send_signals(drm_device_t *dev)
{
	DRM_DEBUG("drm_vbl_send_signals");
}
500 
/*
 * Per-CRTC vblank interrupt bottom half: bump the software frame
 * counter and wake any process sleeping in drm_wait_vblank().
 */
void
drm_handle_vblank(struct drm_device *dev, int crtc)
{
	atomic_inc(&dev->_vblank_count[crtc]);
	DRM_WAKEUP(&dev->vbl_queues[crtc]);
	drm_vbl_send_signals(dev);
}
508 
509 /*
510  * Schedule a tasklet to call back a driver hook with the HW lock held.
511  *
512  * \param dev DRM device.
513  * \param func Driver callback.
514  *
515  * This is intended for triggering actions that require the HW lock from an
516  * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
517  * completes. Note that the callback may be called from interrupt or process
518  * context, it must not make any assumptions about this. Also, the HW lock will
519  * be held with the kernel context or any client context.
520  */
521 
522 void
523 drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t *))
524 {
525 	mutex_enter(&dev->tasklet_lock);
526 
527 	if (dev->locked_tasklet_func) {
528 		mutex_exit(&dev->tasklet_lock);
529 		return;
530 	}
531 
532 	dev->locked_tasklet_func = func;
533 
534 	mutex_exit(&dev->tasklet_lock);
535 }
536