xref: /titanic_44/usr/src/uts/common/io/drm/drm_irq.c (revision 98157a7002f4f2cf7978f3084ca5577f0a1d72b2)
/*
 * drm_irq.c -- IRQ IOCTL and function support
 * Created: Fri Oct 18 2003 by anholt@FreeBSD.org
 */
/*
 * Copyright 2003 Eric Anholt
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <anholt@FreeBSD.org>
 *
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include "drmP.h"
#include "drm.h"
#include "drm_io32.h"

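/*
 * IOCTL handler: the caller supplies a PCI location (domain in the upper
 * bits of busnum, bus in the low 8 bits, plus device and function numbers).
 * If it matches this device, the device's IRQ number is copied back out;
 * otherwise EINVAL is returned.
 */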
/*ARGSUSED*/
int
drm_irq_by_busid(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_irq_busid_t irq;

	DRM_COPYFROM_WITH_RETURN(&irq, (void *)data, sizeof (irq));

	if ((irq.busnum >> 8) != dev->pci_domain ||
	    (irq.busnum & 0xff) != dev->pci_bus ||
	    irq.devnum != dev->pci_slot ||
	    irq.funcnum != dev->pci_func)
		return (EINVAL);

	irq.irq = dev->irq;

	DRM_COPYTO_WITH_RETURN((void *)data, &irq, sizeof (irq));

	return (0);
}


static irqreturn_t
drm_irq_handler_wrap(DRM_IRQ_ARGS)
{
	drm_device_t *dev = (void *)arg;
	int	ret;

	mutex_enter(&dev->irq_lock);
	ret = dev->driver->irq_handler(arg);
	mutex_exit(&dev->irq_lock);

	return (ret);
}


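/*
 * Attach the DRM interrupt handler through the legacy DDI interrupt
 * interfaces: high-level interrupts are rejected, the iblock cookie is
 * fetched and used to initialize irq_lock (so the mutex is usable at
 * interrupt level), and drm_irq_handler_wrap is registered with
 * ddi_add_intr(), taking the drm_device_t as its argument.
 */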
/*ARGSUSED*/
static int
drm_install_irq_handle(drm_device_t *dev)
{
	dev_info_t *dip = dev->dip;

	if (dip == NULL) {
		DRM_ERROR("drm_install_irq_handle: cannot get vgatext's dip");
		return (DDI_FAILURE);
	}

	if (ddi_intr_hilevel(dip, 0) != 0) {
		DRM_ERROR("drm_install_irq_handle: "
		    "high-level interrupts are not supported");
		return (DDI_FAILURE);
	}

	if (ddi_get_iblock_cookie(dip, (uint_t)0,
	    &dev->intr_block) != DDI_SUCCESS) {
		DRM_ERROR("drm_install_irq_handle: cannot get iblock cookie");
		return (DDI_FAILURE);
	}

	mutex_init(&dev->irq_lock, NULL, MUTEX_DRIVER, (void *)dev->intr_block);

	/* setup the interrupt handler */
	if (ddi_add_intr(dip, 0, &dev->intr_block,
	    (ddi_idevice_cookie_t *)NULL, drm_irq_handler_wrap,
	    (caddr_t)dev) != DDI_SUCCESS) {
		DRM_ERROR("drm_install_irq_handle: ddi_add_intr failed");
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}

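/*
 * Enable interrupt handling for the device.  The ordering matters: the
 * driver's irq_preinstall hook runs first (typically to mask interrupts),
 * the handler is then attached, the vblank wait queue is created if the
 * driver uses vblank interrupts, the irq_postinstall hook runs (typically
 * to unmask interrupts), and only then is irq_enabled set.
 */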
/*ARGSUSED*/
int
drm_irq_install(drm_device_t *dev)
{
	int ret;

	if (dev->dev_private == NULL) {
		DRM_ERROR("drm_irq_install: dev_private is NULL");
		return (EINVAL);
	}

	if (dev->irq_enabled) {
		DRM_ERROR("drm_irq_install: irq already enabled");
		return (EBUSY);
	}

	dev->context_flag = 0;
	mutex_init(&dev->tasklet_lock, NULL, MUTEX_DRIVER, NULL);

	/* before installing handler */
	dev->driver->irq_preinstall(dev);

	/* install handler */
	ret = drm_install_irq_handle(dev);
	if (ret != DDI_SUCCESS) {
		DRM_ERROR("drm_irq_install: drm_install_irq_handle failed");
		return (ret);
	}

	if (dev->driver->use_vbl_irq) {
		DRM_INIT_WAITQUEUE(&dev->vbl_queue, DRM_INTR_PRI(dev));
	}

	/* after installing handler */
	dev->driver->irq_postinstall(dev);

	dev->irq_enabled = 1;

	return (0);
}

static void
drm_uninstall_irq_handle(drm_device_t *dev)
{
	ASSERT(dev->dip);
	ddi_remove_intr(dev->dip, 0, dev->intr_block);
	mutex_destroy(&dev->irq_lock);
}


/*ARGSUSED*/
int
drm_irq_uninstall(drm_device_t *dev)
{
	if (!dev->irq_enabled) {
		return (EINVAL);
	}
	dev->irq_enabled = 0;
	dev->driver->irq_uninstall(dev);
	drm_uninstall_irq_handle(dev);
	dev->locked_tasklet_func = NULL;
	if (dev->driver->use_vbl_irq) {
		DRM_FINI_WAITQUEUE(&dev->vbl_queue);
	}
	mutex_destroy(&dev->tasklet_lock);
	return (DDI_SUCCESS);
}

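/*
 * Interrupt-control ioctl: DRM_INST_HANDLER installs the interrupt
 * handler, DRM_UNINST_HANDLER removes it, and anything else is rejected
 * with EINVAL.
 */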
/*ARGSUSED*/
int
drm_control(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_control_t ctl;
	int err;

	DRM_COPYFROM_WITH_RETURN(&ctl, (void *)data, sizeof (ctl));

	switch (ctl.func) {
	case DRM_INST_HANDLER:
		/*
		 * Handle drivers whose DRM used to require IRQ setup but
		 * no longer does.
		 */
		return (drm_irq_install(dev));
	case DRM_UNINST_HANDLER:
		err = drm_irq_uninstall(dev);
		return (err);
	default:
		return (EINVAL);
	}
}

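/*
 * Wait-for-vblank ioctl: the request names a target sequence number,
 * either absolute or relative to the current vblank count, and may select
 * the secondary counter (vbl_received2) with _DRM_VBLANK_SECONDARY.
 * Signal-based completion (_DRM_VBLANK_SIGNAL) is not implemented here;
 * blocking waits go through the driver's vblank_wait/vblank_wait2 hooks,
 * and the reply carries the sequence reached plus a timestamp.
 */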
/*ARGSUSED*/
int
drm_wait_vblank(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_wait_vblank_t vblwait;
	struct timeval now;
	int ret, flags;
	unsigned int	sequence;

	if (!dev->irq_enabled)
		return (EINVAL);

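	/*
	 * Copy in the request.  A 32-bit process calling into a 64-bit
	 * kernel uses a differently laid-out structure, so convert it
	 * field by field in that case.
	 */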
#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_wait_vblank_32_t vblwait32;
		DRM_COPYFROM_WITH_RETURN(&vblwait32, (void *)data,
		    sizeof (vblwait32));
		vblwait.request.type = vblwait32.request.type;
		vblwait.request.sequence = vblwait32.request.sequence;
		vblwait.request.signal = vblwait32.request.signal;
	} else {
#endif
		DRM_COPYFROM_WITH_RETURN(&vblwait, (void *)data,
		    sizeof (vblwait));
#ifdef _MULTI_DATAMODEL
	}
#endif

	if (vblwait.request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		cmn_err(CE_WARN, "drm_wait_vblank: wrong request type 0x%x",
		    vblwait.request.type);
		return (EINVAL);
	}

	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
	if (flags & _DRM_VBLANK_SECONDARY) {
		if (dev->driver->use_vbl_irq2 != 1) {
			cmn_err(CE_WARN, "wait_vblank: driver %s doesn't "
			    "support the second vblank interrupt",
			    dev->driver->driver_name);
		}
	} else {
		if (dev->driver->use_vbl_irq != 1) {
			cmn_err(CE_WARN, "wait_vblank: driver %s doesn't "
			    "support the vblank interrupt",
			    dev->driver->driver_name);
		}
	}

	sequence = atomic_read((flags & _DRM_VBLANK_SECONDARY) ?
	    &dev->vbl_received2 : &dev->vbl_received);

	if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
		vblwait.request.sequence += sequence;
		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
	}
#ifdef DEBUG
	else if ((vblwait.request.type & _DRM_VBLANK_ABSOLUTE) == 0) {
		cmn_err(CE_WARN, "vblank_wait: unknown request type");
		return (EINVAL);
	}
#endif

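	/*
	 * If the requested sequence has already passed (compared within a
	 * 2^23 window so that counter wraparound is tolerated) and the
	 * caller asked for _DRM_VBLANK_NEXTONMISS, wait for the next
	 * vblank instead of returning immediately.
	 */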
	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (sequence - vblwait.request.sequence) <= (1<<23)) {
		vblwait.request.sequence = sequence + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		/*
		 * Don't block the process; send a signal when the vblank
		 * interrupt arrives.
		 */

		cmn_err(CE_WARN, "NOT SUPPORTED YET, SHOULD BE ADDED");
		ret = EINVAL;
	} else {
		/* block until the vblank interrupt */

		if (flags & _DRM_VBLANK_SECONDARY) {
			ret = dev->driver->vblank_wait2(dev,
			    &vblwait.request.sequence);
		} else {
			ret = dev->driver->vblank_wait(dev,
			    &vblwait.request.sequence);
		}

		(void) uniqtime(&now);
		vblwait.reply.tval_sec = now.tv_sec;
		vblwait.reply.tval_usec = now.tv_usec;
	}

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(mode & FMODELS) == DDI_MODEL_ILP32) {
		drm_wait_vblank_32_t vblwait32;
		vblwait32.reply.type = vblwait.reply.type;
		vblwait32.reply.sequence = vblwait.reply.sequence;
		vblwait32.reply.tval_sec = vblwait.reply.tval_sec;
		vblwait32.reply.tval_usec = vblwait.reply.tval_usec;
		DRM_COPYTO_WITH_RETURN((void *)data, &vblwait32,
		    sizeof (vblwait32));
	} else {
#endif
		DRM_COPYTO_WITH_RETURN((void *)data, &vblwait,
		    sizeof (vblwait));
#ifdef _MULTI_DATAMODEL
	}
#endif
	return (ret);
}


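/*
 * Deliver pending vblank signals.  Each entry on vbl_sig_list records a
 * requested sequence number, a signal number and a pid; once the current
 * vblank count has reached the requested sequence (again compared within
 * a 2^23 window to tolerate wraparound), the signal is posted to the
 * process and the entry is removed and freed.
 */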
/*ARGSUSED*/
void
drm_vbl_send_signals(drm_device_t *dev)
{
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read(&dev->vbl_received);
	proc_t *pp;

	vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
	while (vbl_sig != NULL) {
		drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);

		if ((vbl_seq - vbl_sig->sequence) <= (1<<23)) {
			pp = prfind(vbl_sig->pid);
			if (pp != NULL)
				psignal(pp, vbl_sig->signo);

			TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
			drm_free(vbl_sig, sizeof (*vbl_sig), DRM_MEM_DRIVER);
		}
		vbl_sig = next;
	}
}

/*
 * Schedule a tasklet to call back a driver hook with the HW lock held.
 *
 * \param dev DRM device.
 * \param func Driver callback.
 *
 * This is intended for triggering actions that require the HW lock from an
 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
 * completes. Note that the callback may be called from interrupt or process
 * context, it must not make any assumptions about this. Also, the HW lock will
 * be held with the kernel context or any client context.
 */
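/*
 * Illustrative usage (the callback name below is hypothetical; the stored
 * locked_tasklet_func is dispatched outside this file, once the HW lock
 * can be taken):
 *
 *	static void mydrv_flush_ring(drm_device_t *dev) { ... }
 *	...
 *	drm_locked_tasklet(dev, mydrv_flush_ring);
 *
 * Only one callback can be pending at a time; a request made while another
 * is outstanding is silently dropped.
 */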

void
drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t *))
{
	mutex_enter(&dev->tasklet_lock);

	if (dev->locked_tasklet_func) {
		mutex_exit(&dev->tasklet_lock);
		return;
	}

	dev->locked_tasklet_func = func;

	mutex_exit(&dev->tasklet_lock);
}