xref: /linux/drivers/gpu/drm/omapdrm/omap_irq.c (revision 48dea9a700c8728cc31a1dd44588b97578de86ee)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <drm/drm_vblank.h>

#include "omap_drv.h"

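/*
 * struct omap_irq_wait - context for a one-shot interrupt wait
 * @node:    entry in &omap_drm_private.wait_list
 * @wq:      wait queue woken from the interrupt handler
 * @irqmask: DISPC interrupt bits to wait for
 * @count:   number of interrupts still to be received
 */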
struct omap_irq_wait {
	struct list_head node;
	wait_queue_head_t wq;
	u32 irqmask;
	int count;
};

/* call with wait_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait;
	u32 irqmask = priv->irq_mask;

	assert_spin_locked(&priv->wait_lock);

	list_for_each_entry(wait, &priv->wait_list, node)
		irqmask |= wait->irqmask;

	DBG("irqmask=%08x", irqmask);

	priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
}

static void omap_irq_wait_handler(struct omap_irq_wait *wait)
{
	wait->count--;
	wake_up(&wait->wq);
}

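/*
 * Allocate a wait object for @count occurrences of the interrupts in
 * @irqmask, add it to the wait list and enable the requested interrupts.
 * The wait is completed (and the object freed) by a later call to
 * omap_irq_wait().
 */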
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
		u32 irqmask, int count)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	unsigned long flags;

	init_waitqueue_head(&wait->wq);
	wait->irqmask = irqmask;
	wait->count = count;

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_add(&wait->node, &priv->wait_list);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return wait;
}

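/*
 * Wait for at most @timeout jiffies until the interrupts registered with
 * omap_irq_wait_init() have fired @count times, then unregister and free the
 * wait object.  Returns 0 on success and -1 on timeout.
 *
 * A minimal usage sketch (the interrupt bit and the timeout below are only
 * examples, not taken from an actual caller):
 *
 *	struct omap_irq_wait *wait;
 *
 *	wait = omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 1);
 *	... kick off the operation that raises the interrupt ...
 *	if (omap_irq_wait(dev, wait, msecs_to_jiffies(100)))
 *		dev_err(dev->dev, "timeout waiting for FRAMEDONE\n");
 */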
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
		unsigned long timeout)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	int ret;

	ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_del(&wait->node);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	kfree(wait);

	return ret == 0 ? -1 : 0;
}

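/*
 * Enable or disable the FRAMEDONE interrupt of the DISPC channel driving
 * @crtc.
 */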
int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);
	int framedone_irq =
		priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel);

	DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);

	spin_lock_irqsave(&priv->wait_lock, flags);
	if (enable)
		priv->irq_mask |= framedone_irq;
	else
		priv->irq_mask &= ~framedone_irq;
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * omap_irq_enable_vblank - enable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Enable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 *
 * RETURNS
 * Zero on success, appropriate errno if the given @crtc's vblank
 * interrupt cannot be enabled.
 */
int omap_irq_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
							     channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * omap_irq_disable_vblank - disable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Disable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 */
void omap_irq_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
							      channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);
}

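/*
 * Report DISPC FIFO underflows.  Only underflow bits that are currently
 * enabled in the driver's irq_mask are reported, and the output is
 * rate-limited to avoid flooding the log.
 */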
static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
				    u32 irqstatus)
{
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	static const struct {
		const char *name;
		u32 mask;
	} sources[] = {
		{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
		{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
		{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
		{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
	};

	const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID1_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID2_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
	unsigned int i;

	spin_lock(&priv->wait_lock);
	irqstatus &= priv->irq_mask & mask;
	spin_unlock(&priv->wait_lock);

	if (!irqstatus)
		return;

	if (!__ratelimit(&_rs))
		return;

	DRM_ERROR("FIFO underflow on ");

	for (i = 0; i < ARRAY_SIZE(sources); ++i) {
		if (sources[i].mask & irqstatus)
			pr_cont("%s ", sources[i].name);
	}

	pr_cont("(0x%08x)\n", irqstatus);
}

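/* Report OCP (interconnect) bus errors signalled by the DISPC. */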
static void omap_irq_ocp_error_handler(struct drm_device *dev,
	u32 irqstatus)
{
	if (!(irqstatus & DISPC_IRQ_OCP_ERR))
		return;

	dev_err_ratelimited(dev->dev, "OCP error\n");
}

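/*
 * Top-level DISPC interrupt handler, registered through
 * dispc_ops->request_irq in omap_drm_irq_install().  It acknowledges the
 * pending interrupts, fans vsync, sync-lost and framedone events out to the
 * affected CRTCs, reports OCP errors and FIFO underflows, and wakes any
 * waiters registered with omap_irq_wait_init().
 */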
static irqreturn_t omap_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait, *n;
	unsigned long flags;
	unsigned int id;
	u32 irqstatus;

	irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
	priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
	priv->dispc_ops->read_irqstatus(priv->dispc);	/* flush posted write */

	VERB("irqs: %08x", irqstatus);

	for (id = 0; id < priv->num_pipes; id++) {
		struct drm_crtc *crtc = priv->pipes[id].crtc;
		enum omap_channel channel = omap_crtc_channel(crtc);

		if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
			drm_handle_vblank(dev, id);
			omap_crtc_vblank_irq(crtc);
		}

		if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
			omap_crtc_error_irq(crtc, irqstatus);

		if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel))
			omap_crtc_framedone_irq(crtc, irqstatus);
	}

	omap_irq_ocp_error_handler(dev, irqstatus);
	omap_irq_fifo_underflow(priv, irqstatus);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
		if (wait->irqmask & irqstatus)
			omap_irq_wait_handler(wait);
	}
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return IRQ_HANDLED;
}

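/* FIFO underflow interrupt bit for each DISPC plane, indexed by plane id. */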
static const u32 omap_underflow_irqs[] = {
	[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};

/*
 * We need a special version, instead of just using drm_irq_install(),
 * because we need to register the irq via omapdss.  Once omapdss and
 * omapdrm are merged together we can assign the dispc hwmod data to
 * ourselves and drop these and just use drm_irq_{install,uninstall}()
 */

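/*
 * Set up the wait list, build the initial interrupt mask (OCP errors, FIFO
 * underflows for the registered planes and sync-lost for each manager),
 * clear any stale interrupt status and register omap_irq_handler() with
 * omapdss.
 *
 * A sketch of the expected pairing at driver load and unload time (ddev is
 * a placeholder for the driver's &struct drm_device; error handling
 * omitted):
 *
 *	ret = omap_drm_irq_install(ddev);
 *	...
 *	omap_drm_irq_uninstall(ddev);
 */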
int omap_drm_irq_install(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
	unsigned int max_planes;
	unsigned int i;
	int ret;

	spin_lock_init(&priv->wait_lock);
	INIT_LIST_HEAD(&priv->wait_list);

	priv->irq_mask = DISPC_IRQ_OCP_ERR;

	max_planes = min(ARRAY_SIZE(priv->planes),
			 ARRAY_SIZE(omap_underflow_irqs));
	for (i = 0; i < max_planes; ++i) {
		if (priv->planes[i])
			priv->irq_mask |= omap_underflow_irqs[i];
	}

	for (i = 0; i < num_mgrs; ++i)
		priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);

	priv->dispc_ops->runtime_get(priv->dispc);
	priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
	priv->dispc_ops->runtime_put(priv->dispc);

	ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
	if (ret < 0)
		return ret;

	dev->irq_enabled = true;

	return 0;
}

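/* Counterpart of omap_drm_irq_install(): unregister the interrupt handler. */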
void omap_drm_irq_uninstall(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	if (!dev->irq_enabled)
		return;

	dev->irq_enabled = false;

	priv->dispc_ops->free_irq(priv->dispc, dev);
}