xref: /linux/drivers/gpu/drm/omapdrm/omap_irq.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <drm/drm_vblank.h>
#include <drm/drm_print.h>

#include "omap_drv.h"

/* One pending wait for a set of DISPC interrupts, tracked on priv->wait_list. */
struct omap_irq_wait {
	struct list_head node;
	wait_queue_head_t wq;
	u32 irqmask;	/* DISPC_IRQ_* bits this waiter cares about */
	int count;	/* number of interrupts still to fire */
};

/* call with wait_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait;
	u32 irqmask = priv->irq_mask;

	assert_spin_locked(&priv->wait_lock);

	list_for_each_entry(wait, &priv->wait_list, node)
		irqmask |= wait->irqmask;

	DBG("irqmask=%08x", irqmask);

	dispc_write_irqenable(priv->dispc, irqmask);
}

static void omap_irq_wait_handler(struct omap_irq_wait *wait)
{
	wait->count--;
	wake_up(&wait->wq);
}

/* Arm a wait for @count occurrences of the interrupts in @irqmask and enable them. */
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
		u32 irqmask, int count)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	unsigned long flags;

	init_waitqueue_head(&wait->wq);
	wait->irqmask = irqmask;
	wait->count = count;

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_add(&wait->node, &priv->wait_list);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return wait;
}

/*
 * Wait for up to @timeout jiffies for the interrupts armed with
 * omap_irq_wait_init() and free the wait object.  Returns 0 on success,
 * -1 on timeout.
 */
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
		unsigned long timeout)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	int ret;

	ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_del(&wait->node);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	kfree(wait);

	return ret == 0 ? -1 : 0;
}

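/*
 * Example (illustrative sketch only, not code from this file): a caller
 * such as a CRTC enable/disable path would pair the two helpers above
 * roughly like this, blocking until one VSYNC has fired.  The "channel"
 * variable and the timeout value are assumptions made for the example.
 *
 *	struct omap_irq_wait *wait;
 *	int ret;
 *
 *	wait = omap_irq_wait_init(dev,
 *			dispc_mgr_get_vsync_irq(priv->dispc, channel), 1);
 *	ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
 *	if (ret)
 *		dev_warn(dev->dev, "timeout waiting for vsync\n");
 */
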
int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);
	int framedone_irq =
		dispc_mgr_get_framedone_irq(priv->dispc, channel);

	DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);

	spin_lock_irqsave(&priv->wait_lock, flags);
	if (enable)
		priv->irq_mask |= framedone_irq;
	else
		priv->irq_mask &= ~framedone_irq;
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

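/*
 * Example (illustrative sketch only): FRAMEDONE interrupts only matter while
 * a manually updated (command mode) display is active, so a caller would
 * typically toggle them together with the CRTC.  The "manually_updated"
 * flag below is an assumption for the example, not a name from this file.
 *
 *	if (manually_updated)
 *		omap_irq_enable_framedone(crtc, enable);
 */
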
/**
 * omap_irq_enable_vblank - enable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Enable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 *
 * RETURNS
 * Zero on success, appropriate errno if the given @crtc's vblank
 * interrupt cannot be enabled.
 */
int omap_irq_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc, channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * omap_irq_disable_vblank - disable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Disable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 */
void omap_irq_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc, channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);
}

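/*
 * Example (illustrative sketch only): the two vblank helpers above are
 * intended to be wired into the CRTC's vblank hooks; the surrounding
 * structure lives outside this file (in omap_crtc.c).
 *
 *	static const struct drm_crtc_funcs omap_crtc_funcs = {
 *		...
 *		.enable_vblank = omap_irq_enable_vblank,
 *		.disable_vblank = omap_irq_disable_vblank,
 *	};
 */
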
static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
				    u32 irqstatus)
{
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	static const struct {
		const char *name;
		u32 mask;
	} sources[] = {
		{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
		{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
		{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
		{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
	};

	const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID1_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID2_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
	unsigned int i;

	spin_lock(&priv->wait_lock);
	irqstatus &= priv->irq_mask & mask;
	spin_unlock(&priv->wait_lock);

	if (!irqstatus)
		return;

	if (!__ratelimit(&_rs))
		return;

	DRM_ERROR("FIFO underflow on ");

	for (i = 0; i < ARRAY_SIZE(sources); ++i) {
		if (sources[i].mask & irqstatus)
			pr_cont("%s ", sources[i].name);
	}

	pr_cont("(0x%08x)\n", irqstatus);
}

static void omap_irq_ocp_error_handler(struct drm_device *dev,
				       u32 irqstatus)
{
	if (!(irqstatus & DISPC_IRQ_OCP_ERR))
		return;

	dev_err_ratelimited(dev->dev, "OCP error\n");
}

static irqreturn_t omap_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait, *n;
	unsigned long flags;
	unsigned int id;
	u32 irqstatus;

	irqstatus = dispc_read_irqstatus(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, irqstatus);
	dispc_read_irqstatus(priv->dispc);	/* flush posted write */

	VERB("irqs: %08x", irqstatus);

	/* Dispatch per-CRTC events: vblank, sync lost and framedone. */
	for (id = 0; id < priv->num_pipes; id++) {
		struct drm_crtc *crtc = priv->pipes[id].crtc;
		enum omap_channel channel = omap_crtc_channel(crtc);

		if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
			drm_handle_vblank(dev, id);
			omap_crtc_vblank_irq(crtc);
		}

		if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
			omap_crtc_error_irq(crtc, irqstatus);

		if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
			omap_crtc_framedone_irq(crtc, irqstatus);
	}

	omap_irq_ocp_error_handler(dev, irqstatus);
	omap_irq_fifo_underflow(priv, irqstatus);

	/* Wake up anyone blocked in omap_irq_wait() on these interrupts. */
	spin_lock_irqsave(&priv->wait_lock, flags);
	list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
		if (wait->irqmask & irqstatus)
			omap_irq_wait_handler(wait);
	}
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return IRQ_HANDLED;
}

static const u32 omap_underflow_irqs[] = {
	[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};

int omap_drm_irq_install(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
	unsigned int max_planes;
	unsigned int i;
	int ret;

	spin_lock_init(&priv->wait_lock);
	INIT_LIST_HEAD(&priv->wait_list);

	/*
	 * Build the always-on part of the interrupt mask: OCP errors, FIFO
	 * underflows for the planes in use and sync lost for all managers.
	 */
	priv->irq_mask = DISPC_IRQ_OCP_ERR;

	max_planes = min(ARRAY_SIZE(priv->planes),
			 ARRAY_SIZE(omap_underflow_irqs));
	for (i = 0; i < max_planes; ++i) {
		if (priv->planes[i])
			priv->irq_mask |= omap_underflow_irqs[i];
	}

	for (i = 0; i < num_mgrs; ++i)
		priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);

	/* Clear any stale interrupt status before requesting the IRQ. */
	dispc_runtime_get(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, 0xffffffff);
	dispc_runtime_put(priv->dispc);

	ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
	if (ret < 0)
		return ret;

	priv->irq_enabled = true;

	return 0;
}

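/*
 * Example (illustrative sketch only): the driver init/teardown path in
 * omap_drv.c is the expected caller, roughly along these lines; the error
 * label name is an assumption made for the example.
 *
 *	ret = omap_drm_irq_install(ddev);
 *	if (ret < 0)
 *		goto err_cleanup_modeset;
 *	...
 *	omap_drm_irq_uninstall(ddev);
 */
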
void omap_drm_irq_uninstall(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	if (!priv->irq_enabled)
		return;

	priv->irq_enabled = false;

	dispc_free_irq(priv->dispc, dev);
}