xref: /linux/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c (revision 55ec81f7517fad09135f65552cea0a3ee84fff30)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf)				(0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf)				(MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf)			(MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf)			(MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf)				(0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4)				(0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf)			(0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf)			(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf)		(0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf)	(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)

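/*
 * Worked example (pure arithmetic on the macros above): on a legacy DPU,
 * the INTF1 interrupt status register lives at
 * MDP_INTF_INTR_STATUS(1) = (0x6A000 + 0x800 * 1) + 0x1c4 = 0x6A9C4
 * relative to the MDP base, while on DPU >= 7.0 the same register is
 * MDP_INTF_REV_7xxx_INTR_STATUS(1) = (0x34000 + 0x1000 * 1) + 0x1c4 = 0x351C4.
 */
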
/**
 * struct dpu_intr_reg - one DPU interrupt register set (per interrupt block)
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
 */
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_INTR_CLEAR(0),
		MDP_INTF_INTR_EN(0),
		MDP_INTF_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_INTR_CLEAR(1),
		MDP_INTF_INTR_EN(1),
		MDP_INTF_INTR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_INTR_CLEAR(2),
		MDP_INTF_INTR_EN(2),
		MDP_INTF_INTR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_INTR_CLEAR(3),
		MDP_INTF_INTR_EN(3),
		MDP_INTF_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_INTR_CLEAR(4),
		MDP_INTF_INTR_EN(4),
		MDP_INTF_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_INTR_CLEAR(5),
		MDP_INTF_INTR_EN(5),
		MDP_INTF_INTR_STATUS(5)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(1),
		MDP_INTF_INTR_TEAR_EN(1),
		MDP_INTF_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(2),
		MDP_INTF_INTR_TEAR_EN(2),
		MDP_INTF_INTR_TEAR_STATUS(2)
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(0),
		MDP_AD4_INTR_EN_OFF(0),
		MDP_AD4_INTR_STATUS_OFF(0),
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(1),
		MDP_AD4_INTR_EN_OFF(1),
		MDP_AD4_INTR_STATUS_OFF(1),
	},
};

/*
 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
 */
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(0),
		MDP_INTF_REV_7xxx_INTR_EN(0),
		MDP_INTF_REV_7xxx_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_EN(1),
		MDP_INTF_REV_7xxx_INTR_STATUS(1)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_EN(2),
		MDP_INTF_REV_7xxx_INTR_STATUS(2)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(3),
		MDP_INTF_REV_7xxx_INTR_EN(3),
		MDP_INTF_REV_7xxx_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(4),
		MDP_INTF_REV_7xxx_INTR_EN(4),
		MDP_INTF_REV_7xxx_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(5),
		MDP_INTF_REV_7xxx_INTR_EN(5),
		MDP_INTF_REV_7xxx_INTR_STATUS(5)
	},
	[MDP_INTF6_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(6),
		MDP_INTF_REV_7xxx_INTR_EN(6),
		MDP_INTF_REV_7xxx_INTR_STATUS(6)
	},
	[MDP_INTF7_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(7),
		MDP_INTF_REV_7xxx_INTR_EN(7),
		MDP_INTF_REV_7xxx_INTR_STATUS(7)
	},
	[MDP_INTF8_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(8),
		MDP_INTF_REV_7xxx_INTR_EN(8),
		MDP_INTF_REV_7xxx_INTR_STATUS(8)
	},
};

#define DPU_IRQ_REG(irq_idx)	(irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx)	(BIT(irq_idx % 32))

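/*
 * Illustrative only: an irq_idx packs (interrupt register, bit) as
 * reg_idx * 32 + bit, the inverse of the DPU_IRQ_IDX() helper used in the
 * dispatch loop below. Assuming the enum order reflected in the tables
 * above, irq_idx 33 decomposes into DPU_IRQ_REG(33) == 1
 * (MDP_SSPP_TOP0_INTR2) and DPU_IRQ_MASK(33) == BIT(1).
 */
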
/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
	VERB("irq_idx=%d\n", irq_idx);

	/* Bail out instead of dereferencing a NULL callback */
	if (!dpu_kms->hw_intr->irq_tbl[irq_idx].cb) {
		DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
		return;
	}

	atomic_inc(&dpu_kms->hw_intr->irq_tbl[irq_idx].count);

	/*
	 * Perform the registered callback
	 */
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb(dpu_kms->hw_intr->irq_tbl[irq_idx].arg, irq_idx);
}

irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Search through matching intr status.
		 */
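		/*
		 * Worked example (illustrative values): if irq_status is 0x9,
		 * ffs() returns 1 and then 4, so bits 0 and 3 are dispatched
		 * as irq_idx reg_idx * 32 + 0 and reg_idx * 32 + 3.
		 */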
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the irq_status
			 * bit for the matching mask. Once irq_status is
			 * fully cleared, the search can stop.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}

static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	/* Is this interrupt register supported on the platform? */
	if (WARN_ON(!reg->en_off))
		return -EINVAL;

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		pr_err("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock; it is the caller's responsibility to ensure the
	 * lock is held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
			DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}

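/*
 * Caller sketch for the *_locked helpers above (this mirrors what
 * dpu_core_irq_register_callback() and its unregister counterpart do
 * below; the snippet itself is illustrative):
 *
 *	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 *	ret = dpu_hw_intr_enable_irq_locked(intr, irq_idx);
 *	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 */
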
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					intr->intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					intr->intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
		DPU_ERROR("[%pS] invalid irq_idx=%d\n",
				__builtin_return_address(0), irq_idx);
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			intr->intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

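/*
 * Usage sketch (hypothetical caller): dpu_core_irq_read() polls and clears
 * a single interrupt without a registered callback. A nonzero return means
 * the interrupt had fired and has now been acked:
 *
 *	u32 fired = dpu_core_irq_read(dpu_kms, irq_idx);
 */
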
struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	int nirq = MDP_INTR_MAX * 32;
	unsigned int i;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = kzalloc(struct_size(intr, irq_tbl, nirq), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	if (m->mdss_ver->core_major_ver >= 7)
		intr->intr_set = dpu_intr_set_7xxx;
	else
		intr->intr_set = dpu_intr_set_legacy;

	intr->hw.blk_addr = addr + m->mdp[0].base;

	intr->total_irqs = nirq;

	intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
			 BIT(MDP_SSPP_TOP0_INTR2) |
			 BIT(MDP_SSPP_TOP0_HIST_INTR);
	for (i = 0; i < m->intf_count; i++) {
		const struct dpu_intf_cfg *intf = &m->intf[i];

		if (intf->type == INTF_NONE)
			continue;

		intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));

		if (intf->intr_tear_rd_ptr != -1)
			intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
	}

	spin_lock_init(&intr->irq_lock);

	return intr;
}

void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
	kfree(intr);
}

int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
		void (*irq_cb)(void *arg, int irq_idx),
		void *irq_arg)
{
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("invalid irq_idx:%d irq_cb:%ps\n", irq_idx, irq_cb);
		return -EINVAL;
	}

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	if (unlikely(WARN_ON(dpu_kms->hw_intr->irq_tbl[irq_idx].cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(irq_idx, irq_cb);
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = irq_arg;
	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ for irq_idx:%d\n",
					irq_idx);
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(irq_idx);

	return 0;
}

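/*
 * Usage sketch (hypothetical callback and argument, mirroring how encoder
 * code drives this API):
 *
 *	static void my_vblank_cb(void *arg, int irq_idx)
 *	{
 *		struct my_ctx *ctx = arg;
 *	}
 *
 *	dpu_core_irq_register_callback(dpu_kms, irq_idx, my_vblank_cb, ctx);
 *	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
 */
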
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx)
{
	unsigned long irq_flags;
	int ret;

	if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
		DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
		return -EINVAL;
	}

	VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(irq_idx);

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ for irq_idx:%d: %d\n",
					irq_idx, ret);

	dpu_kms->hw_intr->irq_tbl[irq_idx].cb = NULL;
	dpu_kms->hw_intr->irq_tbl[irq_idx].arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(irq_idx);

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_count = atomic_read(&dpu_kms->hw_intr->irq_tbl[i].count);
		cb = dpu_kms->hw_intr->irq_tbl[i].cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "idx:%d irq:%d cb:%ps\n", i, irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
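
/*
 * Reading the "core_irq" file dumps one line per active entry in the
 * format of the seq_printf() above, e.g. (values hypothetical):
 *
 *	idx:41 irq:1830 cb:some_vblank_cb
 */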
#endif

void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		atomic_set(&dpu_kms->hw_intr->irq_tbl[i].count, 0);
}

void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
		if (dpu_kms->hw_intr->irq_tbl[i].cb)
			DPU_ERROR("irq_idx=%d still enabled/registered\n", i);

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}