// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/slab.h>

#include <drm/drm_managed.h>

#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
#include "dpu_trace.h"

/*
 * Register offsets in the MDSS register file for the interrupt registers,
 * w.r.t. the MDP base
 */
#define MDP_INTF_OFF(intf)				(0x6A000 + 0x800 * (intf))
#define MDP_INTF_INTR_EN(intf)				(MDP_INTF_OFF(intf) + 0x1c0)
#define MDP_INTF_INTR_STATUS(intf)			(MDP_INTF_OFF(intf) + 0x1c4)
#define MDP_INTF_INTR_CLEAR(intf)			(MDP_INTF_OFF(intf) + 0x1c8)
#define MDP_INTF_TEAR_OFF(intf)				(0x6D700 + 0x100 * (intf))
#define MDP_INTF_INTR_TEAR_EN(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_INTR_TEAR_STATUS(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_INTR_TEAR_CLEAR(intf)			(MDP_INTF_TEAR_OFF(intf) + 0x008)
#define MDP_AD4_OFF(ad4)				(0x7C000 + 0x1000 * (ad4))
#define MDP_AD4_INTR_EN_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x41c)
#define MDP_AD4_INTR_CLEAR_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x424)
#define MDP_AD4_INTR_STATUS_OFF(ad4)			(MDP_AD4_OFF(ad4) + 0x420)
#define MDP_INTF_REV_7xxx_OFF(intf)			(0x34000 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_EN(intf)			(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_7xxx_INTR_STATUS(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_7xxx_INTR_CLEAR(intf)		(MDP_INTF_REV_7xxx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_7xxx_TEAR_OFF(intf)		(0x34800 + 0x1000 * (intf))
#define MDP_INTF_REV_7xxx_INTR_TEAR_EN(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(intf)	(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(intf)		(MDP_INTF_REV_7xxx_TEAR_OFF(intf) + 0x008)

#define MDP_INTF_REV_13xx_OFF(intf)			(0x18d000 + 0x1000 * (intf))
#define MDP_INTF_REV_13xx_INTR_EN(intf)			(MDP_INTF_REV_13xx_OFF(intf) + 0x1c0)
#define MDP_INTF_REV_13xx_INTR_STATUS(intf)		(MDP_INTF_REV_13xx_OFF(intf) + 0x1c4)
#define MDP_INTF_REV_13xx_INTR_CLEAR(intf)		(MDP_INTF_REV_13xx_OFF(intf) + 0x1c8)
#define MDP_INTF_REV_13xx_TEAR_OFF(intf)		(0x18d800 + 0x1000 * (intf))
#define MDP_INTF_REV_13xx_INTR_TEAR_EN(intf)		(MDP_INTF_REV_13xx_TEAR_OFF(intf) + 0x000)
#define MDP_INTF_REV_13xx_INTR_TEAR_STATUS(intf)	(MDP_INTF_REV_13xx_TEAR_OFF(intf) + 0x004)
#define MDP_INTF_REV_13xx_INTR_TEAR_CLEAR(intf)		(MDP_INTF_REV_13xx_TEAR_OFF(intf) + 0x008)
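
/*
 * Worked example, derived purely from the macros above: for INTF1 on a
 * legacy (DPU <= 6.x) layout,
 *
 *	MDP_INTF_OFF(1)         = 0x6A000 + 0x800 * 1 = 0x6A800
 *	MDP_INTF_INTR_EN(1)     = 0x6A800 + 0x1c0     = 0x6A9C0
 *	MDP_INTF_INTR_STATUS(1) = 0x6A800 + 0x1c4     = 0x6A9C4
 *	MDP_INTF_INTR_CLEAR(1)  = 0x6A800 + 0x1c8     = 0x6A9C8
 *
 * All of these are offsets from the MDP base, not from the start of the
 * MDSS register file.
 */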

/**
 * struct dpu_intr_reg - offsets for one set of DPU interrupt registers
 * @clr_off:	offset to CLEAR reg
 * @en_off:	offset to ENABLE reg
 * @status_off:	offset to STATUS reg
 */
struct dpu_intr_reg {
	u32 clr_off;
	u32 en_off;
	u32 status_off;
};

/*
 * dpu_intr_set_legacy - List of DPU interrupt registers for DPU <= 6.x
 */
static const struct dpu_intr_reg dpu_intr_set_legacy[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_INTR_CLEAR(0),
		MDP_INTF_INTR_EN(0),
		MDP_INTF_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_INTR_CLEAR(1),
		MDP_INTF_INTR_EN(1),
		MDP_INTF_INTR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_INTR_CLEAR(2),
		MDP_INTF_INTR_EN(2),
		MDP_INTF_INTR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_INTR_CLEAR(3),
		MDP_INTF_INTR_EN(3),
		MDP_INTF_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_INTR_CLEAR(4),
		MDP_INTF_INTR_EN(4),
		MDP_INTF_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_INTR_CLEAR(5),
		MDP_INTF_INTR_EN(5),
		MDP_INTF_INTR_STATUS(5)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(1),
		MDP_INTF_INTR_TEAR_EN(1),
		MDP_INTF_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_INTR_TEAR_CLEAR(2),
		MDP_INTF_INTR_TEAR_EN(2),
		MDP_INTF_INTR_TEAR_STATUS(2)
	},
	[MDP_AD4_0_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(0),
		MDP_AD4_INTR_EN_OFF(0),
		MDP_AD4_INTR_STATUS_OFF(0),
	},
	[MDP_AD4_1_INTR] = {
		MDP_AD4_INTR_CLEAR_OFF(1),
		MDP_AD4_INTR_EN_OFF(1),
		MDP_AD4_INTR_STATUS_OFF(1),
	},
};

/*
 * dpu_intr_set_7xxx - List of DPU interrupt registers for DPU >= 7.0
 */
static const struct dpu_intr_reg dpu_intr_set_7xxx[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(0),
		MDP_INTF_REV_7xxx_INTR_EN(0),
		MDP_INTF_REV_7xxx_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_EN(1),
		MDP_INTF_REV_7xxx_INTR_STATUS(1)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(1),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_EN(2),
		MDP_INTF_REV_7xxx_INTR_STATUS(2)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_REV_7xxx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_EN(2),
		MDP_INTF_REV_7xxx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(3),
		MDP_INTF_REV_7xxx_INTR_EN(3),
		MDP_INTF_REV_7xxx_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(4),
		MDP_INTF_REV_7xxx_INTR_EN(4),
		MDP_INTF_REV_7xxx_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(5),
		MDP_INTF_REV_7xxx_INTR_EN(5),
		MDP_INTF_REV_7xxx_INTR_STATUS(5)
	},
	[MDP_INTF6_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(6),
		MDP_INTF_REV_7xxx_INTR_EN(6),
		MDP_INTF_REV_7xxx_INTR_STATUS(6)
	},
	[MDP_INTF7_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(7),
		MDP_INTF_REV_7xxx_INTR_EN(7),
		MDP_INTF_REV_7xxx_INTR_STATUS(7)
	},
	[MDP_INTF8_INTR] = {
		MDP_INTF_REV_7xxx_INTR_CLEAR(8),
		MDP_INTF_REV_7xxx_INTR_EN(8),
		MDP_INTF_REV_7xxx_INTR_STATUS(8)
	},
};

/*
 * dpu_intr_set_13xx - List of DPU interrupt registers for DPU >= 13.0
 */
static const struct dpu_intr_reg dpu_intr_set_13xx[] = {
	[MDP_SSPP_TOP0_INTR] = {
		INTR_CLEAR,
		INTR_EN,
		INTR_STATUS
	},
	[MDP_SSPP_TOP0_INTR2] = {
		INTR2_CLEAR,
		INTR2_EN,
		INTR2_STATUS
	},
	[MDP_SSPP_TOP0_HIST_INTR] = {
		HIST_INTR_CLEAR,
		HIST_INTR_EN,
		HIST_INTR_STATUS
	},
	[MDP_INTF0_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(0),
		MDP_INTF_REV_13xx_INTR_EN(0),
		MDP_INTF_REV_13xx_INTR_STATUS(0)
	},
	[MDP_INTF1_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(1),
		MDP_INTF_REV_13xx_INTR_EN(1),
		MDP_INTF_REV_13xx_INTR_STATUS(1)
	},
	[MDP_INTF1_TEAR_INTR] = {
		MDP_INTF_REV_13xx_INTR_TEAR_CLEAR(1),
		MDP_INTF_REV_13xx_INTR_TEAR_EN(1),
		MDP_INTF_REV_13xx_INTR_TEAR_STATUS(1)
	},
	[MDP_INTF2_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(2),
		MDP_INTF_REV_13xx_INTR_EN(2),
		MDP_INTF_REV_13xx_INTR_STATUS(2)
	},
	[MDP_INTF2_TEAR_INTR] = {
		MDP_INTF_REV_13xx_INTR_TEAR_CLEAR(2),
		MDP_INTF_REV_13xx_INTR_TEAR_EN(2),
		MDP_INTF_REV_13xx_INTR_TEAR_STATUS(2)
	},
	[MDP_INTF3_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(3),
		MDP_INTF_REV_13xx_INTR_EN(3),
		MDP_INTF_REV_13xx_INTR_STATUS(3)
	},
	[MDP_INTF4_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(4),
		MDP_INTF_REV_13xx_INTR_EN(4),
		MDP_INTF_REV_13xx_INTR_STATUS(4)
	},
	[MDP_INTF5_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(5),
		MDP_INTF_REV_13xx_INTR_EN(5),
		MDP_INTF_REV_13xx_INTR_STATUS(5)
	},
	[MDP_INTF6_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(6),
		MDP_INTF_REV_13xx_INTR_EN(6),
		MDP_INTF_REV_13xx_INTR_STATUS(6)
	},
	[MDP_INTF7_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(7),
		MDP_INTF_REV_13xx_INTR_EN(7),
		MDP_INTF_REV_13xx_INTR_STATUS(7)
	},
	[MDP_INTF8_INTR] = {
		MDP_INTF_REV_13xx_INTR_CLEAR(8),
		MDP_INTF_REV_13xx_INTR_EN(8),
		MDP_INTF_REV_13xx_INTR_STATUS(8)
	},
};

#define DPU_IRQ_MASK(irq_idx)	(BIT(DPU_IRQ_BIT(irq_idx)))

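/*
 * irq_idx is a 1-based index packing a (register, bit) pair: 0 is reserved
 * as "no IRQ", and the DPU_IRQ_REG()/DPU_IRQ_BIT() helpers (see
 * dpu_hw_interrupts.h) recover the register index and bit offset from it.
 * DPU_IRQ_MASK() above turns an irq_idx into the bit mask used against
 * that register.
 */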
static inline bool dpu_core_irq_is_valid(unsigned int irq_idx)
{
	return irq_idx && irq_idx <= DPU_NUM_IRQS;
}

static inline struct dpu_hw_intr_entry *dpu_core_irq_get_entry(struct dpu_hw_intr *intr,
							       unsigned int irq_idx)
{
	return &intr->irq_tbl[irq_idx - 1];
}

/**
 * dpu_core_irq_callback_handler - dispatch core interrupts
 * @dpu_kms:		Pointer to DPU's KMS structure
 * @irq_idx:		interrupt index
 */
static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, unsigned int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);

	VERB("IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	if (!irq_entry->cb) {
		DRM_ERROR("no registered cb, IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return;
	}

	atomic_inc(&irq_entry->count);

	/*
	 * Invoke the registered callback
	 */
	irq_entry->cb(irq_entry->arg);
}

/**
 * dpu_core_irq - core IRQ handler
 * @kms:		MSM KMS handle
 * @return:		interrupt handling status
 */
irqreturn_t dpu_core_irq(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned int irq_idx;
	u32 irq_status;
	u32 enable_mask;
	int bit;
	unsigned long irq_flags;

	if (!intr)
		return IRQ_NONE;

	spin_lock_irqsave(&intr->irq_lock, irq_flags);
	for (reg_idx = 0; reg_idx < MDP_INTR_MAX; reg_idx++) {
		if (!test_bit(reg_idx, &intr->irq_mask))
			continue;

		/* Read interrupt status */
		irq_status = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].status_off);

		/* Read enable mask */
		enable_mask = DPU_REG_READ(&intr->hw, intr->intr_set[reg_idx].en_off);

		/* and clear the interrupt */
		if (irq_status)
			DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				     irq_status);

		/* Finally update IRQ status based on enable mask */
		irq_status &= enable_mask;

		if (!irq_status)
			continue;

		/*
		 * Dispatch each pending, enabled interrupt bit.
		 */
		while ((bit = ffs(irq_status)) != 0) {
			irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);

			dpu_core_irq_callback_handler(dpu_kms, irq_idx);

			/*
			 * When the callback finishes, clear the matching
			 * bit in irq_status. Once irq_status is fully
			 * cleared, the search can stop.
			 */
			irq_status &= ~BIT(bit - 1);
		}
	}

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return IRQ_HANDLED;
}
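/**
 * dpu_hw_intr_enable_irq_locked - enable one interrupt in hardware
 * @intr:	DPU interrupt handler structure
 * @irq_idx:	interrupt index
 *
 * Sets the bit for @irq_idx in the cached enable mask of its register,
 * clears any pending instance of the interrupt and writes the new mask
 * to hardware. Must be called with @intr->irq_lock held.
 */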
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr,
					 unsigned int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure
	 * it's held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	/* Is this interrupt register supported on the platform? */
	if (WARN_ON(!reg->en_off))
		return -EINVAL;

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
		/* Enable interrupts with the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}
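/**
 * dpu_hw_intr_disable_irq_locked - disable one interrupt in hardware
 * @intr:	DPU interrupt handler structure
 * @irq_idx:	interrupt index
 *
 * Clears the bit for @irq_idx in the cached enable mask of its register,
 * writes the new mask to hardware and acks any pending instance of the
 * interrupt. Must be called with @intr->irq_lock held.
 */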
static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr,
					  unsigned int irq_idx)
{
	int reg_idx;
	const struct dpu_intr_reg *reg;
	const char *dbgstr = NULL;
	uint32_t cache_irq_mask;

	if (!intr)
		return -EINVAL;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n",
		       DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	/*
	 * The cache_irq_mask and hardware RMW operations need to be done
	 * under irq_lock and it's the caller's responsibility to ensure
	 * it's held.
	 */
	assert_spin_locked(&intr->irq_lock);

	reg_idx = DPU_IRQ_REG(irq_idx);
	reg = &intr->intr_set[reg_idx];

	cache_irq_mask = intr->cache_irq_mask[reg_idx];
	if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
		dbgstr = "already ";
	} else {
		dbgstr = "";

		cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
		/* Disable interrupts based on the new mask */
		DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
		/* Clear any pending interrupt */
		DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));

		/* ensure register write goes through */
		wmb();

		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
	}

	pr_debug("DPU IRQ=[%d, %d] %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n",
		 DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), dbgstr,
		 DPU_IRQ_MASK(irq_idx), cache_irq_mask);

	return 0;
}
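/*
 * Ack any interrupts already pending by writing all-ones to the CLEAR
 * register of every interrupt block supported on this platform.
 */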
static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					intr->intr_set[i].clr_off, 0xffffffff);
	}

	/* ensure register writes go through */
	wmb();
}

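/*
 * Mask every interrupt at the source by zeroing the ENABLE register of
 * every interrupt block supported on this platform.
 */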
static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int i;

	if (!intr)
		return;

	for (i = 0; i < MDP_INTR_MAX; i++) {
		if (test_bit(i, &intr->irq_mask))
			DPU_REG_WRITE(&intr->hw,
					intr->intr_set[i].en_off, 0x00000000);
	}

	/* ensure register writes go through */
	wmb();
}

/**
 * dpu_core_irq_read - IRQ helper function for reading IRQ status
 * @dpu_kms:		DPU handle
 * @irq_idx:		irq index
 * @return:		non-zero if the interrupt was pending (it is also
 *			cleared); zero otherwise
 */
u32 dpu_core_irq_read(struct dpu_kms *dpu_kms,
		      unsigned int irq_idx)
{
	struct dpu_hw_intr *intr = dpu_kms->hw_intr;
	int reg_idx;
	unsigned long irq_flags;
	u32 intr_status;

	if (!intr)
		return 0;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		pr_err("invalid IRQ=[%d, %d]\n", DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return 0;
	}

	spin_lock_irqsave(&intr->irq_lock, irq_flags);

	reg_idx = DPU_IRQ_REG(irq_idx);
	intr_status = DPU_REG_READ(&intr->hw,
			intr->intr_set[reg_idx].status_off) &
		DPU_IRQ_MASK(irq_idx);
	if (intr_status)
		DPU_REG_WRITE(&intr->hw, intr->intr_set[reg_idx].clr_off,
				intr_status);

	/* ensure register writes go through */
	wmb();

	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);

	return intr_status;
}

/**
 * dpu_hw_intr_init - initialize the interrupts hw object
 * @dev:  Corresponding device for devres management
 * @addr: mapped register io address of MDP
 * @m:    pointer to MDSS catalog data
 */
struct dpu_hw_intr *dpu_hw_intr_init(struct drm_device *dev,
				     void __iomem *addr,
				     const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_intr *intr;
	unsigned int i;

	if (!addr || !m)
		return ERR_PTR(-EINVAL);

	intr = drmm_kzalloc(dev, sizeof(*intr), GFP_KERNEL);
	if (!intr)
		return ERR_PTR(-ENOMEM);

	if (m->mdss_ver->core_major_ver >= 13)
		intr->intr_set = dpu_intr_set_13xx;
	else if (m->mdss_ver->core_major_ver >= 7)
		intr->intr_set = dpu_intr_set_7xxx;
	else
		intr->intr_set = dpu_intr_set_legacy;

	intr->hw.blk_addr = addr + m->mdp[0].base;

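	/*
	 * The three MDP_SSPP_TOP0 interrupt registers are present on all
	 * platforms and are always serviced; INTF (and INTF tear) registers
	 * are only serviced for interfaces present in the catalog.
	 */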
	intr->irq_mask = BIT(MDP_SSPP_TOP0_INTR) |
			 BIT(MDP_SSPP_TOP0_INTR2) |
			 BIT(MDP_SSPP_TOP0_HIST_INTR);
	for (i = 0; i < m->intf_count; i++) {
		const struct dpu_intf_cfg *intf = &m->intf[i];

		if (intf->type == INTF_NONE)
			continue;

		intr->irq_mask |= BIT(MDP_INTFn_INTR(intf->id));

		if (intf->intr_tear_rd_ptr)
			intr->irq_mask |= BIT(DPU_IRQ_REG(intf->intr_tear_rd_ptr));
	}

	spin_lock_init(&intr->irq_lock);

	return intr;
}

/**
 * dpu_core_irq_register_callback - register a callback function for an IRQ
 * @dpu_kms:		DPU handle
 * @irq_idx:		irq index
 * @irq_cb:		IRQ callback function
 * @irq_arg:		IRQ callback argument
 * @return:		0 for success registering callback, otherwise failure
 *
 * Only one callback may be registered per interrupt; -EBUSY is returned if
 * a callback is already present for @irq_idx.
 */
int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
				   unsigned int irq_idx,
				   void (*irq_cb)(void *arg),
				   void *irq_arg)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!irq_cb) {
		DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	if (unlikely(WARN_ON(irq_entry->cb))) {
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		return -EBUSY;
	}

	trace_dpu_core_irq_register_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
	irq_entry->arg = irq_arg;
	irq_entry->cb = irq_cb;

	ret = dpu_hw_intr_enable_irq_locked(
				dpu_kms->hw_intr,
				irq_idx);
	if (ret)
		DPU_ERROR("Failed to enable IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_register_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	return 0;
}

/**
 * dpu_core_irq_unregister_callback - unregister the callback function for an IRQ
 * @dpu_kms:		DPU handle
 * @irq_idx:		irq index
 * @return:		0 for success unregistering callback, otherwise failure
 *
 * The interrupt is disabled in hardware before the callback is cleared.
 */
int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms,
				     unsigned int irq_idx)
{
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int ret;

	if (!dpu_core_irq_is_valid(irq_idx)) {
		DPU_ERROR("invalid IRQ=[%d, %d]\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
		return -EINVAL;
	}

	VERB("[%pS] IRQ=[%d, %d]\n", __builtin_return_address(0),
	     DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
	trace_dpu_core_irq_unregister_callback(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	ret = dpu_hw_intr_disable_irq_locked(dpu_kms->hw_intr, irq_idx);
	if (ret)
		DPU_ERROR("Failed to disable IRQ=[%d, %d]: %d\n",
			  DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), ret);

	irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, irq_idx);
	irq_entry->cb = NULL;
	irq_entry->arg = NULL;

	spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

	trace_dpu_irq_unregister_success(DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));

	return 0;
}
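
/*
 * Illustrative sketch of the register/unregister flow above (the callback
 * name, context type and irq_idx source below are hypothetical, not taken
 * from a real caller):
 *
 *	static void example_vblank_cb(void *arg)
 *	{
 *		struct example_ctx *ctx = arg;	// hypothetical context
 *
 *		// runs from dpu_core_irq() with intr->irq_lock held
 *	}
 *
 *	ret = dpu_core_irq_register_callback(dpu_kms, irq_idx,
 *					     example_vblank_cb, ctx);
 *	if (ret)	// -EBUSY if a callback is already registered
 *		...
 *	...
 *	dpu_core_irq_unregister_callback(dpu_kms, irq_idx);
 */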

#ifdef CONFIG_DEBUG_FS
static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
{
	struct dpu_kms *dpu_kms = s->private;
	struct dpu_hw_intr_entry *irq_entry;
	unsigned long irq_flags;
	int i, irq_count;
	void *cb;

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		irq_count = atomic_read(&irq_entry->count);
		cb = irq_entry->cb;
		spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);

		if (irq_count || cb)
			seq_printf(s, "IRQ=[%d, %d] count:%d cb:%ps\n",
				   DPU_IRQ_REG(i), DPU_IRQ_BIT(i), irq_count, cb);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);

/**
 * dpu_debugfs_core_irq_init - register core irq debugfs
 * @dpu_kms: pointer to kms
 * @parent: debugfs directory root
 */
void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
		struct dentry *parent)
{
	debugfs_create_file("core_irq", 0600, parent, dpu_kms,
		&dpu_debugfs_core_irq_fops);
}
#endif

/**
 * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
 * @kms:		MSM KMS handle
 * @return:		none
 */
void dpu_core_irq_preinstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);

	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		atomic_set(&irq_entry->count, 0);
	}
}

/**
 * dpu_core_irq_uninstall - uninstall core IRQ handler
 * @kms:		MSM KMS handle
 * @return:		none
 */
void dpu_core_irq_uninstall(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms = to_dpu_kms(kms);
	struct dpu_hw_intr_entry *irq_entry;
	int i;

	if (!dpu_kms->hw_intr)
		return;

	pm_runtime_get_sync(&dpu_kms->pdev->dev);
	for (i = 1; i <= DPU_NUM_IRQS; i++) {
		irq_entry = dpu_core_irq_get_entry(dpu_kms->hw_intr, i);
		if (irq_entry->cb)
			DPU_ERROR("IRQ=[%d, %d] still enabled/registered\n",
				  DPU_IRQ_REG(i), DPU_IRQ_BIT(i));
	}

	dpu_clear_irqs(dpu_kms);
	dpu_disable_all_irqs(dpu_kms);
	pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
816