// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (C) 2010 Google, Inc.
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/io.h>

#include "../intr.h"
#include "../dev.h"

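/*
 * Handle up to 32 pending syncpoint threshold interrupts. val contains the
 * pending bits read from the CPU0 status register selected by reg_offset;
 * each set bit is masked, acknowledged and passed on to
 * host1x_intr_handle_interrupt().
 */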
static void process_32_syncpts(struct host1x *host, unsigned long val, u32 reg_offset)
{
	unsigned int id;

	if (!val)
		return;

	host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(reg_offset));
	host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(reg_offset));

	for_each_set_bit(id, &val, 32)
		host1x_intr_handle_interrupt(host, reg_offset * 32 + id);
}

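/*
 * Threshold interrupt handler. Depending on the SoC generation and kernel
 * configuration, the pending-status registers assigned to this interrupt
 * line are read in 32-bit or 64-bit chunks and each group of 32 syncpoints
 * is processed in turn.
 */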
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
	struct host1x_intr_irq_data *irq_data = dev_id;
	struct host1x *host = irq_data->host;
	unsigned long reg;
	unsigned int i;

#if !defined(CONFIG_64BIT)
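	/*
	 * On 32-bit kernels, read one 32-bit status register at a time,
	 * starting at this line's offset and stepping by the number of
	 * interrupt lines.
	 */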
	for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 32);
	     i += host->num_syncpt_irqs) {
		reg = host1x_sync_readl(host,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));

		process_32_syncpts(host, reg, i);
	}
#elif HOST1X_HW == 6 || HOST1X_HW == 7
	/*
	 * On Tegra186 and Tegra194 the first INT_STATUS register is not
	 * 64-bit aligned and there is only one interrupt line.
	 */
	reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(0));
	process_32_syncpts(host, reg, 0);

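	/*
	 * The remaining status words form 64-bit aligned pairs that can be
	 * read with a single 64-bit access; the final, unpaired word is read
	 * separately below.
	 */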
	for (i = 1; i < (host->info->nb_pts / 32) - 1; i += 2) {
		reg = host1x_sync_readq(host,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));

		process_32_syncpts(host, lower_32_bits(reg), i);
		process_32_syncpts(host, upper_32_bits(reg), i + 1);
	}

	reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
	process_32_syncpts(host, reg, i);
#else
	/* All 64-bit capable SoCs have a number of syncpoints divisible by 64. */
	for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 64);
	     i += host->num_syncpt_irqs) {
		reg = host1x_sync_readq(host,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i * 2));

		process_32_syncpts(host, lower_32_bits(reg), i * 2 + 0);
		process_32_syncpts(host, upper_32_bits(reg), i * 2 + 1);
	}
#endif

	return IRQ_HANDLED;
}

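/* Mask and acknowledge the threshold interrupts of every syncpoint. */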
static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
		host1x_sync_writel(host, 0xffffffffu,
			HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
		host1x_sync_writel(host, 0xffffffffu,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
	}
}

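/*
 * One-time hardware setup for syncpoint interrupt handling: program the
 * timeout and clock registers on older SoCs and, on HOST1X_HW >= 8,
 * distribute the threshold interrupts across the available interrupt lines.
 */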
static int
host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
{
#if HOST1X_HW < 6
	/* Disable the ip_busy_timeout. This prevents write drops. */
	host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/*
	 * Increase the auto-ack timeout to the maximum value. 2D will hang
	 * otherwise on Tegra2.
	 */
	host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);

	/* Update host clocks per usec. */
	host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
#endif
#if HOST1X_HW >= 8
	u32 id;

	/*
	 * Program threshold interrupt destination among 8 lines per VM,
	 * per syncpoint. For each group of 64 syncpoints (corresponding to two
	 * interrupt status registers), direct to one interrupt line, going
	 * around in a round-robin fashion.
	 */
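	/*
	 * For example, with all eight lines in use: syncpoints 0..63 are
	 * routed to line 0, 64..127 to line 1, and so on, wrapping back to
	 * line 0 at syncpoint 512.
	 */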
	for (id = 0; id < host->info->nb_pts; id++) {
		u32 reg_offset = id / 64;
		u32 irq_index = reg_offset % host->num_syncpt_irqs;

		host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
	}
#endif

	return 0;
}

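/* Set the threshold at which a syncpoint raises its interrupt. */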
static void host1x_intr_set_syncpt_threshold(struct host1x *host,
					      unsigned int id,
					      u32 thresh)
{
	host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}

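/* Unmask the threshold interrupt of a single syncpoint towards CPU0. */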
static void host1x_intr_enable_syncpt_intr(struct host1x *host,
					    unsigned int id)
{
	host1x_sync_writel(host, BIT(id % 32),
		HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));
}

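/* Mask the threshold interrupt of a single syncpoint and clear its status. */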
static void host1x_intr_disable_syncpt_intr(struct host1x *host,
					     unsigned int id)
{
	host1x_sync_writel(host, BIT(id % 32),
		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
	host1x_sync_writel(host, BIT(id % 32),
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
}

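/* Chip-specific interrupt operations used by the common host1x interrupt code. */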
static const struct host1x_intr_ops host1x_intr_ops = {
	.init_host_sync = host1x_intr_init_host_sync,
	.set_syncpt_threshold = host1x_intr_set_syncpt_threshold,
	.enable_syncpt_intr = host1x_intr_enable_syncpt_intr,
	.disable_syncpt_intr = host1x_intr_disable_syncpt_intr,
	.disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs,
	.isr = syncpt_thresh_isr,
};