// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (C) 2010 Google, Inc.
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/io.h>

#include "../intr.h"
#include "../dev.h"

static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
	struct host1x_intr_irq_data *irq_data = dev_id;
	struct host1x *host = irq_data->host;
	unsigned long reg;
	unsigned int i, id;

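	/*
	 * Each interrupt line services every num_syncpt_irqs-th 32-syncpoint
	 * status register, starting from this line's own offset (round-robin
	 * across the available lines).
	 */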
	for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 32);
	     i += host->num_syncpt_irqs) {
		reg = host1x_sync_readl(host,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));

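		/*
		 * Disable the triggered thresholds and clear (ack) their
		 * pending status before dispatching them.
		 */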
		host1x_sync_writel(host, reg,
			HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
		host1x_sync_writel(host, reg,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));

		for_each_set_bit(id, &reg, 32)
			host1x_intr_handle_interrupt(host, i * 32 + id);
	}

	return IRQ_HANDLED;
}

static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
	unsigned int i;

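	/* walk every 32-syncpoint status register: mask and ack everything */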
	for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
		host1x_sync_writel(host, 0xffffffffu,
			HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
		host1x_sync_writel(host, 0xffffffffu,
			HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
	}
}

static int
host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
{
#if HOST1X_HW < 6
	/* disable the ip_busy_timeout. this prevents write drops */
	host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT);

	/*
	 * increase the auto-ack timeout to the maximum value. the 2D engine
	 * will hang otherwise on Tegra2.
	 */
	host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG);

	/* update host clocks per usec */
	host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK);
#endif
#if HOST1X_HW >= 8
	u32 id;

	/*
	 * Program the threshold interrupt destination among the 8 interrupt
	 * lines per VM, per syncpoint. Each group of 32 syncpoints
	 * (corresponding to one interrupt status register) is directed to one
	 * interrupt line, going around in a round-robin fashion.
	 */
	for (id = 0; id < host->info->nb_pts; id++) {
		u32 reg_offset = id / 32;
		u32 irq_index = reg_offset % host->num_syncpt_irqs;

		host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
	}
#endif

	return 0;
}

static void host1x_intr_set_syncpt_threshold(struct host1x *host,
					     unsigned int id,
					     u32 thresh)
{
	host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}

static void host1x_intr_enable_syncpt_intr(struct host1x *host,
					   unsigned int id)
{
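	/* each THRESH register covers 32 syncpoints: bit id % 32 in register id / 32 */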
	host1x_sync_writel(host, BIT(id % 32),
		HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));
}

static void host1x_intr_disable_syncpt_intr(struct host1x *host,
					    unsigned int id)
{
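	/* mask the threshold interrupt and clear any status still pending for it */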
	host1x_sync_writel(host, BIT(id % 32),
		HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
	host1x_sync_writel(host, BIT(id % 32),
		HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
}

static const struct host1x_intr_ops host1x_intr_ops = {
	.init_host_sync = host1x_intr_init_host_sync,
	.set_syncpt_threshold = host1x_intr_set_syncpt_threshold,
	.enable_syncpt_intr = host1x_intr_enable_syncpt_intr,
	.disable_syncpt_intr = host1x_intr_disable_syncpt_intr,
	.disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs,
	.isr = syncpt_thresh_isr,
};