// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/io.h>

#include "../dev.h"
#include "../syncpt.h"

/*
 * Write the current syncpoint value back to hw.
 */
static void syncpt_restore(struct host1x_syncpt *sp)
{
	u32 min = host1x_syncpt_read_min(sp);
	struct host1x *host = sp->host;

	host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
}
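
/*
 * Usage sketch (illustrative, not part of the original file): these ops are
 * not called directly but are dispatched through the host1x_hw_syncpt_*()
 * wrappers in ../dev.h. Restoring all syncpoints after the host1x has been
 * powered down would look roughly like:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
 *		host1x_hw_syncpt_restore(host, host->syncpt + i);
 *
 * Helper and field names are taken from ../syncpt.h and ../dev.h; treat the
 * exact loop shape as an assumption.
 */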

/*
 * Write the current waitbase value back to hw.
 */
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
#if HOST1X_HW < 7
	struct host1x *host = sp->host;

	host1x_sync_writel(host, sp->base_val,
			   HOST1X_SYNC_SYNCPT_BASE(sp->id));
#endif
}

/*
 * Read waitbase value from hw.
 */
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
#if HOST1X_HW < 7
	struct host1x *host = sp->host;

	sp->base_val =
		host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
#endif
}

/*
 * Read the current syncpoint value from hardware and update the cached
 * copy (min_val). Returns the value read.
 */
static u32 syncpt_load(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	u32 old, live;

	/* Loop in case there's a race writing to min_val */
	do {
		old = host1x_syncpt_read_min(sp);
		live = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT(sp->id));
	} while ((u32)atomic_cmpxchg(&sp->min_val, old, live) != old);

	/* the live value should never be ahead of the tracked maximum */
	if (!host1x_syncpt_check_max(sp, live))
		dev_err(host->dev, "%s failed: id=%u, min=%u, max=%u\n",
			__func__, sp->id, host1x_syncpt_read_min(sp),
			host1x_syncpt_read_max(sp));

	return live;
}
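
/*
 * Usage sketch (illustrative, not part of the original file): callers reach
 * this op through host1x_syncpt_load() from ../syncpt.c, typically to refresh
 * the cached value before deciding whether a threshold has been reached,
 * roughly:
 *
 *	if (!host1x_syncpt_is_expired(sp, thresh))
 *		host1x_syncpt_load(sp);		(re-read from hardware)
 *
 * host1x_syncpt_is_expired() and host1x_syncpt_load() are declared in
 * ../syncpt.h; the surrounding logic is an assumption for illustration.
 */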

/*
 * Write a CPU syncpoint increment to the hardware, without touching
 * the cache.
 */
static int syncpt_cpu_incr(struct host1x_syncpt *sp)
{
	struct host1x *host = sp->host;
	u32 reg_offset = sp->id / 32;

	/*
	 * A host-managed syncpoint with no outstanding increments must not
	 * be incremented from the CPU, since that would break the max value
	 * tracking.
	 */
	if (!host1x_syncpt_client_managed(sp) &&
	    host1x_syncpt_idle(sp))
		return -EINVAL;

	host1x_sync_writel(host, BIT(sp->id % 32),
			   HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));

	/* order the increment against any subsequent writes */
	wmb();

	return 0;
}
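
/*
 * Usage sketch (illustrative, not part of the original file): client drivers
 * increment a syncpoint from the CPU through the public helper declared in
 * include/linux/host1x.h, which lands here via the ops table, roughly:
 *
 *	err = host1x_syncpt_incr(sp);
 *	if (err < 0)
 *		return err;	(refused: host-managed and idle)
 *
 * The error-handling shape is an assumption for illustration.
 */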

/**
 * syncpt_assign_to_channel() - Assign syncpoint to channel
 * @sp: syncpoint
 * @ch: channel
 *
 * On chips with the syncpoint protection feature (Tegra186+), assign @sp to
 * @ch, preventing other channels from incrementing the syncpoint. If @ch is
 * NULL, unassign the syncpoint.
 *
 * On older chips, do nothing.
 */
static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
				     struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
	struct host1x *host = sp->host;

	if (!host->hv_regs)
		return;

	/* a channel ID of 0xff leaves the syncpoint unassigned */
	host1x_sync_writel(host,
			   HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
			   HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
#endif
}
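
/*
 * Usage sketch (illustrative, not part of the original file): the channel
 * submission path assigns a job's syncpoint to the channel it runs on via the
 * host1x_hw_syncpt_assign_to_channel() wrapper from ../dev.h, and passes NULL
 * to release the assignment again, roughly:
 *
 *	host1x_hw_syncpt_assign_to_channel(host, sp, ch);
 *	... submit work that increments sp from ch ...
 *	host1x_hw_syncpt_assign_to_channel(host, sp, NULL);
 *
 * Where exactly the unassignment happens is an assumption here.
 */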

/**
 * syncpt_enable_protection() - Enable syncpoint protection
 * @host: host1x instance
 *
 * On chips with the syncpoint protection feature (Tegra186+), enable this
 * feature. On older chips, do nothing.
 */
static void syncpt_enable_protection(struct host1x *host)
{
#if HOST1X_HW >= 6
	if (!host->hv_regs)
		return;

	host1x_hypervisor_writel(host, HOST1X_HV_SYNCPT_PROT_EN_CH_EN,
				 HOST1X_HV_SYNCPT_PROT_EN);
#endif
}
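
/*
 * Usage sketch (illustrative, not part of the original file): protection is
 * switched on once during syncpoint initialization, via the corresponding
 * wrapper from ../dev.h, roughly:
 *
 *	host1x_hw_syncpt_enable_protection(host);
 *
 * The exact call site (../syncpt.c during init) is an assumption here.
 */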

static const struct host1x_syncpt_ops host1x_syncpt_ops = {
	.restore = syncpt_restore,
	.restore_wait_base = syncpt_restore_wait_base,
	.load_wait_base = syncpt_read_wait_base,
	.load = syncpt_load,
	.cpu_incr = syncpt_cpu_incr,
	.assign_to_channel = syncpt_assign_to_channel,
	.enable_protection = syncpt_enable_protection,
};
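
/*
 * Wiring sketch (illustrative, not part of the original file): this file is
 * included from the per-SoC hw/host1x0*.c sources, which define HOST1X_HW and
 * install the ops table on the host1x instance, roughly:
 *
 *	host->syncpt_op = &host1x_syncpt_ops;
 *
 * so that the host1x_hw_syncpt_*() wrappers in ../dev.h dispatch to the
 * functions above. Treat the exact wiring as an assumption.
 */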