/* xref: /linux/drivers/base/regmap/regmap-sdw-mbq.c (revision 2aa680df68062e4e0c356ec2aa7100c13654907b) */
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright(c) 2020 Intel Corporation.
3 
4 #include <linux/bits.h>
5 #include <linux/delay.h>
6 #include <linux/device.h>
7 #include <linux/errno.h>
8 #include <linux/iopoll.h>
9 #include <linux/module.h>
10 #include <linux/regmap.h>
11 #include <linux/soundwire/sdw.h>
12 #include <linux/soundwire/sdw_registers.h>
13 #include <sound/sdca_function.h>
14 #include "internal.h"
15 
/**
 * struct regmap_mbq_context - bus context for an MBQ SoundWire regmap
 * @dev: Device used for debug/error prints and passed to the cfg callbacks.
 * @sdw: SoundWire slave the reads/writes are issued to.
 * @cfg: Device-specific MBQ configuration (copied at init; may be all zero).
 * @val_size: Default register value size in bytes, from config->val_bits.
 * @readable_reg: Copy of the regmap_config readable_reg callback, used to
 *                check whether the Function Status control can be polled.
 */
struct regmap_mbq_context {
	struct device *dev;
	struct sdw_slave *sdw;

	struct regmap_sdw_mbq_cfg cfg;

	int val_size;
	bool (*readable_reg)(struct device *dev, unsigned int reg);
};
25 
26 static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
27 {
28 	int size = ctx->val_size;
29 
30 	if (ctx->cfg.mbq_size) {
31 		size = ctx->cfg.mbq_size(ctx->dev, reg);
32 		if (!size || size > ctx->val_size)
33 			return -EINVAL;
34 	}
35 
36 	return size;
37 }
38 
39 static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
40 {
41 	if (ctx->cfg.deferrable)
42 		return ctx->cfg.deferrable(ctx->dev, reg);
43 
44 	return false;
45 }
46 
/*
 * Wait for the Function that owns @reg to clear its busy status after a
 * deferred transaction. Returns 0 on success, a negative read error, or
 * a poll-timeout errno.
 */
static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
				    struct regmap_mbq_context *ctx)
{
	struct device *dev = ctx->dev;
	int val, ret = 0;

	dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);

	/* Rebase onto the Function Status control of Entity 0 of the same Function. */
	reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
			   SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);

	if (ctx->readable_reg(dev, reg)) {
		/*
		 * NOTE(review): read_poll_timeout() takes (sleep_us, timeout_us);
		 * here cfg.timeout_us is passed as the per-poll sleep and
		 * cfg.retry_us as the overall timeout — confirm this ordering
		 * matches the intended semantics of those two cfg fields.
		 */
		ret = read_poll_timeout(sdw_read_no_pm, val,
					val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
					ctx->cfg.timeout_us, ctx->cfg.retry_us,
					false, slave, reg);
		/* A failed status read takes precedence over a poll timeout. */
		if (val < 0)
			return val;
		if (ret)
			dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
	} else {
		/* Status is not readable; all we can do is wait out the timeout. */
		fsleep(ctx->cfg.timeout_us);
	}

	return ret;
}
73 
74 static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
75 				     unsigned int reg, unsigned int val,
76 				     int mbq_size, bool deferrable)
77 {
78 	int shift = mbq_size * BITS_PER_BYTE;
79 	int ret;
80 
81 	while (--mbq_size > 0) {
82 		shift -= BITS_PER_BYTE;
83 
84 		ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
85 				      (val >> shift) & 0xff);
86 		if (ret < 0)
87 			return ret;
88 	}
89 
90 	ret = sdw_write_no_pm(slave, reg, val & 0xff);
91 	if (deferrable && ret == -ENODATA)
92 		return -EAGAIN;
93 
94 	return ret;
95 }
96 
97 static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
98 {
99 	struct regmap_mbq_context *ctx = context;
100 	struct sdw_slave *slave = ctx->sdw;
101 	bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
102 	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
103 	int ret;
104 
105 	if (mbq_size < 0)
106 		return mbq_size;
107 
108 	/*
109 	 * Technically the spec does allow a device to set itself to busy for
110 	 * internal reasons, but since it doesn't provide any information on
111 	 * how to handle timeouts in that case, for now the code will only
112 	 * process a single wait/timeout on function busy and a single retry
113 	 * of the transaction.
114 	 */
115 	ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
116 	if (ret == -EAGAIN) {
117 		ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
118 		if (ret)
119 			return ret;
120 
121 		ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
122 	}
123 
124 	return ret;
125 }
126 
127 static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
128 				    unsigned int reg, unsigned int *val,
129 				    int mbq_size, bool deferrable)
130 {
131 	int shift = BITS_PER_BYTE;
132 	int read;
133 
134 	read = sdw_read_no_pm(slave, reg);
135 	if (read < 0) {
136 		if (deferrable && read == -ENODATA)
137 			return -EAGAIN;
138 
139 		return read;
140 	}
141 
142 	*val = read;
143 
144 	while (--mbq_size > 0) {
145 		read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
146 		if (read < 0)
147 			return read;
148 
149 		*val |= read << shift;
150 		shift += BITS_PER_BYTE;
151 	}
152 
153 	return 0;
154 }
155 
156 static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
157 {
158 	struct regmap_mbq_context *ctx = context;
159 	struct sdw_slave *slave = ctx->sdw;
160 	bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
161 	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
162 	int ret;
163 
164 	if (mbq_size < 0)
165 		return mbq_size;
166 
167 	/*
168 	 * Technically the spec does allow a device to set itself to busy for
169 	 * internal reasons, but since it doesn't provide any information on
170 	 * how to handle timeouts in that case, for now the code will only
171 	 * process a single wait/timeout on function busy and a single retry
172 	 * of the transaction.
173 	 */
174 	ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
175 	if (ret == -EAGAIN) {
176 		ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
177 		if (ret)
178 			return ret;
179 
180 		ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
181 	}
182 
183 	return ret;
184 }
185 
/* Bus operations handed to regmap core; transfers go per-register. */
static const struct regmap_bus regmap_sdw_mbq = {
	.reg_read = regmap_sdw_mbq_read,
	.reg_write = regmap_sdw_mbq_write,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
192 
193 static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
194 {
195 	if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
196 		return -ENOTSUPP;
197 
198 	/* Registers are 32 bits wide */
199 	if (config->reg_bits != 32)
200 		return -ENOTSUPP;
201 
202 	if (config->pad_bits != 0)
203 		return -ENOTSUPP;
204 
205 	return 0;
206 }
207 
208 static struct regmap_mbq_context *
209 regmap_sdw_mbq_gen_context(struct device *dev,
210 			   struct sdw_slave *sdw,
211 			   const struct regmap_config *config,
212 			   const struct regmap_sdw_mbq_cfg *mbq_config)
213 {
214 	struct regmap_mbq_context *ctx;
215 
216 	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
217 	if (!ctx)
218 		return ERR_PTR(-ENOMEM);
219 
220 	ctx->dev = dev;
221 	ctx->sdw = sdw;
222 
223 	if (mbq_config)
224 		ctx->cfg = *mbq_config;
225 
226 	ctx->val_size = config->val_bits / BITS_PER_BYTE;
227 	ctx->readable_reg = config->readable_reg;
228 
229 	return ctx;
230 }
231 
232 struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
233 				     const struct regmap_config *config,
234 				     const struct regmap_sdw_mbq_cfg *mbq_config,
235 				     struct lock_class_key *lock_key,
236 				     const char *lock_name)
237 {
238 	struct regmap_mbq_context *ctx;
239 	int ret;
240 
241 	ret = regmap_sdw_mbq_config_check(config);
242 	if (ret)
243 		return ERR_PTR(ret);
244 
245 	ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
246 	if (IS_ERR(ctx))
247 		return ERR_CAST(ctx);
248 
249 	return __regmap_init(dev, &regmap_sdw_mbq, ctx,
250 			     config, lock_key, lock_name);
251 }
252 EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
253 
254 struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
255 					  const struct regmap_config *config,
256 					  const struct regmap_sdw_mbq_cfg *mbq_config,
257 					  struct lock_class_key *lock_key,
258 					  const char *lock_name)
259 {
260 	struct regmap_mbq_context *ctx;
261 	int ret;
262 
263 	ret = regmap_sdw_mbq_config_check(config);
264 	if (ret)
265 		return ERR_PTR(ret);
266 
267 	ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
268 	if (IS_ERR(ctx))
269 		return ERR_CAST(ctx);
270 
271 	return __devm_regmap_init(dev, &regmap_sdw_mbq, ctx,
272 				  config, lock_key, lock_name);
273 }
274 EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
275 
276 MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
277 MODULE_LICENSE("GPL");
278