// SPDX-License-Identifier: GPL-2.0
// Copyright(c) 2020 Intel Corporation.

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
#include <sound/sdca_function.h>
#include "internal.h"

struct regmap_mbq_context {
	struct device *dev;

	struct regmap_sdw_mbq_cfg cfg;

	int val_size;
	bool (*readable_reg)(struct device *dev, unsigned int reg);
};

static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
{
	int size = ctx->val_size;

	if (ctx->cfg.mbq_size) {
		size = ctx->cfg.mbq_size(ctx->dev, reg);
		if (!size || size > ctx->val_size)
			return -EINVAL;
	}

	return size;
}
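
/*
 * Example (hypothetical driver code, not part of this file): a codec
 * driver could provide a per-register size callback along these lines,
 * assuming a 2-byte volume control and 1-byte controls everywhere else
 * (FOO_VOLUME_CTL is an invented register name):
 *
 *	static int foo_mbq_size(struct device *dev, unsigned int reg)
 *	{
 *		switch (reg) {
 *		case FOO_VOLUME_CTL:
 *			return 2;
 *		default:
 *			return 1;
 *		}
 *	}
 */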

static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
{
	if (ctx->cfg.deferrable)
		return ctx->cfg.deferrable(ctx->dev, reg);

	return false;
}
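
/*
 * Example (hypothetical): a driver whose SDCA controls are all marked as
 * deferrable in its firmware description could simply report every
 * register as deferrable:
 *
 *	static bool foo_deferrable(struct device *dev, unsigned int reg)
 *	{
 *		return true;
 *	}
 */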

static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
				    struct regmap_mbq_context *ctx)
{
	struct device *dev = &slave->dev;
	int val, ret = 0;

	dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);

	reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
			   SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);

	if (ctx->readable_reg(dev, reg)) {
		ret = read_poll_timeout(sdw_read_no_pm, val,
					val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
					ctx->cfg.retry_us, ctx->cfg.timeout_us,
					false, slave, reg);
		if (val < 0)
			return val;
		if (ret)
			dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
	} else {
		fsleep(ctx->cfg.timeout_us);
	}

	return ret;
}
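
/*
 * Worked example of the polling above (illustrative numbers only): with
 * retry_us = 100 and timeout_us = 10000, FUNCTION_STATUS is read roughly
 * every 100us until FUNCTION_BUSY clears, giving up after 10ms. If
 * FUNCTION_STATUS is not readable, the code instead sleeps for the full
 * 10ms and the caller retries the transaction blind.
 */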

static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
				     unsigned int reg, unsigned int val,
				     int mbq_size, bool deferrable)
{
	int shift = mbq_size * BITS_PER_BYTE;
	int ret;

	while (--mbq_size > 0) {
		shift -= BITS_PER_BYTE;

		ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
				      (val >> shift) & 0xff);
		if (ret < 0)
			return ret;
	}

	ret = sdw_write_no_pm(slave, reg, val & 0xff);
	if (deferrable && ret == -ENODATA)
		return -EAGAIN;

	return ret;
}
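
/*
 * Worked example: a 2-byte write of val = 0x1234 starts with shift = 16;
 * the loop writes the high byte 0x12 to SDW_SDCA_MBQ_CTL(reg) (shift = 8),
 * then the final statement writes the low byte 0x34 to reg itself. The
 * write to the base register is what commits the whole MBQ transaction.
 */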

static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
	struct regmap_mbq_context *ctx = context;
	struct device *dev = ctx->dev;
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
	int ret;

	if (mbq_size < 0)
		return mbq_size;

	/*
	 * Technically the spec does allow a device to set itself to busy for
	 * internal reasons, but since it doesn't provide any information on
	 * how to handle timeouts in that case, for now the code will only
	 * process a single wait/timeout on function busy and a single retry
	 * of the transaction.
	 */
	ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
	if (ret == -EAGAIN) {
		ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
		if (ret)
			return ret;

		ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
	}

	return ret;
}

static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
				    unsigned int reg, unsigned int *val,
				    int mbq_size, bool deferrable)
{
	int shift = BITS_PER_BYTE;
	int read;

	read = sdw_read_no_pm(slave, reg);
	if (read < 0) {
		if (deferrable && read == -ENODATA)
			return -EAGAIN;

		return read;
	}

	*val = read;

	while (--mbq_size > 0) {
		read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
		if (read < 0)
			return read;

		*val |= read << shift;
		shift += BITS_PER_BYTE;
	}

	return 0;
}
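
/*
 * Worked example: a 2-byte read first fetches the low byte from reg, then
 * one read of SDW_SDCA_MBQ_CTL(reg) supplies the high byte, OR-ed in at
 * shift = 8. For a 4-byte value, two further MBQ_CTL reads follow at
 * shifts 16 and 24.
 */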

static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
	struct regmap_mbq_context *ctx = context;
	struct device *dev = ctx->dev;
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
	int ret;

	if (mbq_size < 0)
		return mbq_size;

	/*
	 * Technically the spec does allow a device to set itself to busy for
	 * internal reasons, but since it doesn't provide any information on
	 * how to handle timeouts in that case, for now the code will only
	 * process a single wait/timeout on function busy and a single retry
	 * of the transaction.
	 */
	ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
	if (ret == -EAGAIN) {
		ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
		if (ret)
			return ret;

		ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
	}

	return ret;
}

static const struct regmap_bus regmap_sdw_mbq = {
	.reg_read = regmap_sdw_mbq_read,
	.reg_write = regmap_sdw_mbq_write,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};

static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
{
	if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
		return -ENOTSUPP;

	/* Registers are 32 bits wide */
	if (config->reg_bits != 32)
		return -ENOTSUPP;

	if (config->pad_bits != 0)
		return -ENOTSUPP;

	return 0;
}

static struct regmap_mbq_context *
regmap_sdw_mbq_gen_context(struct device *dev,
			   const struct regmap_config *config,
			   const struct regmap_sdw_mbq_cfg *mbq_config)
{
	struct regmap_mbq_context *ctx;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->dev = dev;

	if (mbq_config)
		ctx->cfg = *mbq_config;

	ctx->val_size = config->val_bits / BITS_PER_BYTE;
	ctx->readable_reg = config->readable_reg;

	return ctx;
}

struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
				     const struct regmap_config *config,
				     const struct regmap_sdw_mbq_cfg *mbq_config,
				     struct lock_class_key *lock_key,
				     const char *lock_name)
{
	struct regmap_mbq_context *ctx;
	int ret;

	ret = regmap_sdw_mbq_config_check(config);
	if (ret)
		return ERR_PTR(ret);

	ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
			     config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);

struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
					  const struct regmap_config *config,
					  const struct regmap_sdw_mbq_cfg *mbq_config,
					  struct lock_class_key *lock_key,
					  const char *lock_name)
{
	struct regmap_mbq_context *ctx;
	int ret;

	ret = regmap_sdw_mbq_config_check(config);
	if (ret)
		return ERR_PTR(ret);

	ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
	if (IS_ERR(ctx))
		return ERR_CAST(ctx);

	return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
				  config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
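
/*
 * Example usage (hypothetical driver code, a sketch only: the foo_* names
 * are invented, and the wrapper macro name is assumed to be the
 * devm_regmap_init_sdw_mbq_cfg() helper declared in <linux/regmap.h>):
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 32,
 *		.val_bits = 16,
 *		.readable_reg = foo_readable_reg,
 *	};
 *
 *	static const struct regmap_sdw_mbq_cfg foo_mbq_cfg = {
 *		.mbq_size = foo_mbq_size,
 *		.deferrable = foo_deferrable,
 *		.timeout_us = 10000,
 *		.retry_us = 100,
 *	};
 *
 *	regmap = devm_regmap_init_sdw_mbq_cfg(sdw, &foo_regmap_config,
 *					      &foo_mbq_cfg);
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);
 */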

MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
MODULE_LICENSE("GPL");