1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright(c) 2020 Intel Corporation.
3
4 #include <linux/bits.h>
5 #include <linux/delay.h>
6 #include <linux/device.h>
7 #include <linux/errno.h>
8 #include <linux/iopoll.h>
9 #include <linux/module.h>
10 #include <linux/regmap.h>
11 #include <linux/soundwire/sdw.h>
12 #include <linux/soundwire/sdw_registers.h>
13 #include <sound/sdca_function.h>
14 #include "internal.h"
15
/*
 * Per-regmap context for MBQ accesses, passed as the regmap bus context
 * to regmap_sdw_mbq_read()/regmap_sdw_mbq_write().
 */
struct regmap_mbq_context {
	struct device *dev;	/* device used for diagnostics and callbacks */
	struct sdw_slave *sdw;	/* SoundWire peripheral being accessed */

	/* taken from regmap_config; used to poll FUNCTION_STATUS if readable */
	bool (*readable_reg)(struct device *dev, unsigned int reg);

	struct regmap_sdw_mbq_cfg cfg;	/* device-specific MBQ configuration */

	/* maximum value size in bytes, derived from config->val_bits */
	int val_size;
};
26
regmap_sdw_mbq_size(struct regmap_mbq_context * ctx,unsigned int reg)27 static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
28 {
29 int size = ctx->val_size;
30
31 if (ctx->cfg.mbq_size) {
32 size = ctx->cfg.mbq_size(ctx->dev, reg);
33 if (!size || size > ctx->val_size)
34 return -EINVAL;
35 }
36
37 return size;
38 }
39
regmap_sdw_mbq_deferrable(struct regmap_mbq_context * ctx,unsigned int reg)40 static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
41 {
42 if (ctx->cfg.deferrable)
43 return ctx->cfg.deferrable(ctx->dev, reg);
44
45 return false;
46 }
47
regmap_sdw_mbq_poll_busy(struct sdw_slave * slave,unsigned int reg,struct regmap_mbq_context * ctx)48 static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
49 struct regmap_mbq_context *ctx)
50 {
51 struct device *dev = ctx->dev;
52 int val, ret = 0;
53
54 dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
55
56 reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
57 SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
58
59 if (ctx->readable_reg(dev, reg)) {
60 ret = read_poll_timeout(sdw_read_no_pm, val,
61 val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
62 ctx->cfg.timeout_us, ctx->cfg.retry_us,
63 false, slave, reg);
64 if (val < 0)
65 return val;
66 if (ret)
67 dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
68 } else {
69 fsleep(ctx->cfg.timeout_us);
70 }
71
72 return ret;
73 }
74
regmap_sdw_mbq_write_impl(struct sdw_slave * slave,unsigned int reg,unsigned int val,int mbq_size)75 static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
76 unsigned int reg, unsigned int val,
77 int mbq_size)
78 {
79 int shift = mbq_size * BITS_PER_BYTE;
80 int ret;
81
82 while (--mbq_size > 0) {
83 shift -= BITS_PER_BYTE;
84
85 ret = sdw_write_no_pm(slave, SDW_SDCA_MBQ_CTL(reg),
86 (val >> shift) & 0xff);
87 if (ret < 0)
88 return ret;
89 }
90
91 return sdw_write_no_pm(slave, reg, val & 0xff);
92 }
93
regmap_sdw_mbq_write(void * context,unsigned int reg,unsigned int val)94 static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
95 {
96 struct regmap_mbq_context *ctx = context;
97 struct sdw_slave *slave = ctx->sdw;
98 struct device *dev = ctx->dev;
99 bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
100 int mbq_size = regmap_sdw_mbq_size(ctx, reg);
101 int ret;
102
103 if (mbq_size < 0)
104 return mbq_size;
105
106 /*
107 * Technically the spec does allow a device to set itself to busy for
108 * internal reasons, but since it doesn't provide any information on
109 * how to handle timeouts in that case, for now the code will only
110 * process a single wait/timeout on function busy and a single retry
111 * of the transaction.
112 */
113 ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size);
114 if (ret == -ENODATA) {
115 if (!deferrable)
116 dev_warn(dev, "Defer on undeferrable control: %x\n", reg);
117
118 ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
119 if (ret)
120 return ret;
121
122 ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size);
123 }
124
125 return ret;
126 }
127
regmap_sdw_mbq_read_impl(struct sdw_slave * slave,unsigned int reg,unsigned int * val,int mbq_size)128 static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
129 unsigned int reg, unsigned int *val,
130 int mbq_size)
131 {
132 int shift = BITS_PER_BYTE;
133 int read;
134
135 read = sdw_read_no_pm(slave, reg);
136 if (read < 0)
137 return read;
138
139 *val = read;
140
141 while (--mbq_size > 0) {
142 read = sdw_read_no_pm(slave, SDW_SDCA_MBQ_CTL(reg));
143 if (read < 0)
144 return read;
145
146 *val |= read << shift;
147 shift += BITS_PER_BYTE;
148 }
149
150 return 0;
151 }
152
regmap_sdw_mbq_read(void * context,unsigned int reg,unsigned int * val)153 static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
154 {
155 struct regmap_mbq_context *ctx = context;
156 struct sdw_slave *slave = ctx->sdw;
157 struct device *dev = ctx->dev;
158 bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
159 int mbq_size = regmap_sdw_mbq_size(ctx, reg);
160 int ret;
161
162 if (mbq_size < 0)
163 return mbq_size;
164
165 /*
166 * Technically the spec does allow a device to set itself to busy for
167 * internal reasons, but since it doesn't provide any information on
168 * how to handle timeouts in that case, for now the code will only
169 * process a single wait/timeout on function busy and a single retry
170 * of the transaction.
171 */
172 ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size);
173 if (ret == -ENODATA) {
174 if (!deferrable)
175 dev_warn(dev, "Defer on undeferrable control: %x\n", reg);
176
177 ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
178 if (ret)
179 return ret;
180
181 ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size);
182 }
183
184 return ret;
185 }
186
/* regmap bus operations backed by SoundWire MBQ register accesses */
static const struct regmap_bus regmap_sdw_mbq = {
	.reg_read = regmap_sdw_mbq_read,
	.reg_write = regmap_sdw_mbq_write,
	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
};
193
regmap_sdw_mbq_config_check(const struct regmap_config * config)194 static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
195 {
196 if (config->val_bits > (sizeof(unsigned int) * BITS_PER_BYTE))
197 return -ENOTSUPP;
198
199 /* Registers are 32 bits wide */
200 if (config->reg_bits != 32)
201 return -ENOTSUPP;
202
203 if (config->pad_bits != 0)
204 return -ENOTSUPP;
205
206 return 0;
207 }
208
209 static struct regmap_mbq_context *
regmap_sdw_mbq_gen_context(struct device * dev,struct sdw_slave * sdw,const struct regmap_config * config,const struct regmap_sdw_mbq_cfg * mbq_config)210 regmap_sdw_mbq_gen_context(struct device *dev,
211 struct sdw_slave *sdw,
212 const struct regmap_config *config,
213 const struct regmap_sdw_mbq_cfg *mbq_config)
214 {
215 struct regmap_mbq_context *ctx;
216
217 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
218 if (!ctx)
219 return ERR_PTR(-ENOMEM);
220
221 ctx->dev = dev;
222 ctx->sdw = sdw;
223
224 if (mbq_config)
225 ctx->cfg = *mbq_config;
226
227 ctx->val_size = config->val_bits / BITS_PER_BYTE;
228 ctx->readable_reg = config->readable_reg;
229
230 return ctx;
231 }
232
__regmap_init_sdw_mbq(struct device * dev,struct sdw_slave * sdw,const struct regmap_config * config,const struct regmap_sdw_mbq_cfg * mbq_config,struct lock_class_key * lock_key,const char * lock_name)233 struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
234 const struct regmap_config *config,
235 const struct regmap_sdw_mbq_cfg *mbq_config,
236 struct lock_class_key *lock_key,
237 const char *lock_name)
238 {
239 struct regmap_mbq_context *ctx;
240 int ret;
241
242 ret = regmap_sdw_mbq_config_check(config);
243 if (ret)
244 return ERR_PTR(ret);
245
246 ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
247 if (IS_ERR(ctx))
248 return ERR_CAST(ctx);
249
250 return __regmap_init(dev, ®map_sdw_mbq, ctx,
251 config, lock_key, lock_name);
252 }
253 EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
254
__devm_regmap_init_sdw_mbq(struct device * dev,struct sdw_slave * sdw,const struct regmap_config * config,const struct regmap_sdw_mbq_cfg * mbq_config,struct lock_class_key * lock_key,const char * lock_name)255 struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
256 const struct regmap_config *config,
257 const struct regmap_sdw_mbq_cfg *mbq_config,
258 struct lock_class_key *lock_key,
259 const char *lock_name)
260 {
261 struct regmap_mbq_context *ctx;
262 int ret;
263
264 ret = regmap_sdw_mbq_config_check(config);
265 if (ret)
266 return ERR_PTR(ret);
267
268 ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
269 if (IS_ERR(ctx))
270 return ERR_CAST(ctx);
271
272 return __devm_regmap_init(dev, ®map_sdw_mbq, ctx,
273 config, lock_key, lock_name);
274 }
275 EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
276
277 MODULE_DESCRIPTION("regmap SoundWire MBQ Module");
278 MODULE_LICENSE("GPL");
279