// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Analog Devices Inc.
 * Copyright (C) 2024 BayLibre, SAS
 */

/*
 * SPI Offloading support.
 *
 * Some SPI controllers support offloading of SPI transfers. Essentially, this
 * is the ability for a SPI controller to perform SPI transfers with minimal
 * or even no CPU intervention, e.g. via a specialized SPI controller with a
 * hardware trigger or via a conventional SPI controller using a non-Linux MCU
 * processor core to offload the work.
 */

#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/property.h>
#include <linux/spi/offload/consumer.h>
#include <linux/spi/offload/provider.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

struct spi_controller_and_offload {
	struct spi_controller *controller;
	struct spi_offload *offload;
};

struct spi_offload_trigger {
	struct list_head list;
	struct kref ref;
	struct fwnode_handle *fwnode;
	/* synchronizes calling ops and driver registration */
	struct mutex lock;
	/*
	 * If the provider goes away while the consumer still has a reference,
	 * ops and priv will be set to NULL and all calls will fail with
	 * -ENODEV.
	 */
	const struct spi_offload_trigger_ops *ops;
	void *priv;
};

static LIST_HEAD(spi_offload_triggers);
static DEFINE_MUTEX(spi_offload_triggers_lock);

/**
 * devm_spi_offload_alloc() - Allocate offload instance
 * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
 * @priv_size: Size of private data to allocate
 *
 * Offload providers should use this to allocate offload instances.
 *
 * Return: Pointer to new offload instance or error on failure.
 */
struct spi_offload *devm_spi_offload_alloc(struct device *dev,
					   size_t priv_size)
{
	struct spi_offload *offload;
	void *priv;

	offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
	if (!offload)
		return ERR_PTR(-ENOMEM);

	priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	offload->provider_dev = dev;
	offload->priv = priv;

	return offload;
}
EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
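
/*
 * Minimal usage sketch (illustration only, not compiled here): an offload
 * provider would typically allocate its instance from probe and keep its
 * driver-specific state in the private area. "struct my_offload_priv" and
 * "my_probe" are hypothetical names used purely for illustration.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct spi_offload *offload;
 *		struct my_offload_priv *priv;
 *
 *		offload = devm_spi_offload_alloc(&pdev->dev, sizeof(*priv));
 *		if (IS_ERR(offload))
 *			return PTR_ERR(offload);
 *
 *		priv = offload->priv;
 *		...
 *	}
 */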

static void spi_offload_put(void *data)
{
	struct spi_controller_and_offload *resource = data;

	resource->controller->put_offload(resource->offload);
	kfree(resource);
}

/**
 * devm_spi_offload_get() - Get an offload instance
 * @dev: Device for devm purposes
 * @spi: SPI device to use for the transfers
 * @config: Offload configuration
 *
 * Peripheral drivers call this function to get an offload instance that meets
 * the requirements specified in @config. If no suitable offload instance is
 * available, -ENODEV is returned.
 *
 * Return: Offload instance or error on failure.
 */
struct spi_offload *devm_spi_offload_get(struct device *dev,
					 struct spi_device *spi,
					 const struct spi_offload_config *config)
{
	struct spi_controller_and_offload *resource;
	struct spi_offload *offload;
	int ret;

	if (!spi || !config)
		return ERR_PTR(-EINVAL);

	if (!spi->controller->get_offload)
		return ERR_PTR(-ENODEV);

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		return ERR_PTR(-ENOMEM);

	offload = spi->controller->get_offload(spi, config);
	if (IS_ERR(offload)) {
		kfree(resource);
		return offload;
	}

	resource->controller = spi->controller;
	resource->offload = offload;

	ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
	if (ret)
		return ERR_PTR(ret);

	return offload;
}
EXPORT_SYMBOL_GPL(devm_spi_offload_get);
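
/*
 * Minimal usage sketch (illustration only, not compiled here): a peripheral
 * driver asks for an offload that can provide a hardware trigger and an RX
 * DMA stream. The capability flag names are assumed from
 * include/linux/spi/offload/types.h.
 *
 *	static const struct spi_offload_config my_offload_config = {
 *		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
 *				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
 *	};
 *
 *	offload = devm_spi_offload_get(&spi->dev, spi, &my_offload_config);
 *	if (IS_ERR(offload))
 *		return dev_err_probe(&spi->dev, PTR_ERR(offload),
 *				     "failed to get offload\n");
 */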

static void spi_offload_trigger_free(struct kref *ref)
{
	struct spi_offload_trigger *trigger =
		container_of(ref, struct spi_offload_trigger, ref);

	mutex_destroy(&trigger->lock);
	fwnode_handle_put(trigger->fwnode);
	kfree(trigger);
}

static void spi_offload_trigger_put(void *data)
{
	struct spi_offload_trigger *trigger = data;

	scoped_guard(mutex, &trigger->lock)
		if (trigger->ops && trigger->ops->release)
			trigger->ops->release(trigger);

	kref_put(&trigger->ref, spi_offload_trigger_free);
}

static struct spi_offload_trigger
*spi_offload_trigger_get(enum spi_offload_trigger_type type,
			 struct fwnode_reference_args *args)
{
	struct spi_offload_trigger *trigger;
	bool match = false;
	int ret;

	guard(mutex)(&spi_offload_triggers_lock);

	list_for_each_entry(trigger, &spi_offload_triggers, list) {
		if (trigger->fwnode != args->fwnode)
			continue;

		match = trigger->ops->match(trigger, type, args->args, args->nargs);
		if (match)
			break;
	}

	if (!match)
		return ERR_PTR(-EPROBE_DEFER);

	guard(mutex)(&trigger->lock);

	if (trigger->ops->request) {
		ret = trigger->ops->request(trigger, type, args->args, args->nargs);
		if (ret)
			return ERR_PTR(ret);
	}

	kref_get(&trigger->ref);

	return trigger;
}

/**
 * devm_spi_offload_trigger_get() - Get an offload trigger instance
 * @dev: Device for devm purposes.
 * @offload: Offload instance connected to a trigger.
 * @type: Trigger type to get.
 *
 * Return: Offload trigger instance or error on failure.
 */
struct spi_offload_trigger
*devm_spi_offload_trigger_get(struct device *dev,
			      struct spi_offload *offload,
			      enum spi_offload_trigger_type type)
{
	struct spi_offload_trigger *trigger;
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
						 "trigger-sources",
						 "#trigger-source-cells", 0, 0,
						 &args);
	if (ret)
		return ERR_PTR(ret);

	trigger = spi_offload_trigger_get(type, &args);
	fwnode_handle_put(args.fwnode);
	if (IS_ERR(trigger))
		return trigger;

	ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
	if (ret)
		return ERR_PTR(ret);

	return trigger;
}
EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
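
/*
 * Minimal usage sketch (illustration only, not compiled here): once an
 * offload has been obtained, the peripheral driver looks up the trigger
 * wired to it through the provider's "trigger-sources" firmware property.
 * The trigger type name is assumed from include/linux/spi/offload/types.h.
 *
 *	trigger = devm_spi_offload_trigger_get(dev, offload,
 *					       SPI_OFFLOAD_TRIGGER_PERIODIC);
 *	if (IS_ERR(trigger))
 *		return PTR_ERR(trigger);
 */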

/**
 * spi_offload_trigger_validate - Validate the requested trigger
 * @trigger: Offload trigger instance
 * @config: Trigger config to validate
 *
 * On success, @config may be modified to reflect what the hardware can do.
 * For example, the frequency of a periodic trigger may be adjusted to the
 * nearest supported value.
 *
 * Callers will likely need to do additional validation of the modified trigger
 * parameters.
 *
 * Return: 0 on success, negative error code on failure.
 */
int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
				 struct spi_offload_trigger_config *config)
{
	guard(mutex)(&trigger->lock);

	if (!trigger->ops)
		return -ENODEV;

	if (!trigger->ops->validate)
		return -EOPNOTSUPP;

	return trigger->ops->validate(trigger, config);
}
EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
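
/*
 * Minimal usage sketch (illustration only, not compiled here): validating a
 * periodic trigger before enabling it. The config field names are assumed
 * from include/linux/spi/offload/types.h; the provider may round the
 * requested frequency, so the caller re-reads the value written back.
 *
 *	struct spi_offload_trigger_config config = {
 *		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
 *		.periodic = {
 *			.frequency_hz = 1000000,
 *		},
 *	};
 *
 *	ret = spi_offload_trigger_validate(trigger, &config);
 *	if (ret)
 *		return ret;
 *	// config.periodic.frequency_hz now holds the rate the hardware can do
 */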

/**
 * spi_offload_trigger_enable - enables trigger for offload
 * @offload: Offload instance
 * @trigger: Offload trigger instance
 * @config: Trigger config to use when enabling the trigger
 *
 * There must be a prepared offload message (i.e. spi_optimize_message() was
 * called with @offload assigned to the message) before the trigger is
 * enabled. This will also reserve the bus for exclusive use by the offload
 * instance until the trigger is disabled. Any other attempts to send a
 * transfer or lock the bus will fail with -EBUSY during this time.
 *
 * Calls must be balanced with spi_offload_trigger_disable().
 *
 * Context: can sleep
 * Return: 0 on success, else a negative error code.
 */
int spi_offload_trigger_enable(struct spi_offload *offload,
			       struct spi_offload_trigger *trigger,
			       struct spi_offload_trigger_config *config)
{
	int ret;

	guard(mutex)(&trigger->lock);

	if (!trigger->ops)
		return -ENODEV;

	if (offload->ops && offload->ops->trigger_enable) {
		ret = offload->ops->trigger_enable(offload);
		if (ret)
			return ret;
	}

	if (trigger->ops->enable) {
		ret = trigger->ops->enable(trigger, config);
		if (ret) {
			if (offload->ops && offload->ops->trigger_disable)
				offload->ops->trigger_disable(offload);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
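
/*
 * Minimal usage sketch (illustration only, not compiled here): the message
 * is optimized with the offload assigned before the trigger is enabled, and
 * every enable is paired with spi_offload_trigger_disable(). The msg->offload
 * field is assumed from the offload support in include/linux/spi/spi.h.
 *
 *	msg->offload = offload;
 *	ret = spi_optimize_message(spi, msg);
 *	if (ret)
 *		return ret;
 *
 *	ret = spi_offload_trigger_enable(offload, trigger, &config);
 *	if (ret)
 *		return ret;
 *
 *	... transfers now run on the hardware trigger, without the CPU ...
 *
 *	spi_offload_trigger_disable(offload, trigger);
 *	spi_unoptimize_message(msg);
 */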

/**
 * spi_offload_trigger_disable - disables hardware trigger for offload
 * @offload: Offload instance
 * @trigger: Offload trigger instance
 *
 * Disables the hardware trigger for the offload instance and releases the bus
 * for use by other clients.
 *
 * Context: can sleep
 */
void spi_offload_trigger_disable(struct spi_offload *offload,
				 struct spi_offload_trigger *trigger)
{
	if (offload->ops && offload->ops->trigger_disable)
		offload->ops->trigger_disable(offload);

	guard(mutex)(&trigger->lock);

	if (!trigger->ops)
		return;

	if (trigger->ops->disable)
		trigger->ops->disable(trigger);
}
EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);

static void spi_offload_release_dma_chan(void *chan)
{
	dma_release_channel(chan);
}

/**
 * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream
 * @dev: Device for devm purposes.
 * @offload: Offload instance
 *
 * This is the DMA channel that will provide data to transfers that use the
 * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
 *
 * Return: Pointer to DMA channel info, or negative error code
 */
struct dma_chan
*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
					     struct spi_offload *offload)
{
	struct dma_chan *chan;
	int ret;

	if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
		return ERR_PTR(-EOPNOTSUPP);

	chan = offload->ops->tx_stream_request_dma_chan(offload);
	if (IS_ERR(chan))
		return chan;

	ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
	if (ret)
		return ERR_PTR(ret);

	return chan;
}
EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);

/**
 * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream
 * @dev: Device for devm purposes.
 * @offload: Offload instance
 *
 * This is the DMA channel that will receive data from transfers that use the
 * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
 *
 * Return: Pointer to DMA channel info, or negative error code
 */
struct dma_chan
*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
					     struct spi_offload *offload)
{
	struct dma_chan *chan;
	int ret;

	if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
		return ERR_PTR(-EOPNOTSUPP);

	chan = offload->ops->rx_stream_request_dma_chan(offload);
	if (IS_ERR(chan))
		return chan;

	ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
	if (ret)
		return ERR_PTR(ret);

	return chan;
}
EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
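
/*
 * Minimal usage sketch (illustration only, not compiled here): a peripheral
 * driver that marks a transfer with SPI_OFFLOAD_XFER_RX_STREAM requests the
 * matching DMA channel once at probe time and then submits descriptors on it
 * to collect the streamed RX data. The TX variant above is used the same way
 * for SPI_OFFLOAD_XFER_TX_STREAM.
 *
 *	chan = devm_spi_offload_rx_stream_request_dma_chan(dev, offload);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */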

/* Trigger providers */

static void spi_offload_trigger_unregister(void *data)
{
	struct spi_offload_trigger *trigger = data;

	scoped_guard(mutex, &spi_offload_triggers_lock)
		list_del(&trigger->list);

	scoped_guard(mutex, &trigger->lock) {
		trigger->priv = NULL;
		trigger->ops = NULL;
	}

	kref_put(&trigger->ref, spi_offload_trigger_free);
}

/**
 * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
 * @dev: Device for devm purposes.
 * @info: Provider-specific trigger info.
 *
 * Return: 0 on success, else a negative error code.
 */
int devm_spi_offload_trigger_register(struct device *dev,
				      struct spi_offload_trigger_info *info)
{
	struct spi_offload_trigger *trigger;

	if (!info->fwnode || !info->ops || !info->ops->match)
		return -EINVAL;

	trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
	if (!trigger)
		return -ENOMEM;

	kref_init(&trigger->ref);
	mutex_init(&trigger->lock);
	trigger->fwnode = fwnode_handle_get(info->fwnode);
	trigger->ops = info->ops;
	trigger->priv = info->priv;

	scoped_guard(mutex, &spi_offload_triggers_lock)
		list_add_tail(&trigger->list, &spi_offload_triggers);

	return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
}
EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
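
/*
 * Minimal usage sketch (illustration only, not compiled here): a trigger
 * provider fills in a struct spi_offload_trigger_info and registers it from
 * probe. "my_trigger_ops" and "st" are hypothetical names used purely for
 * illustration.
 *
 *	struct spi_offload_trigger_info info = {
 *		.fwnode = dev_fwnode(dev),
 *		.ops = &my_trigger_ops,
 *		.priv = st,
 *	};
 *
 *	return devm_spi_offload_trigger_register(dev, &info);
 */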

/**
 * spi_offload_trigger_get_priv() - Get the private data for the trigger
 * @trigger: Offload trigger instance.
 *
 * Return: Private data for the trigger.
 */
void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
{
	return trigger->priv;
}
EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);