xref: /linux/drivers/spi/spi.c (revision b2680ba4a2ad259c7bbd856ed830b459e11d88ba)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6 
7 #include <linux/acpi.h>
8 #include <linux/cache.h>
9 #include <linux/clk/clk-conf.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/export.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/highmem.h>
17 #include <linux/idr.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/kernel.h>
21 #include <linux/kthread.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/mutex.h>
24 #include <linux/of_device.h>
25 #include <linux/of_irq.h>
26 #include <linux/percpu.h>
27 #include <linux/platform_data/x86/apple.h>
28 #include <linux/pm_domain.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/property.h>
31 #include <linux/ptp_clock_kernel.h>
32 #include <linux/sched/rt.h>
33 #include <linux/slab.h>
34 #include <linux/spi/offload/types.h>
35 #include <linux/spi/spi.h>
36 #include <linux/spi/spi-mem.h>
37 #include <uapi/linux/sched/types.h>
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/spi.h>
41 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
42 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
43 
44 #include "internals.h"
45 
46 static int __spi_setup(struct spi_device *spi, bool initial_setup);
47 
48 static DEFINE_IDR(spi_controller_idr);
49 
50 static void spidev_release(struct device *dev)
51 {
52 	struct spi_device	*spi = to_spi_device(dev);
53 
54 	spi_controller_put(spi->controller);
55 	free_percpu(spi->pcpu_statistics);
56 	kfree(spi);
57 }
58 
59 static ssize_t
60 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
61 {
62 	const struct spi_device	*spi = to_spi_device(dev);
63 	int len;
64 
65 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
66 	if (len != -ENODEV)
67 		return len;
68 
69 	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
70 }
71 static DEVICE_ATTR_RO(modalias);
72 
73 static ssize_t driver_override_store(struct device *dev,
74 				     struct device_attribute *a,
75 				     const char *buf, size_t count)
76 {
77 	int ret;
78 
79 	ret = __device_set_driver_override(dev, buf, count);
80 	if (ret)
81 		return ret;
82 
83 	return count;
84 }
85 
86 static ssize_t driver_override_show(struct device *dev,
87 				    struct device_attribute *a, char *buf)
88 {
89 	guard(spinlock)(&dev->driver_override.lock);
90 	return sysfs_emit(buf, "%s\n", dev->driver_override.name ?: "");
91 }
92 static DEVICE_ATTR_RW(driver_override);
93 
94 static struct spi_statistics __percpu *spi_alloc_pcpu_stats(void)
95 {
96 	struct spi_statistics __percpu *pcpu_stats;
97 	int cpu;
98 
99 	pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
100 	if (!pcpu_stats)
101 		return NULL;
102 
103 	for_each_possible_cpu(cpu) {
104 		struct spi_statistics *stat;
105 
106 		stat = per_cpu_ptr(pcpu_stats, cpu);
107 		u64_stats_init(&stat->syncp);
108 	}
109 
110 	return pcpu_stats;
111 }
112 
113 static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
114 				   char *buf, size_t offset)
115 {
116 	u64 val = 0;
117 	int i;
118 
119 	for_each_possible_cpu(i) {
120 		const struct spi_statistics *pcpu_stats;
121 		u64_stats_t *field;
122 		unsigned int start;
123 		u64 inc;
124 
125 		pcpu_stats = per_cpu_ptr(stat, i);
126 		field = (void *)pcpu_stats + offset;
127 		do {
128 			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
129 			inc = u64_stats_read(field);
130 		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
131 		val += inc;
132 	}
133 	return sysfs_emit(buf, "%llu\n", val);
134 }
135 
136 #define SPI_STATISTICS_ATTRS(field, file)				\
137 static ssize_t spi_controller_##field##_show(struct device *dev,	\
138 					     struct device_attribute *attr, \
139 					     char *buf)			\
140 {									\
141 	struct spi_controller *ctlr = container_of(dev,			\
142 					 struct spi_controller, dev);	\
143 	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
144 }									\
145 static struct device_attribute dev_attr_spi_controller_##field = {	\
146 	.attr = { .name = file, .mode = 0444 },				\
147 	.show = spi_controller_##field##_show,				\
148 };									\
149 static ssize_t spi_device_##field##_show(struct device *dev,		\
150 					 struct device_attribute *attr,	\
151 					char *buf)			\
152 {									\
153 	struct spi_device *spi = to_spi_device(dev);			\
154 	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
155 }									\
156 static struct device_attribute dev_attr_spi_device_##field = {		\
157 	.attr = { .name = file, .mode = 0444 },				\
158 	.show = spi_device_##field##_show,				\
159 }
160 
161 #define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
162 static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
163 					    char *buf)			\
164 {									\
165 	return spi_emit_pcpu_stats(stat, buf,				\
166 			offsetof(struct spi_statistics, field));	\
167 }									\
168 SPI_STATISTICS_ATTRS(name, file)
169 
170 #define SPI_STATISTICS_SHOW(field)					\
171 	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
172 				 field)
173 
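/*
 * For reference (not an exhaustive expansion): SPI_STATISTICS_SHOW(messages)
 * below generates a spi_statistics_messages_show() helper that emits the
 * per-CPU sum of the "messages" counter, plus the two attributes
 * dev_attr_spi_controller_messages and dev_attr_spi_device_messages, each
 * exposed as a read-only "messages" file in the statistics sysfs groups.
 */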
174 SPI_STATISTICS_SHOW(messages);
175 SPI_STATISTICS_SHOW(transfers);
176 SPI_STATISTICS_SHOW(errors);
177 SPI_STATISTICS_SHOW(timedout);
178 
179 SPI_STATISTICS_SHOW(spi_sync);
180 SPI_STATISTICS_SHOW(spi_sync_immediate);
181 SPI_STATISTICS_SHOW(spi_async);
182 
183 SPI_STATISTICS_SHOW(bytes);
184 SPI_STATISTICS_SHOW(bytes_rx);
185 SPI_STATISTICS_SHOW(bytes_tx);
186 
187 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
188 	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
189 				 "transfer_bytes_histo_" number,	\
190 				 transfer_bytes_histo[index])
191 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
192 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
193 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
194 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
195 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
196 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
197 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
198 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
208 
209 SPI_STATISTICS_SHOW(transfers_split_maxsize);
210 
211 static struct attribute *spi_dev_attrs[] = {
212 	&dev_attr_modalias.attr,
213 	&dev_attr_driver_override.attr,
214 	NULL,
215 };
216 
217 static const struct attribute_group spi_dev_group = {
218 	.attrs  = spi_dev_attrs,
219 };
220 
221 static struct attribute *spi_device_statistics_attrs[] = {
222 	&dev_attr_spi_device_messages.attr,
223 	&dev_attr_spi_device_transfers.attr,
224 	&dev_attr_spi_device_errors.attr,
225 	&dev_attr_spi_device_timedout.attr,
226 	&dev_attr_spi_device_spi_sync.attr,
227 	&dev_attr_spi_device_spi_sync_immediate.attr,
228 	&dev_attr_spi_device_spi_async.attr,
229 	&dev_attr_spi_device_bytes.attr,
230 	&dev_attr_spi_device_bytes_rx.attr,
231 	&dev_attr_spi_device_bytes_tx.attr,
232 	&dev_attr_spi_device_transfer_bytes_histo0.attr,
233 	&dev_attr_spi_device_transfer_bytes_histo1.attr,
234 	&dev_attr_spi_device_transfer_bytes_histo2.attr,
235 	&dev_attr_spi_device_transfer_bytes_histo3.attr,
236 	&dev_attr_spi_device_transfer_bytes_histo4.attr,
237 	&dev_attr_spi_device_transfer_bytes_histo5.attr,
238 	&dev_attr_spi_device_transfer_bytes_histo6.attr,
239 	&dev_attr_spi_device_transfer_bytes_histo7.attr,
240 	&dev_attr_spi_device_transfer_bytes_histo8.attr,
241 	&dev_attr_spi_device_transfer_bytes_histo9.attr,
242 	&dev_attr_spi_device_transfer_bytes_histo10.attr,
243 	&dev_attr_spi_device_transfer_bytes_histo11.attr,
244 	&dev_attr_spi_device_transfer_bytes_histo12.attr,
245 	&dev_attr_spi_device_transfer_bytes_histo13.attr,
246 	&dev_attr_spi_device_transfer_bytes_histo14.attr,
247 	&dev_attr_spi_device_transfer_bytes_histo15.attr,
248 	&dev_attr_spi_device_transfer_bytes_histo16.attr,
249 	&dev_attr_spi_device_transfers_split_maxsize.attr,
250 	NULL,
251 };
252 
253 static const struct attribute_group spi_device_statistics_group = {
254 	.name  = "statistics",
255 	.attrs  = spi_device_statistics_attrs,
256 };
257 
258 static const struct attribute_group *spi_dev_groups[] = {
259 	&spi_dev_group,
260 	&spi_device_statistics_group,
261 	NULL,
262 };
263 
264 static struct attribute *spi_controller_statistics_attrs[] = {
265 	&dev_attr_spi_controller_messages.attr,
266 	&dev_attr_spi_controller_transfers.attr,
267 	&dev_attr_spi_controller_errors.attr,
268 	&dev_attr_spi_controller_timedout.attr,
269 	&dev_attr_spi_controller_spi_sync.attr,
270 	&dev_attr_spi_controller_spi_sync_immediate.attr,
271 	&dev_attr_spi_controller_spi_async.attr,
272 	&dev_attr_spi_controller_bytes.attr,
273 	&dev_attr_spi_controller_bytes_rx.attr,
274 	&dev_attr_spi_controller_bytes_tx.attr,
275 	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
276 	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
277 	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
278 	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
279 	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
280 	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
281 	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
282 	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
283 	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
284 	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
285 	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
286 	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
287 	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
288 	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
289 	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
290 	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
291 	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
292 	&dev_attr_spi_controller_transfers_split_maxsize.attr,
293 	NULL,
294 };
295 
296 static const struct attribute_group spi_controller_statistics_group = {
297 	.name  = "statistics",
298 	.attrs  = spi_controller_statistics_attrs,
299 };
300 
301 static const struct attribute_group *spi_controller_groups[] = {
302 	&spi_controller_statistics_group,
303 	NULL,
304 };
305 
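/*
 * Worked example for the histogram bucketing below (illustrative):
 * for xfer->len == 100, fls(100) == 7, so l2len == 6 and the transfer
 * lands in transfer_bytes_histo[6], i.e. the "64-127" sysfs bucket
 * declared above.  For a zero-length transfer fls(0) == 0 makes l2len
 * negative, which is clamped into bucket 0 ("0-1").
 */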
306 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
307 					      struct spi_transfer *xfer,
308 					      struct spi_message *msg)
309 {
310 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
311 	struct spi_statistics *stats;
312 
313 	if (l2len < 0)
314 		l2len = 0;
315 
316 	get_cpu();
317 	stats = this_cpu_ptr(pcpu_stats);
318 	u64_stats_update_begin(&stats->syncp);
319 
320 	u64_stats_inc(&stats->transfers);
321 	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
322 
323 	u64_stats_add(&stats->bytes, xfer->len);
324 	if (spi_valid_txbuf(msg, xfer))
325 		u64_stats_add(&stats->bytes_tx, xfer->len);
326 	if (spi_valid_rxbuf(msg, xfer))
327 		u64_stats_add(&stats->bytes_rx, xfer->len);
328 
329 	u64_stats_update_end(&stats->syncp);
330 	put_cpu();
331 }
332 
333 /*
334  * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
335  * and the sysfs version makes coldplug work too.
336  */
337 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
338 {
339 	while (id->name[0]) {
340 		if (!strcmp(name, id->name))
341 			return id;
342 		id++;
343 	}
344 	return NULL;
345 }
346 
347 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
348 {
349 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
350 
351 	return spi_match_id(sdrv->id_table, sdev->modalias);
352 }
353 EXPORT_SYMBOL_GPL(spi_get_device_id);
354 
355 const void *spi_get_device_match_data(const struct spi_device *sdev)
356 {
357 	const void *match;
358 
359 	match = device_get_match_data(&sdev->dev);
360 	if (match)
361 		return match;
362 
363 	return (const void *)spi_get_device_id(sdev)->driver_data;
364 }
365 EXPORT_SYMBOL_GPL(spi_get_device_match_data);
366 
367 static int spi_match_device(struct device *dev, const struct device_driver *drv)
368 {
369 	const struct spi_device	*spi = to_spi_device(dev);
370 	const struct spi_driver	*sdrv = to_spi_driver(drv);
371 	int ret;
372 
373 	/* Check override first, and if set, only use the named driver */
374 	ret = device_match_driver_override(dev, drv);
375 	if (ret >= 0)
376 		return ret;
377 
378 	/* Attempt an OF style match */
379 	if (of_driver_match_device(dev, drv))
380 		return 1;
381 
382 	/* Then try ACPI */
383 	if (acpi_driver_match_device(dev, drv))
384 		return 1;
385 
386 	if (sdrv->id_table)
387 		return !!spi_match_id(sdrv->id_table, spi->modalias);
388 
389 	return strcmp(spi->modalias, drv->name) == 0;
390 }
391 
392 static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
393 {
394 	const struct spi_device		*spi = to_spi_device(dev);
395 	int rc;
396 
397 	rc = acpi_device_uevent_modalias(dev, env);
398 	if (rc != -ENODEV)
399 		return rc;
400 
401 	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
402 }
403 
404 static int spi_probe(struct device *dev)
405 {
406 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
407 	struct spi_device		*spi = to_spi_device(dev);
408 	struct fwnode_handle		*fwnode = dev_fwnode(dev);
409 	int ret;
410 
411 	ret = of_clk_set_defaults(dev->of_node, false);
412 	if (ret)
413 		return ret;
414 
415 	if (is_of_node(fwnode))
416 		spi->irq = of_irq_get(dev->of_node, 0);
417 	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
418 		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
419 	if (spi->irq == -EPROBE_DEFER)
420 		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
421 	if (spi->irq < 0)
422 		spi->irq = 0;
423 
424 	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
425 					PD_FLAG_DETACH_POWER_OFF);
426 	if (ret)
427 		return ret;
428 
429 	if (sdrv->probe)
430 		ret = sdrv->probe(spi);
431 
432 	return ret;
433 }
434 
435 static void spi_remove(struct device *dev)
436 {
437 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
438 
439 	if (sdrv->remove)
440 		sdrv->remove(to_spi_device(dev));
441 }
442 
443 static void spi_shutdown(struct device *dev)
444 {
445 	if (dev->driver) {
446 		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
447 
448 		if (sdrv->shutdown)
449 			sdrv->shutdown(to_spi_device(dev));
450 	}
451 }
452 
453 const struct bus_type spi_bus_type = {
454 	.name		= "spi",
455 	.dev_groups	= spi_dev_groups,
456 	.match		= spi_match_device,
457 	.uevent		= spi_uevent,
458 	.probe		= spi_probe,
459 	.remove		= spi_remove,
460 	.shutdown	= spi_shutdown,
461 };
462 EXPORT_SYMBOL_GPL(spi_bus_type);
463 
464 /**
465  * __spi_register_driver - register a SPI driver
466  * @owner: owner module of the driver to register
467  * @sdrv: the driver to register
468  * Context: can sleep
469  *
470  * Return: zero on success, else a negative error code.
471  */
472 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
473 {
474 	sdrv->driver.owner = owner;
475 	sdrv->driver.bus = &spi_bus_type;
476 
477 	/*
478 	 * For Really Good Reasons we use spi: modaliases not of:
479 	 * modaliases for DT so module autoloading won't work if we
480 	 * don't have a spi_device_id as well as a compatible string.
481 	 */
482 	if (sdrv->driver.of_match_table) {
483 		const struct of_device_id *of_id;
484 
485 		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
486 		     of_id++) {
487 			const char *of_name;
488 
489 			/* Strip off any vendor prefix */
490 			of_name = strnchr(of_id->compatible,
491 					  sizeof(of_id->compatible), ',');
492 			if (of_name)
493 				of_name++;
494 			else
495 				of_name = of_id->compatible;
496 
497 			if (sdrv->id_table) {
498 				const struct spi_device_id *spi_id;
499 
500 				spi_id = spi_match_id(sdrv->id_table, of_name);
501 				if (spi_id)
502 					continue;
503 			} else {
504 				if (strcmp(sdrv->driver.name, of_name) == 0)
505 					continue;
506 			}
507 
508 			pr_warn("SPI driver %s has no spi_device_id for %s\n",
509 				sdrv->driver.name, of_id->compatible);
510 		}
511 	}
512 
513 	return driver_register(&sdrv->driver);
514 }
515 EXPORT_SYMBOL_GPL(__spi_register_driver);
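/*
 * Illustrative sketch (not part of this file; "foo"/"acme" are made-up
 * names): a client driver that avoids the warning above by providing
 * both a spi_device_id table and an OF match table, so DT-based module
 * autoloading works:
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */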
516 
517 /*-------------------------------------------------------------------------*/
518 
519 /*
520  * SPI devices should normally not be created by SPI device drivers; that
521  * would make them board-specific.  Similarly with SPI controller drivers.
522  * Device registration normally goes into, e.g., arch/.../mach.../board-YYY.c
523  * with other readonly (flashable) information about mainboard devices.
524  */
525 
526 struct boardinfo {
527 	struct list_head	list;
528 	struct spi_board_info	board_info;
529 };
530 
531 static LIST_HEAD(board_list);
532 static LIST_HEAD(spi_controller_list);
533 
534 /*
535  * Used to protect add/del operations on the board_info and
536  * spi_controller lists and their matching process; also used
537  * to protect the spi_controller_idr object (struct idr).
538  */
539 static DEFINE_MUTEX(board_lock);
540 
541 /**
542  * spi_alloc_device - Allocate a new SPI device
543  * @ctlr: Controller to which device is connected
544  * Context: can sleep
545  *
546  * Allows a driver to allocate and initialize a spi_device without
547  * registering it immediately.  This allows a driver to directly
548  * fill the spi_device with device parameters before calling
549  * spi_add_device() on it.
550  *
551  * Caller is responsible to call spi_add_device() on the returned
552  * spi_device structure to add it to the SPI controller.  If the caller
553  * needs to discard the spi_device without adding it, then it should
554  * call spi_dev_put() on it.
555  *
556  * Return: a pointer to the new device, or NULL.
557  */
558 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
559 {
560 	struct spi_device	*spi;
561 
562 	if (!spi_controller_get(ctlr))
563 		return NULL;
564 
565 	spi = kzalloc_obj(*spi);
566 	if (!spi) {
567 		spi_controller_put(ctlr);
568 		return NULL;
569 	}
570 
571 	spi->pcpu_statistics = spi_alloc_pcpu_stats();
572 	if (!spi->pcpu_statistics) {
573 		kfree(spi);
574 		spi_controller_put(ctlr);
575 		return NULL;
576 	}
577 
578 	spi->controller = ctlr;
579 	spi->dev.parent = &ctlr->dev;
580 	spi->dev.bus = &spi_bus_type;
581 	spi->dev.release = spidev_release;
582 	spi->mode = ctlr->buswidth_override_bits;
583 	spi->num_chipselect = 1;
584 
585 	device_initialize(&spi->dev);
586 	return spi;
587 }
588 EXPORT_SYMBOL_GPL(spi_alloc_device);
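/*
 * Illustrative usage sketch (hypothetical caller, made-up values); on
 * failure to add, the reference is dropped with spi_dev_put() as the
 * kerneldoc above requires:
 *
 *	struct spi_device *spi;
 *	int ret;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 2);
 *	spi->max_speed_hz = 1000000;
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */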
589 
590 static void spi_dev_set_name(struct spi_device *spi)
591 {
592 	struct device *dev = &spi->dev;
593 	struct fwnode_handle *fwnode = dev_fwnode(dev);
594 
595 	if (is_acpi_device_node(fwnode)) {
596 		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
597 		return;
598 	}
599 
600 	if (is_software_node(fwnode)) {
601 		dev_set_name(dev, "spi-%pfwP", fwnode);
602 		return;
603 	}
604 
605 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
606 		     spi_get_chipselect(spi, 0));
607 }
608 
609 /*
610  * Zero(0) is a valid physical CS value and can be located at any
611  * logical CS in the spi->chip_select[]. If all the physical CS
612  * were initialized to 0, it would be difficult to differentiate
613  * between a valid physical CS 0 and an unused logical CS whose
614  * physical CS happens to be 0. As a solution, initialize all the
615  * CS to -1: all the unused logical CS then have a physical CS value
616  * of -1 and can be ignored while performing physical CS validity checks.
617  */
618 #define SPI_INVALID_CS		((s8)-1)
619 
620 static inline int spi_dev_check_cs(struct device *dev,
621 				   struct spi_device *spi, u8 idx,
622 				   struct spi_device *new_spi, u8 new_idx)
623 {
624 	u8 cs, cs_new;
625 	u8 idx_new;
626 
627 	cs = spi_get_chipselect(spi, idx);
628 	for (idx_new = new_idx; idx_new < new_spi->num_chipselect; idx_new++) {
629 		cs_new = spi_get_chipselect(new_spi, idx_new);
630 		if (cs == cs_new) {
631 			dev_err(dev, "chipselect %u already in use\n", cs_new);
632 			return -EBUSY;
633 		}
634 	}
635 	return 0;
636 }
637 
638 struct spi_dev_check_info {
639 	struct spi_device *new_spi;
640 	struct spi_device *parent;	/* set for ancillary devices */
641 };
642 
643 static int spi_dev_check(struct device *dev, void *data)
644 {
645 	struct spi_device *spi = to_spi_device(dev);
646 	struct spi_dev_check_info *info = data;
647 	struct spi_device *new_spi = info->new_spi;
648 	int status, idx;
649 
650 	/*
651 	 * When registering an ancillary device, skip checking against the
652 	 * parent device since the ancillary is intentionally using one of
653 	 * the parent's chip selects.
654 	 */
655 	if (info->parent && spi == info->parent)
656 		return 0;
657 
658 	if (spi->controller == new_spi->controller) {
659 		for (idx = 0; idx < spi->num_chipselect; idx++) {
660 			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
661 			if (status)
662 				return status;
663 		}
664 	}
665 	return 0;
666 }
667 
668 static void spi_cleanup(struct spi_device *spi)
669 {
670 	if (spi->controller->cleanup)
671 		spi->controller->cleanup(spi);
672 }
673 
674 static int __spi_add_device(struct spi_device *spi, struct spi_device *parent)
675 {
676 	struct spi_controller *ctlr = spi->controller;
677 	struct device *dev = ctlr->dev.parent;
678 	struct spi_dev_check_info check_info;
679 	int status, idx;
680 	u8 cs;
681 
682 	if (spi->num_chipselect > SPI_DEVICE_CS_CNT_MAX) {
683 		dev_err(dev, "num_cs %d > max %d\n", spi->num_chipselect,
684 			SPI_DEVICE_CS_CNT_MAX);
685 		return -EOVERFLOW;
686 	}
687 
688 	for (idx = 0; idx < spi->num_chipselect; idx++) {
689 		/* Chipselects are numbered 0..max; validate. */
690 		cs = spi_get_chipselect(spi, idx);
691 		if (cs >= ctlr->num_chipselect) {
692 			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
693 				ctlr->num_chipselect);
694 			return -EINVAL;
695 		}
696 	}
697 
698 	/*
699 	 * Make sure that multiple logical CS don't map to the same physical CS.
700 	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
701 	 */
702 	if (!spi_controller_is_target(ctlr)) {
703 		for (idx = 0; idx < spi->num_chipselect; idx++) {
704 			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
705 			if (status)
706 				return status;
707 		}
708 	}
709 
710 	/* Initialize unused logical CS as invalid */
711 	for (idx = spi->num_chipselect; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
712 		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
713 
714 	/* Set the bus ID string */
715 	spi_dev_set_name(spi);
716 
717 	/*
718 	 * We need to make sure there's no other device with this
719 	 * chipselect **BEFORE** we call setup(), else we'll trash
720 	 * its configuration.
721 	 */
722 	check_info.new_spi = spi;
723 	check_info.parent = parent;
724 	status = bus_for_each_dev(&spi_bus_type, NULL, &check_info, spi_dev_check);
725 	if (status)
726 		return status;
727 
728 	/* Controller may unregister concurrently */
729 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
730 	    !device_is_registered(&ctlr->dev)) {
731 		return -ENODEV;
732 	}
733 
734 	if (ctlr->cs_gpiods) {
735 		u8 cs;
736 
737 		for (idx = 0; idx < spi->num_chipselect; idx++) {
738 			cs = spi_get_chipselect(spi, idx);
739 			spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
740 		}
741 	}
742 
743 	/*
744 	 * Drivers may modify this initial i/o setup, but will
745 	 * normally rely on the device being setup.  Devices
746 	 * using SPI_CS_HIGH can't coexist well otherwise...
747 	 */
748 	status = __spi_setup(spi, true);
749 	if (status < 0) {
750 		dev_err(dev, "can't setup %s, status %d\n",
751 				dev_name(&spi->dev), status);
752 		return status;
753 	}
754 
755 	/* Device may be bound to an active driver when this returns */
756 	status = device_add(&spi->dev);
757 	if (status < 0) {
758 		dev_err(dev, "can't add %s, status %d\n",
759 				dev_name(&spi->dev), status);
760 		spi_cleanup(spi);
761 	} else {
762 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
763 	}
764 
765 	return status;
766 }
767 
768 /**
769  * spi_add_device - Add spi_device allocated with spi_alloc_device
770  * @spi: spi_device to register
771  *
772  * Companion function to spi_alloc_device.  Devices allocated with
773  * spi_alloc_device can be added onto the SPI bus with this function.
774  *
775  * Return: 0 on success; negative errno on failure
776  */
777 int spi_add_device(struct spi_device *spi)
778 {
779 	struct spi_controller *ctlr = spi->controller;
780 	int status;
781 
782 	/* Set the bus ID string */
783 	spi_dev_set_name(spi);
784 
785 	mutex_lock(&ctlr->add_lock);
786 	status = __spi_add_device(spi, NULL);
787 	mutex_unlock(&ctlr->add_lock);
788 	return status;
789 }
790 EXPORT_SYMBOL_GPL(spi_add_device);
791 
792 /**
793  * spi_new_device - instantiate one new SPI device
794  * @ctlr: Controller to which device is connected
795  * @chip: Describes the SPI device
796  * Context: can sleep
797  *
798  * On typical mainboards, this is purely internal; and it's not needed
799  * after board init creates the hard-wired devices.  Some development
800  * platforms may not be able to use spi_register_board_info though, and
801  * this is exported so that for example a USB or parport based adapter
802  * driver could add devices (which it would learn about out-of-band).
803  *
804  * Return: the new device, or NULL.
805  */
806 struct spi_device *spi_new_device(struct spi_controller *ctlr,
807 				  struct spi_board_info *chip)
808 {
809 	struct spi_device	*proxy;
810 	int			status;
811 
812 	/*
813 	 * NOTE:  caller did any chip->bus_num checks necessary.
814 	 *
815 	 * Also, unless we change the return value convention to use
816 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
817 	 * suggests syslogged diagnostics are best here (ugh).
818 	 */
819 
820 	proxy = spi_alloc_device(ctlr);
821 	if (!proxy)
822 		return NULL;
823 
824 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
825 
826 	/* Use provided chip-select for proxy device */
827 	spi_set_chipselect(proxy, 0, chip->chip_select);
828 
829 	proxy->max_speed_hz = chip->max_speed_hz;
830 	proxy->mode = chip->mode;
831 	proxy->irq = chip->irq;
832 	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
833 	proxy->dev.platform_data = (void *) chip->platform_data;
834 	proxy->controller_data = chip->controller_data;
835 	proxy->controller_state = NULL;
836 	/*
837 	 * By default spi->chip_select[0] will hold the physical CS number,
838 	 * so set bit 0 in spi->cs_index_mask.
839 	 */
840 	proxy->cs_index_mask = BIT(0);
841 
842 	if (chip->swnode) {
843 		status = device_add_software_node(&proxy->dev, chip->swnode);
844 		if (status) {
845 			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
846 				chip->modalias, status);
847 			goto err_dev_put;
848 		}
849 	}
850 
851 	status = spi_add_device(proxy);
852 	if (status < 0)
853 		goto err_dev_put;
854 
855 	return proxy;
856 
857 err_dev_put:
858 	device_remove_software_node(&proxy->dev);
859 	spi_dev_put(proxy);
860 	return NULL;
861 }
862 EXPORT_SYMBOL_GPL(spi_new_device);
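/*
 * Illustrative sketch of an out-of-band adapter driver using this
 * (hypothetical names and values):
 *
 *	static struct spi_board_info chip = {
 *		.modalias	= "foo",
 *		.max_speed_hz	= 500000,
 *		.mode		= SPI_MODE_3,
 *		.chip_select	= 0,
 *	};
 *
 *	struct spi_device *spi = spi_new_device(ctlr, &chip);
 *	if (!spi)
 *		... bail out; diagnostics were already syslogged ...
 */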
863 
864 /**
865  * spi_unregister_device - unregister a single SPI device
866  * @spi: spi_device to unregister
867  *
868  * Start making the passed SPI device vanish. Normally this would be handled
869  * by spi_unregister_controller().
870  */
871 void spi_unregister_device(struct spi_device *spi)
872 {
873 	struct fwnode_handle *fwnode;
874 
875 	if (!spi)
876 		return;
877 
878 	fwnode = dev_fwnode(&spi->dev);
879 	if (is_of_node(fwnode)) {
880 		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
881 		of_node_put(to_of_node(fwnode));
882 	} else if (is_acpi_device_node(fwnode)) {
883 		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
884 	}
885 	device_remove_software_node(&spi->dev);
886 	device_del(&spi->dev);
887 	spi_cleanup(spi);
888 	put_device(&spi->dev);
889 }
890 EXPORT_SYMBOL_GPL(spi_unregister_device);
891 
892 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
893 					      struct spi_board_info *bi)
894 {
895 	struct spi_device *dev;
896 
897 	if (ctlr->bus_num != bi->bus_num)
898 		return;
899 
900 	dev = spi_new_device(ctlr, bi);
901 	if (!dev)
902 		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
903 			bi->modalias);
904 }
905 
906 /**
907  * spi_register_board_info - register SPI devices for a given board
908  * @info: array of chip descriptors
909  * @n: how many descriptors are provided
910  * Context: can sleep
911  *
912  * Board-specific early init code calls this (probably during arch_initcall)
913  * with segments of the SPI device table.  Any device nodes are created later,
914  * after the relevant parent SPI controller (bus_num) is defined.  We keep
915  * this table of devices forever, so that reloading a controller driver will
916  * not make Linux forget about these hard-wired devices.
917  *
918  * Other code can also call this, e.g. a particular add-on board might provide
919  * SPI devices through its expansion connector, so code initializing that board
920  * would naturally declare its SPI devices.
921  *
922  * The board info passed can safely be __initdata ... but be careful of
923  * any embedded pointers (platform_data, etc), they're copied as-is.
924  *
925  * Return: zero on success, else a negative error code.
926  */
927 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
928 {
929 	struct boardinfo *bi;
930 	int i;
931 
932 	if (!n)
933 		return 0;
934 
935 	bi = kzalloc_objs(*bi, n);
936 	if (!bi)
937 		return -ENOMEM;
938 
939 	for (i = 0; i < n; i++, bi++, info++) {
940 		struct spi_controller *ctlr;
941 
942 		memcpy(&bi->board_info, info, sizeof(*info));
943 
944 		mutex_lock(&board_lock);
945 		list_add_tail(&bi->list, &board_list);
946 		list_for_each_entry(ctlr, &spi_controller_list, list)
947 			spi_match_controller_to_boardinfo(ctlr,
948 							  &bi->board_info);
949 		mutex_unlock(&board_lock);
950 	}
951 
952 	return 0;
953 }
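/*
 * Typical board-file usage (illustrative, made-up values), taking
 * advantage of the __initdata note in the kerneldoc above:
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 2000000,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */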
954 
955 /*-------------------------------------------------------------------------*/
956 
957 /* Core methods for SPI resource management */
958 
959 /**
960  * spi_res_alloc - allocate a spi resource that is life-cycle managed
961  *                 during the processing of a spi_message while using
962  *                 spi_transfer_one
963  * @spi:     the SPI device for which we allocate memory
964  * @release: the release code to execute for this resource
965  * @size:    size to alloc and return
966  * @gfp:     GFP allocation flags
967  *
968  * Return: the pointer to the allocated data
969  *
970  * This may get enhanced in the future to allocate from a memory pool
971  * of the @spi_device or @spi_controller to avoid repeated allocations.
972  */
973 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
974 			   size_t size, gfp_t gfp)
975 {
976 	struct spi_res *sres;
977 
978 	sres = kzalloc(sizeof(*sres) + size, gfp);
979 	if (!sres)
980 		return NULL;
981 
982 	INIT_LIST_HEAD(&sres->entry);
983 	sres->release = release;
984 
985 	return sres->data;
986 }
987 
988 /**
989  * spi_res_free - free an SPI resource
990  * @res: pointer to the custom data of a resource
991  */
992 static void spi_res_free(void *res)
993 {
994 	struct spi_res *sres = container_of(res, struct spi_res, data);
995 
996 	WARN_ON(!list_empty(&sres->entry));
997 	kfree(sres);
998 }
999 
1000 /**
1001  * spi_res_add - add a spi_res to the spi_message
1002  * @message: the SPI message
1003  * @res:     the spi_resource
1004  */
1005 static void spi_res_add(struct spi_message *message, void *res)
1006 {
1007 	struct spi_res *sres = container_of(res, struct spi_res, data);
1008 
1009 	WARN_ON(!list_empty(&sres->entry));
1010 	list_add_tail(&sres->entry, &message->resources);
1011 }
1012 
1013 /**
1014  * spi_res_release - release all SPI resources for this message
1015  * @ctlr:  the @spi_controller
1016  * @message: the @spi_message
1017  */
1018 static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
1019 {
1020 	struct spi_res *res, *tmp;
1021 
1022 	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1023 		if (res->release)
1024 			res->release(ctlr, message, res->data);
1025 
1026 		list_del(&res->entry);
1027 
1028 		kfree(res);
1029 	}
1030 }
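/*
 * Illustrative lifecycle sketch for the helpers above (hypothetical
 * release callback):
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		... undo whatever this resource set up for @msg ...
 *	}
 *
 *	ptr = spi_res_alloc(spi, foo_release, size, GFP_KERNEL);
 *	if (!ptr)
 *		return -ENOMEM;
 *	spi_res_add(msg, ptr);
 *	... spi_res_release() later runs foo_release() and frees ptr ...
 */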
1031 
1032 /*-------------------------------------------------------------------------*/
1033 #define spi_for_each_valid_cs(spi, idx)				\
1034 	for (idx = 0; idx < spi->num_chipselect; idx++)		\
1035 		if (!(spi->cs_index_mask & BIT(idx))) {} else
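/*
 * Note on the macro above: the "if (!...) {} else" construct skips
 * logical CS not set in spi->cs_index_mask while keeping the whole
 * expansion a single statement, so callers can attach a braced body
 * just like to a plain for loop.
 */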
1036 
1037 static inline bool spi_is_last_cs(struct spi_device *spi)
1038 {
1039 	u8 idx;
1040 	bool last = false;
1041 
1042 	spi_for_each_valid_cs(spi, idx) {
1043 		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1044 			last = true;
1045 	}
1046 	return last;
1047 }
1048 
1049 static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
1050 {
1051 	/*
1052 	 * Historically ACPI has no means of expressing the GPIO polarity,
1053 	 * and thus the SPISerialBus() resource defines it on a per-chip
1054 	 * basis. In order to avoid a chain of negations, the GPIO
1055 	 * polarity is considered to be Active High. Even for the cases
1056 	 * when _DSD() is involved (in the updated versions of ACPI)
1057 	 * the GPIO CS polarity must be defined Active High to avoid
1058 	 * ambiguity. That's why we use enable, which takes SPI_CS_HIGH
1059 	 * into account.
1060 	 */
1061 	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
1062 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
1063 	else
1064 		/* Polarity handled by GPIO library */
1065 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
1066 
1067 	if (activate)
1068 		spi_delay_exec(&spi->cs_setup, NULL);
1069 	else
1070 		spi_delay_exec(&spi->cs_inactive, NULL);
1071 }
1072 
1073 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1074 {
1075 	bool activate = enable;
1076 	u8 idx;
1077 
1078 	/*
1079 	 * Avoid calling into the driver (or doing delays) if the chip select
1080 	 * isn't actually changing from the last time this was called.
1081 	 */
1082 	if (!force && (enable == spi_is_last_cs(spi)) &&
1083 	    (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
1084 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1085 		return;
1086 
1087 	trace_spi_set_cs(spi, activate);
1088 
1089 	spi->controller->last_cs_index_mask = spi->cs_index_mask;
1090 	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++) {
1091 		if (enable && idx < spi->num_chipselect)
1092 			spi->controller->last_cs[idx] = spi_get_chipselect(spi, 0);
1093 		else
1094 			spi->controller->last_cs[idx] = SPI_INVALID_CS;
1095 	}
1096 
1097 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1098 	if (spi->controller->last_cs_mode_high)
1099 		enable = !enable;
1100 
1101 	/*
1102 	 * Handle chip select delays for GPIO based CS or controllers without
1103 	 * programmable chip select timing.
1104 	 */
1105 	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1106 		spi_delay_exec(&spi->cs_hold, NULL);
1107 
1108 	if (spi_is_csgpiod(spi)) {
1109 		if (!(spi->mode & SPI_NO_CS)) {
1110 			spi_for_each_valid_cs(spi, idx) {
1111 				if (spi_get_csgpiod(spi, idx))
1112 					spi_toggle_csgpiod(spi, idx, enable, activate);
1113 			}
1114 		}
1115 		/* Some SPI controllers need both GPIO CS & ->set_cs() */
1116 		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1117 		    spi->controller->set_cs)
1118 			spi->controller->set_cs(spi, !enable);
1119 	} else if (spi->controller->set_cs) {
1120 		spi->controller->set_cs(spi, !enable);
1121 	}
1122 
1123 	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1124 		if (activate)
1125 			spi_delay_exec(&spi->cs_setup, NULL);
1126 		else
1127 			spi_delay_exec(&spi->cs_inactive, NULL);
1128 	}
1129 }
1130 
1131 #ifdef CONFIG_HAS_DMA
1132 static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1133 			     struct sg_table *sgt, void *buf, size_t len,
1134 			     enum dma_data_direction dir, unsigned long attrs)
1135 {
1136 	const bool vmalloced_buf = is_vmalloc_addr(buf);
1137 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
1138 #ifdef CONFIG_HIGHMEM
1139 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1140 				(unsigned long)buf < (PKMAP_BASE +
1141 					(LAST_PKMAP * PAGE_SIZE)));
1142 #else
1143 	const bool kmap_buf = false;
1144 #endif
1145 	int desc_len;
1146 	int sgs;
1147 	struct page *vm_page;
1148 	struct scatterlist *sg;
1149 	void *sg_buf;
1150 	size_t min;
1151 	int i, ret;
1152 
1153 	if (vmalloced_buf || kmap_buf) {
1154 		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1155 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1156 	} else if (virt_addr_valid(buf)) {
1157 		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1158 		sgs = DIV_ROUND_UP(len, desc_len);
1159 	} else {
1160 		return -EINVAL;
1161 	}
1162 
1163 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1164 	if (ret != 0)
1165 		return ret;
1166 
1167 	sg = &sgt->sgl[0];
1168 	for (i = 0; i < sgs; i++) {
1169 
1170 		if (vmalloced_buf || kmap_buf) {
1171 			/*
1172 			 * Next scatterlist entry size is the minimum between
1173 			 * the desc_len and the remaining buffer length that
1174 			 * fits in a page.
1175 			 */
1176 			min = min_t(size_t, desc_len,
1177 				    min_t(size_t, len,
1178 					  PAGE_SIZE - offset_in_page(buf)));
1179 			if (vmalloced_buf)
1180 				vm_page = vmalloc_to_page(buf);
1181 			else
1182 				vm_page = kmap_to_page(buf);
1183 			if (!vm_page) {
1184 				sg_free_table(sgt);
1185 				return -ENOMEM;
1186 			}
1187 			sg_set_page(sg, vm_page,
1188 				    min, offset_in_page(buf));
1189 		} else {
1190 			min = min_t(size_t, len, desc_len);
1191 			sg_buf = buf;
1192 			sg_set_buf(sg, sg_buf, min);
1193 		}
1194 
1195 		buf += min;
1196 		len -= min;
1197 		sg = sg_next(sg);
1198 	}
1199 
1200 	ret = dma_map_sgtable(dev, sgt, dir, attrs);
1201 	if (ret < 0) {
1202 		sg_free_table(sgt);
1203 		return ret;
1204 	}
1205 
1206 	return 0;
1207 }
1208 
1209 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1210 		struct sg_table *sgt, void *buf, size_t len,
1211 		enum dma_data_direction dir)
1212 {
1213 	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1214 }
1215 
1216 static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1217 				struct device *dev, struct sg_table *sgt,
1218 				enum dma_data_direction dir,
1219 				unsigned long attrs)
1220 {
1221 	dma_unmap_sgtable(dev, sgt, dir, attrs);
1222 	sg_free_table(sgt);
1223 	sgt->orig_nents = 0;
1224 	sgt->nents = 0;
1225 }
1226 
1227 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1228 		   struct sg_table *sgt, enum dma_data_direction dir)
1229 {
1230 	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1231 }
1232 
1233 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1234 {
1235 	struct device *tx_dev, *rx_dev;
1236 	struct spi_transfer *xfer;
1237 	int ret;
1238 
1239 	if (!ctlr->can_dma)
1240 		return 0;
1241 
1242 	if (ctlr->dma_tx)
1243 		tx_dev = ctlr->dma_tx->device->dev;
1244 	else if (ctlr->dma_map_dev)
1245 		tx_dev = ctlr->dma_map_dev;
1246 	else
1247 		tx_dev = ctlr->dev.parent;
1248 
1249 	if (ctlr->dma_rx)
1250 		rx_dev = ctlr->dma_rx->device->dev;
1251 	else if (ctlr->dma_map_dev)
1252 		rx_dev = ctlr->dma_map_dev;
1253 	else
1254 		rx_dev = ctlr->dev.parent;
1255 
1256 	ret = -ENOMSG;
1257 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1258 		/* The sync is done before each transfer. */
1259 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1260 
1261 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1262 			continue;
1263 
1264 		if (xfer->tx_buf != NULL) {
1265 			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1266 						(void *)xfer->tx_buf,
1267 						xfer->len, DMA_TO_DEVICE,
1268 						attrs);
1269 			if (ret != 0)
1270 				return ret;
1271 
1272 			xfer->tx_sg_mapped = true;
1273 		}
1274 
1275 		if (xfer->rx_buf != NULL) {
1276 			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1277 						xfer->rx_buf, xfer->len,
1278 						DMA_FROM_DEVICE, attrs);
1279 			if (ret != 0) {
1280 				spi_unmap_buf_attrs(ctlr, tx_dev,
1281 						&xfer->tx_sg, DMA_TO_DEVICE,
1282 						attrs);
1283 
1284 				return ret;
1285 			}
1286 
1287 			xfer->rx_sg_mapped = true;
1288 		}
1289 	}
1290 	/* No transfer has been mapped, bail out with success */
1291 	if (ret)
1292 		return 0;
1293 
1294 	ctlr->cur_rx_dma_dev = rx_dev;
1295 	ctlr->cur_tx_dma_dev = tx_dev;
1296 
1297 	return 0;
1298 }
1299 
1300 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1301 {
1302 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1303 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1304 	struct spi_transfer *xfer;
1305 
1306 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1307 		/* The sync has already been done after each transfer. */
1308 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1309 
1310 		if (xfer->rx_sg_mapped)
1311 			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1312 					    DMA_FROM_DEVICE, attrs);
1313 		xfer->rx_sg_mapped = false;
1314 
1315 		if (xfer->tx_sg_mapped)
1316 			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1317 					    DMA_TO_DEVICE, attrs);
1318 		xfer->tx_sg_mapped = false;
1319 	}
1320 
1321 	return 0;
1322 }
1323 
1324 static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1325 				    struct spi_transfer *xfer)
1326 {
1327 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1328 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1329 
1330 	if (xfer->tx_sg_mapped)
1331 		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1332 	if (xfer->rx_sg_mapped)
1333 		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1334 }
1335 
1336 static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1337 				 struct spi_transfer *xfer)
1338 {
1339 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1340 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1341 
1342 	if (xfer->rx_sg_mapped)
1343 		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1344 	if (xfer->tx_sg_mapped)
1345 		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1346 }
1347 #else /* !CONFIG_HAS_DMA */
1348 static inline int __spi_map_msg(struct spi_controller *ctlr,
1349 				struct spi_message *msg)
1350 {
1351 	return 0;
1352 }
1353 
1354 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1355 				  struct spi_message *msg)
1356 {
1357 	return 0;
1358 }
1359 
1360 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1361 				    struct spi_transfer *xfer)
1362 {
1363 }
1364 
1365 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1366 				 struct spi_transfer *xfer)
1367 {
1368 }
1369 #endif /* !CONFIG_HAS_DMA */
1370 
1371 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1372 				struct spi_message *msg)
1373 {
1374 	struct spi_transfer *xfer;
1375 
1376 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1377 		/*
1378 		 * Restore tx_buf and rx_buf to NULL if they were replaced
1379 		 * by the dummy buffers in spi_map_msg().
1380 		 */
1381 		if (xfer->tx_buf == ctlr->dummy_tx)
1382 			xfer->tx_buf = NULL;
1383 		if (xfer->rx_buf == ctlr->dummy_rx)
1384 			xfer->rx_buf = NULL;
1385 	}
1386 
1387 	return __spi_unmap_msg(ctlr, msg);
1388 }
1389 
1390 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1391 {
1392 	struct spi_transfer *xfer;
1393 	void *tmp;
1394 	unsigned int max_tx, max_rx;
1395 
1396 	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1397 		&& !(msg->spi->mode & SPI_3WIRE)) {
1398 		max_tx = 0;
1399 		max_rx = 0;
1400 
1401 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1402 			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1403 			    !xfer->tx_buf)
1404 				max_tx = max(xfer->len, max_tx);
1405 			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1406 			    !xfer->rx_buf)
1407 				max_rx = max(xfer->len, max_rx);
1408 		}
1409 
1410 		if (max_tx) {
1411 			tmp = krealloc(ctlr->dummy_tx, max_tx,
1412 				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1413 			if (!tmp)
1414 				return -ENOMEM;
1415 			ctlr->dummy_tx = tmp;
1416 		}
1417 
1418 		if (max_rx) {
1419 			tmp = krealloc(ctlr->dummy_rx, max_rx,
1420 				       GFP_KERNEL | GFP_DMA);
1421 			if (!tmp)
1422 				return -ENOMEM;
1423 			ctlr->dummy_rx = tmp;
1424 		}
1425 
1426 		if (max_tx || max_rx) {
1427 			list_for_each_entry(xfer, &msg->transfers,
1428 					    transfer_list) {
1429 				if (!xfer->len)
1430 					continue;
1431 				if (!xfer->tx_buf)
1432 					xfer->tx_buf = ctlr->dummy_tx;
1433 				if (!xfer->rx_buf)
1434 					xfer->rx_buf = ctlr->dummy_rx;
1435 			}
1436 		}
1437 	}
1438 
1439 	return __spi_map_msg(ctlr, msg);
1440 }
1441 
1442 static int spi_transfer_wait(struct spi_controller *ctlr,
1443 			     struct spi_message *msg,
1444 			     struct spi_transfer *xfer)
1445 {
1446 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1447 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1448 	u32 speed_hz = xfer->speed_hz;
1449 	unsigned long long ms;
1450 
1451 	if (spi_controller_is_target(ctlr)) {
1452 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1453 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1454 			return -EINTR;
1455 		}
1456 	} else {
1457 		if (!speed_hz)
1458 			speed_hz = 100000;
1459 
1460 		/*
1461 		 * For each byte we wait for 8 cycles of the SPI clock.
1462 		 * Since speed is defined in Hz and we want milliseconds,
1463 		 * apply the MSEC_PER_SEC multiplier before the division,
1464 		 * otherwise we may get 0 for short transfers.
1465 		 */
1466 		ms = 8LL * MSEC_PER_SEC * xfer->len;
1467 		do_div(ms, speed_hz);
1468 
1469 		/*
1470 		 * Double the result and add a 200 ms tolerance; use the
1471 		 * predefined maximum in case of overflow.
1472 		 */
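		/*
		 * E.g. (illustrative numbers): a 256-byte transfer at the
		 * 100 kHz fallback rate yields ms = 8 * 1000 * 256 / 100000
		 * = 20 (truncated) above; doubling and adding 200 below
		 * gives a 240 ms timeout.
		 */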
1473 		ms += ms + 200;
1474 		if (ms > UINT_MAX)
1475 			ms = UINT_MAX;
1476 
1477 		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1478 						 msecs_to_jiffies(ms));
1479 
1480 		if (ms == 0) {
1481 			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1482 			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1483 			dev_err(&msg->spi->dev,
1484 				"SPI transfer timed out\n");
1485 			return -ETIMEDOUT;
1486 		}
1487 
1488 		if (xfer->error & SPI_TRANS_FAIL_IO)
1489 			return -EIO;
1490 	}
1491 
1492 	return 0;
1493 }
1494 
1495 static void _spi_transfer_delay_ns(u32 ns)
1496 {
1497 	if (!ns)
1498 		return;
1499 	if (ns <= NSEC_PER_USEC) {
1500 		ndelay(ns);
1501 	} else {
1502 		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1503 
1504 		fsleep(us);
1505 	}
1506 }
1507 
1508 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1509 {
1510 	u32 delay = _delay->value;
1511 	u32 unit = _delay->unit;
1512 	u32 hz;
1513 
1514 	if (!delay)
1515 		return 0;
1516 
1517 	switch (unit) {
1518 	case SPI_DELAY_UNIT_USECS:
1519 		delay *= NSEC_PER_USEC;
1520 		break;
1521 	case SPI_DELAY_UNIT_NSECS:
1522 		/* Nothing to do here */
1523 		break;
1524 	case SPI_DELAY_UNIT_SCK:
1525 		/* Clock cycles need to be obtained from spi_transfer */
1526 		if (!xfer)
1527 			return -EINVAL;
1528 		/*
1529 		 * If the effective speed is unknown, approximate it by
1530 		 * underestimating with half of the requested Hz.
1531 		 */
1532 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1533 		if (!hz)
1534 			return -EINVAL;
1535 
1536 		/* Convert delay to nanoseconds */
1537 		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1538 		break;
1539 	default:
1540 		return -EINVAL;
1541 	}
1542 
1543 	return delay;
1544 }
1545 EXPORT_SYMBOL_GPL(spi_delay_to_ns);
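/*
 * E.g. (illustrative): a spi_delay of { .value = 4, .unit =
 * SPI_DELAY_UNIT_SCK } on a transfer with effective_speed_hz == 1000000
 * converts to 4 * DIV_ROUND_UP(NSEC_PER_SEC, 1000000) = 4000 ns.
 */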
1546 
1547 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1548 {
1549 	int delay;
1550 
1551 	might_sleep();
1552 
1553 	if (!_delay)
1554 		return -EINVAL;
1555 
1556 	delay = spi_delay_to_ns(_delay, xfer);
1557 	if (delay < 0)
1558 		return delay;
1559 
1560 	_spi_transfer_delay_ns(delay);
1561 
1562 	return 0;
1563 }
1564 EXPORT_SYMBOL_GPL(spi_delay_exec);
1565 
1566 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1567 					  struct spi_transfer *xfer)
1568 {
1569 	u32 default_delay_ns = 10 * NSEC_PER_USEC;
1570 	u32 delay = xfer->cs_change_delay.value;
1571 	u32 unit = xfer->cs_change_delay.unit;
1572 	int ret;
1573 
1574 	/* Return early on "fast" mode - for everything but USECS */
1575 	if (!delay) {
1576 		if (unit == SPI_DELAY_UNIT_USECS)
1577 			_spi_transfer_delay_ns(default_delay_ns);
1578 		return;
1579 	}
1580 
1581 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1582 	if (ret) {
1583 		dev_err_once(&msg->spi->dev,
1584 			     "Use of unsupported delay unit %i, using default of %luus\n",
1585 			     unit, default_delay_ns / NSEC_PER_USEC);
1586 		_spi_transfer_delay_ns(default_delay_ns);
1587 	}
1588 }
1589 
1590 void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1591 						  struct spi_transfer *xfer)
1592 {
1593 	_spi_transfer_cs_change_delay(msg, xfer);
1594 }
1595 EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1596 
1597 /*
1598  * spi_transfer_one_message - Default implementation of transfer_one_message()
1599  *
1600  * This is a standard implementation of transfer_one_message() for
1601  * drivers which implement a transfer_one() operation.  It provides
1602  * standard handling of delays and chip select management.
1603  */
1604 static int spi_transfer_one_message(struct spi_controller *ctlr,
1605 				    struct spi_message *msg)
1606 {
1607 	struct spi_transfer *xfer;
1608 	bool keep_cs = false;
1609 	int ret = 0;
1610 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1611 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1612 
1613 	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1614 	spi_set_cs(msg->spi, !xfer->cs_off, false);
1615 
1616 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1617 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1618 
1619 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1620 		trace_spi_transfer_start(msg, xfer);
1621 
1622 		spi_statistics_add_transfer_stats(statm, xfer, msg);
1623 		spi_statistics_add_transfer_stats(stats, xfer, msg);
1624 
1625 		if (!ctlr->ptp_sts_supported) {
1626 			xfer->ptp_sts_word_pre = 0;
1627 			ptp_read_system_prets(xfer->ptp_sts);
1628 		}
1629 
1630 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1631 			reinit_completion(&ctlr->xfer_completion);
1632 
1633 fallback_pio:
1634 			spi_dma_sync_for_device(ctlr, xfer);
1635 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1636 			if (ret < 0) {
1637 				spi_dma_sync_for_cpu(ctlr, xfer);
1638 
1639 				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1640 				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1641 					__spi_unmap_msg(ctlr, msg);
1642 					ctlr->fallback = true;
1643 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1644 					goto fallback_pio;
1645 				}
1646 
1647 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1648 							       errors);
1649 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1650 							       errors);
1651 				dev_err(&msg->spi->dev,
1652 					"SPI transfer failed: %d\n", ret);
1653 				goto out;
1654 			}
1655 
1656 			if (ret > 0) {
1657 				ret = spi_transfer_wait(ctlr, msg, xfer);
1658 				if (ret < 0)
1659 					msg->status = ret;
1660 			}
1661 
1662 			spi_dma_sync_for_cpu(ctlr, xfer);
1663 		} else {
1664 			if (xfer->len)
1665 				dev_err(&msg->spi->dev,
1666 					"Bufferless transfer has length %u\n",
1667 					xfer->len);
1668 		}
1669 
1670 		if (!ctlr->ptp_sts_supported) {
1671 			ptp_read_system_postts(xfer->ptp_sts);
1672 			xfer->ptp_sts_word_post = xfer->len;
1673 		}
1674 
1675 		trace_spi_transfer_stop(msg, xfer);
1676 
1677 		if (msg->status != -EINPROGRESS)
1678 			goto out;
1679 
1680 		spi_transfer_delay_exec(xfer);
1681 
1682 		if (xfer->cs_change) {
1683 			if (list_is_last(&xfer->transfer_list,
1684 					 &msg->transfers)) {
1685 				keep_cs = true;
1686 			} else {
1687 				if (!xfer->cs_off)
1688 					spi_set_cs(msg->spi, false, false);
1689 				_spi_transfer_cs_change_delay(msg, xfer);
1690 				if (!list_next_entry(xfer, transfer_list)->cs_off)
1691 					spi_set_cs(msg->spi, true, false);
1692 			}
1693 		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1694 			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1695 			spi_set_cs(msg->spi, xfer->cs_off, false);
1696 		}
1697 
1698 		msg->actual_length += xfer->len;
1699 	}
1700 
1701 out:
1702 	if (ret != 0 || !keep_cs)
1703 		spi_set_cs(msg->spi, false, false);
1704 
1705 	if (msg->status == -EINPROGRESS)
1706 		msg->status = ret;
1707 
1708 	if (msg->status && ctlr->handle_err)
1709 		ctlr->handle_err(ctlr, msg);
1710 
1711 	spi_finalize_current_message(ctlr);
1712 
1713 	return ret;
1714 }
1715 
1716 /**
1717  * spi_finalize_current_transfer - report completion of a transfer
1718  * @ctlr: the controller reporting completion
1719  *
1720  * Called by SPI drivers using the core transfer_one_message()
1721  * implementation to notify it that the current interrupt driven
1722  * transfer has finished and the next one may be scheduled.
1723  */
1724 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1725 {
1726 	complete(&ctlr->xfer_completion);
1727 }
1728 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
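/*
 * Example (illustrative sketch, not part of this file): a controller driver
 * whose transfer_one() returned a positive value to indicate an in-flight,
 * interrupt-driven transfer would typically complete it from its interrupt
 * handler. The foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_spi_ack_irq(ctlr);			// hypothetical helper
 *		spi_finalize_current_transfer(ctlr);	// let the core proceed
 *		return IRQ_HANDLED;
 *	}
 */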
1729 
1730 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1731 {
1732 	if (ctlr->auto_runtime_pm) {
1733 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1734 	}
1735 }
1736 
1737 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1738 		struct spi_message *msg, bool was_busy)
1739 {
1740 	struct spi_transfer *xfer;
1741 	int ret;
1742 
1743 	if (!was_busy && ctlr->auto_runtime_pm) {
1744 		ret = pm_runtime_get_sync(ctlr->dev.parent);
1745 		if (ret < 0) {
1746 			pm_runtime_put_noidle(ctlr->dev.parent);
1747 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
1748 				ret);
1749 
1750 			msg->status = ret;
1751 			spi_finalize_current_message(ctlr);
1752 
1753 			return ret;
1754 		}
1755 	}
1756 
1757 	if (!was_busy)
1758 		trace_spi_controller_busy(ctlr);
1759 
1760 	if (!was_busy && ctlr->prepare_transfer_hardware) {
1761 		ret = ctlr->prepare_transfer_hardware(ctlr);
1762 		if (ret) {
1763 			dev_err(&ctlr->dev,
1764 				"failed to prepare transfer hardware: %d\n",
1765 				ret);
1766 
1767 			if (ctlr->auto_runtime_pm)
1768 				pm_runtime_put(ctlr->dev.parent);
1769 
1770 			msg->status = ret;
1771 			spi_finalize_current_message(ctlr);
1772 
1773 			return ret;
1774 		}
1775 	}
1776 
1777 	trace_spi_message_start(msg);
1778 
1779 	if (ctlr->prepare_message) {
1780 		ret = ctlr->prepare_message(ctlr, msg);
1781 		if (ret) {
1782 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1783 				ret);
1784 			msg->status = ret;
1785 			spi_finalize_current_message(ctlr);
1786 			return ret;
1787 		}
1788 		msg->prepared = true;
1789 	}
1790 
1791 	ret = spi_map_msg(ctlr, msg);
1792 	if (ret) {
1793 		msg->status = ret;
1794 		spi_finalize_current_message(ctlr);
1795 		return ret;
1796 	}
1797 
1798 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1799 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1800 			xfer->ptp_sts_word_pre = 0;
1801 			ptp_read_system_prets(xfer->ptp_sts);
1802 		}
1803 	}
1804 
1805 	/*
1806 	 * A driver's implementation of transfer_one_message() must arrange for
1807 	 * spi_finalize_current_message() to get called. Most drivers will do
1808 	 * this in the calling context, but some don't. For those cases, a
1809 	 * completion is used to guarantee that this function does not return
1810 	 * until spi_finalize_current_message() is done accessing
1811 	 * ctlr->cur_msg.
1812 	 * The following two flags let us opportunistically skip the use of the
1813 	 * completion, since taking it involves expensive spin locks.
1814 	 * In case of a race with the context that calls
1815 	 * spi_finalize_current_message(), the completion will always be used,
1816 	 * due to strict ordering of these flags using barriers.
1817 	 */
1818 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1819 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1820 	reinit_completion(&ctlr->cur_msg_completion);
1821 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1822 
1823 	ret = ctlr->transfer_one_message(ctlr, msg);
1824 	if (ret) {
1825 		dev_err(&ctlr->dev,
1826 			"failed to transfer one message from queue\n");
1827 		return ret;
1828 	}
1829 
1830 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1831 	smp_mb(); /* See spi_finalize_current_message()... */
1832 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1833 		wait_for_completion(&ctlr->cur_msg_completion);
1834 
1835 	return 0;
1836 }
1837 
1838 /**
1839  * __spi_pump_messages - function which processes SPI message queue
1840  * @ctlr: controller to process queue for
1841  * @in_kthread: true if we are in the context of the message pump thread
1842  *
1843  * This function checks if there is any SPI message in the queue that
1844  * needs processing and if so calls out to the driver to initialize the
1845  * hardware and transfer each message.
1846  *
1847  * Note that it is called both from the kthread itself and also from
1848  * inside spi_sync(); the queue extraction handling at the top of the
1849  * function should deal with this safely.
1850  */
1851 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1852 {
1853 	struct spi_message *msg;
1854 	bool was_busy = false;
1855 	unsigned long flags;
1856 	int ret;
1857 
1858 	/* Take the I/O mutex */
1859 	mutex_lock(&ctlr->io_mutex);
1860 
1861 	/* Lock queue */
1862 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1863 
1864 	/* Make sure we are not already running a message */
1865 	if (ctlr->cur_msg)
1866 		goto out_unlock;
1867 
1868 	/* Check if the queue is idle */
1869 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1870 		if (!ctlr->busy)
1871 			goto out_unlock;
1872 
1873 		/* Defer any non-atomic teardown to the thread */
1874 		if (!in_kthread) {
1875 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1876 			    !ctlr->unprepare_transfer_hardware) {
1877 				spi_idle_runtime_pm(ctlr);
1878 				ctlr->busy = false;
1879 				ctlr->queue_empty = true;
1880 				trace_spi_controller_idle(ctlr);
1881 			} else {
1882 				kthread_queue_work(ctlr->kworker,
1883 						   &ctlr->pump_messages);
1884 			}
1885 			goto out_unlock;
1886 		}
1887 
1888 		ctlr->busy = false;
1889 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1890 
1891 		kfree(ctlr->dummy_rx);
1892 		ctlr->dummy_rx = NULL;
1893 		kfree(ctlr->dummy_tx);
1894 		ctlr->dummy_tx = NULL;
1895 		if (ctlr->unprepare_transfer_hardware &&
1896 		    ctlr->unprepare_transfer_hardware(ctlr))
1897 			dev_err(&ctlr->dev,
1898 				"failed to unprepare transfer hardware\n");
1899 		spi_idle_runtime_pm(ctlr);
1900 		trace_spi_controller_idle(ctlr);
1901 
1902 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1903 		ctlr->queue_empty = true;
1904 		goto out_unlock;
1905 	}
1906 
1907 	/* Extract head of queue */
1908 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1909 	ctlr->cur_msg = msg;
1910 
1911 	list_del_init(&msg->queue);
1912 	if (ctlr->busy)
1913 		was_busy = true;
1914 	else
1915 		ctlr->busy = true;
1916 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1917 
1918 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1919 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1920 
1921 	ctlr->cur_msg = NULL;
1922 	ctlr->fallback = false;
1923 
1924 	mutex_unlock(&ctlr->io_mutex);
1925 
1926 	/* Prod the scheduler in case transfer_one() was busy waiting */
1927 	if (!ret)
1928 		cond_resched();
1929 	return;
1930 
1931 out_unlock:
1932 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1933 	mutex_unlock(&ctlr->io_mutex);
1934 }
1935 
1936 /**
1937  * spi_pump_messages - kthread work function which processes spi message queue
1938  * @work: pointer to kthread work struct contained in the controller struct
1939  */
1940 static void spi_pump_messages(struct kthread_work *work)
1941 {
1942 	struct spi_controller *ctlr =
1943 		container_of(work, struct spi_controller, pump_messages);
1944 
1945 	__spi_pump_messages(ctlr, true);
1946 }
1947 
1948 /**
1949  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1950  * @ctlr: Pointer to the spi_controller structure of the driver
1951  * @xfer: Pointer to the transfer being timestamped
1952  * @progress: How many words (not bytes) have been transferred so far
1953  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1954  *	      transfer, for less jitter in time measurement. Only compatible
1955  *	      with PIO drivers. If true, the caller must follow up with
1956  *	      spi_take_timestamp_post() or otherwise the system will crash.
1957  *	      WARNING: for fully predictable results, the CPU frequency must
1958  *	      also be under control (governor).
1959  *
1960  * This is a helper for drivers to collect the beginning of the TX timestamp
1961  * for the requested byte from the SPI transfer. The frequency with which this
1962  * function must be called (once per word, once for the whole transfer, once
1963  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1964  * greater than or equal to the requested byte at the time of the call. The
1965  * timestamp is only taken once, at the first such call. It is assumed that
1966  * the driver advances its @tx buffer pointer monotonically.
1967  */
1968 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1969 			    struct spi_transfer *xfer,
1970 			    size_t progress, bool irqs_off)
1971 {
1972 	if (!xfer->ptp_sts)
1973 		return;
1974 
1975 	if (xfer->timestamped)
1976 		return;
1977 
1978 	if (progress > xfer->ptp_sts_word_pre)
1979 		return;
1980 
1981 	/* Capture the resolution of the timestamp */
1982 	xfer->ptp_sts_word_pre = progress;
1983 
1984 	if (irqs_off) {
1985 		local_irq_save(ctlr->irq_flags);
1986 		preempt_disable();
1987 	}
1988 
1989 	ptp_read_system_prets(xfer->ptp_sts);
1990 }
1991 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1992 
1993 /**
1994  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1995  * @ctlr: Pointer to the spi_controller structure of the driver
1996  * @xfer: Pointer to the transfer being timestamped
1997  * @progress: How many words (not bytes) have been transferred so far
1998  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1999  *
2000  * This is a helper for drivers to collect the end of the TX timestamp for
2001  * the requested byte from the SPI transfer. Can be called with an arbitrary
2002  * frequency: only the first call where @tx exceeds or is equal to the
2003  * requested word will be timestamped.
2004  */
2005 void spi_take_timestamp_post(struct spi_controller *ctlr,
2006 			     struct spi_transfer *xfer,
2007 			     size_t progress, bool irqs_off)
2008 {
2009 	if (!xfer->ptp_sts)
2010 		return;
2011 
2012 	if (xfer->timestamped)
2013 		return;
2014 
2015 	if (progress < xfer->ptp_sts_word_post)
2016 		return;
2017 
2018 	ptp_read_system_postts(xfer->ptp_sts);
2019 
2020 	if (irqs_off) {
2021 		local_irq_restore(ctlr->irq_flags);
2022 		preempt_enable();
2023 	}
2024 
2025 	/* Capture the resolution of the timestamp */
2026 	xfer->ptp_sts_word_post = progress;
2027 
2028 	xfer->timestamped = 1;
2029 }
2030 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
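/*
 * Example (illustrative sketch, hypothetical foo_* helpers): a PIO driver
 * timestamping the word requested via xfer->ptp_sts_word_pre/post would
 * bracket its TX loop with the two helpers; each pair is cheap after the
 * first snapshot because of the xfer->timestamped early return.
 *
 *	for (i = 0; i < xfer->len / foo->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_spi_write_word(foo, xfer->tx_buf, i);
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */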
2031 
2032 /**
2033  * spi_set_thread_rt - set the controller to pump at realtime priority
2034  * @ctlr: controller to boost priority of
2035  *
2036  * This can be called because the controller requested realtime priority
2037  * (by setting the ->rt value before calling spi_register_controller()) or
2038  * because a device on the bus said that its transfers needed realtime
2039  * priority.
2040  *
2041  * NOTE: at the moment if any device on a bus says it needs realtime then
2042  * the thread will be at realtime priority for all transfers on that
2043  * controller.  If this eventually becomes a problem we may see if we can
2044  * find a way to boost the priority only temporarily during relevant
2045  * transfers.
2046  */
2047 static void spi_set_thread_rt(struct spi_controller *ctlr)
2048 {
2049 	dev_info(&ctlr->dev,
2050 		"will run message pump with realtime priority\n");
2051 	sched_set_fifo(ctlr->kworker->task);
2052 }
2053 
2054 static int spi_init_queue(struct spi_controller *ctlr)
2055 {
2056 	ctlr->running = false;
2057 	ctlr->busy = false;
2058 	ctlr->queue_empty = true;
2059 
2060 	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2061 	if (IS_ERR(ctlr->kworker)) {
2062 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2063 		return PTR_ERR(ctlr->kworker);
2064 	}
2065 
2066 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2067 
2068 	/*
2069 	 * Controller config will indicate if this controller should run the
2070 	 * message pump with high (realtime) priority to reduce the transfer
2071 	 * latency on the bus by minimising the delay between a transfer
2072 	 * request and the scheduling of the message pump thread. Without this
2073 	 * setting the message pump thread will remain at default priority.
2074 	 */
2075 	if (ctlr->rt)
2076 		spi_set_thread_rt(ctlr);
2077 
2078 	return 0;
2079 }
2080 
2081 /**
2082  * spi_get_next_queued_message() - called by driver to check for queued
2083  * messages
2084  * @ctlr: the controller to check for queued messages
2085  *
2086  * If there are more messages in the queue, the next message is returned from
2087  * this call.
2088  *
2089  * Return: the next message in the queue, else NULL if the queue is empty.
2090  */
2091 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2092 {
2093 	struct spi_message *next;
2094 	unsigned long flags;
2095 
2096 	/* Get a pointer to the next message, if any */
2097 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2098 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2099 					queue);
2100 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2101 
2102 	return next;
2103 }
2104 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
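/*
 * Example (illustrative sketch): a driver may peek at the queue from its
 * transfer_one_message() to decide whether to keep the hardware prepared
 * between messages. foo_spi_power_down() is a hypothetical helper.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_spi_power_down(foo);
 */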
2105 
2106 /*
2107  * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2108  *                            and spi_maybe_unoptimize_message()
2109  * @msg: the message to unoptimize
2110  *
2111  * Peripheral drivers should use spi_unoptimize_message() and callers inside
2112  * the core should use spi_maybe_unoptimize_message() rather than calling this
2113  * function directly.
2114  *
2115  * It is not valid to call this on a message that is not currently optimized.
2116  */
2117 static void __spi_unoptimize_message(struct spi_message *msg)
2118 {
2119 	struct spi_controller *ctlr = msg->spi->controller;
2120 
2121 	if (ctlr->unoptimize_message)
2122 		ctlr->unoptimize_message(msg);
2123 
2124 	spi_res_release(ctlr, msg);
2125 
2126 	msg->optimized = false;
2127 	msg->opt_state = NULL;
2128 }
2129 
2130 /*
2131  * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2132  * @msg: the message to unoptimize
2133  *
2134  * This function is used to unoptimize a message if and only if it was
2135  * optimized by the core (via spi_maybe_optimize_message()).
2136  */
2137 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2138 {
2139 	if (!msg->pre_optimized && msg->optimized &&
2140 	    !msg->spi->controller->defer_optimize_message)
2141 		__spi_unoptimize_message(msg);
2142 }
2143 
2144 /**
2145  * spi_finalize_current_message() - the current message is complete
2146  * @ctlr: the controller to return the message to
2147  *
2148  * Called by the driver to notify the core that the message in the front of the
2149  * queue is complete and can be removed from the queue.
2150  */
2151 void spi_finalize_current_message(struct spi_controller *ctlr)
2152 {
2153 	struct spi_transfer *xfer;
2154 	struct spi_message *mesg;
2155 	int ret;
2156 
2157 	mesg = ctlr->cur_msg;
2158 
2159 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2160 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2161 			ptp_read_system_postts(xfer->ptp_sts);
2162 			xfer->ptp_sts_word_post = xfer->len;
2163 		}
2164 	}
2165 
2166 	if (unlikely(ctlr->ptp_sts_supported))
2167 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2168 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2169 
2170 	spi_unmap_msg(ctlr, mesg);
2171 
2172 	if (mesg->prepared && ctlr->unprepare_message) {
2173 		ret = ctlr->unprepare_message(ctlr, mesg);
2174 		if (ret) {
2175 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2176 				ret);
2177 		}
2178 	}
2179 
2180 	mesg->prepared = false;
2181 
2182 	spi_maybe_unoptimize_message(mesg);
2183 
2184 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2185 	smp_mb(); /* See __spi_pump_transfer_message()... */
2186 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2187 		complete(&ctlr->cur_msg_completion);
2188 
2189 	trace_spi_message_done(mesg);
2190 
2191 	mesg->state = NULL;
2192 	if (mesg->complete)
2193 		mesg->complete(mesg->context);
2194 }
2195 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
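/*
 * Example (illustrative sketch): a driver providing its own
 * transfer_one_message() must finalize each message itself, in whatever
 * context completion happens. foo_spi_do_message() is hypothetical.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_spi_do_message(ctlr, msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */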
2196 
2197 static int spi_start_queue(struct spi_controller *ctlr)
2198 {
2199 	unsigned long flags;
2200 
2201 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2202 
2203 	if (ctlr->running || ctlr->busy) {
2204 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2205 		return -EBUSY;
2206 	}
2207 
2208 	ctlr->running = true;
2209 	ctlr->cur_msg = NULL;
2210 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2211 
2212 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2213 
2214 	return 0;
2215 }
2216 
2217 static int spi_stop_queue(struct spi_controller *ctlr)
2218 {
2219 	unsigned int limit = 500;
2220 	unsigned long flags;
2221 
2222 	/*
2223 	 * This is a bit lame, but is optimized for the common execution path.
2224 	 * A wait_queue on the ctlr->busy could be used, but then the common
2225 	 * execution path (pump_messages) would be required to call wake_up or
2226 	 * friends on every SPI message. Do this instead.
2227 	 */
2228 	do {
2229 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2230 		if (list_empty(&ctlr->queue) && !ctlr->busy) {
2231 			ctlr->running = false;
2232 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2233 			return 0;
2234 		}
2235 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2236 		usleep_range(10000, 11000);
2237 	} while (--limit);
2238 
2239 	return -EBUSY;
2240 }
2241 
2242 static int spi_destroy_queue(struct spi_controller *ctlr)
2243 {
2244 	int ret;
2245 
2246 	ret = spi_stop_queue(ctlr);
2247 
2248 	/*
2249 	 * kthread_flush_worker will block until all work is done.
2250 	 * If the reason that stop_queue timed out is that the work will never
2251 	 * finish, then it does no good to call flush/stop thread, so
2252 	 * return anyway.
2253 	 */
2254 	if (ret) {
2255 		dev_err(&ctlr->dev, "problem destroying queue\n");
2256 		return ret;
2257 	}
2258 
2259 	kthread_destroy_worker(ctlr->kworker);
2260 
2261 	return 0;
2262 }
2263 
2264 static int __spi_queued_transfer(struct spi_device *spi,
2265 				 struct spi_message *msg,
2266 				 bool need_pump)
2267 {
2268 	struct spi_controller *ctlr = spi->controller;
2269 	unsigned long flags;
2270 
2271 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2272 
2273 	if (!ctlr->running) {
2274 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2275 		return -ESHUTDOWN;
2276 	}
2277 	msg->actual_length = 0;
2278 	msg->status = -EINPROGRESS;
2279 
2280 	list_add_tail(&msg->queue, &ctlr->queue);
2281 	ctlr->queue_empty = false;
2282 	if (!ctlr->busy && need_pump)
2283 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2284 
2285 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2286 	return 0;
2287 }
2288 
2289 /**
2290  * spi_queued_transfer - transfer function for queued transfers
2291  * @spi: SPI device which is requesting transfer
2292  * @msg: SPI message to be handled; it is queued to the driver queue
2293  *
2294  * Return: zero on success, else a negative error code.
2295  */
2296 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2297 {
2298 	return __spi_queued_transfer(spi, msg, true);
2299 }
2300 
2301 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2302 {
2303 	int ret;
2304 
2305 	ctlr->transfer = spi_queued_transfer;
2306 	if (!ctlr->transfer_one_message)
2307 		ctlr->transfer_one_message = spi_transfer_one_message;
2308 
2309 	/* Initialize and start queue */
2310 	ret = spi_init_queue(ctlr);
2311 	if (ret) {
2312 		dev_err(&ctlr->dev, "problem initializing queue\n");
2313 		goto err_init_queue;
2314 	}
2315 	ctlr->queued = true;
2316 	ret = spi_start_queue(ctlr);
2317 	if (ret) {
2318 		dev_err(&ctlr->dev, "problem starting queue\n");
2319 		goto err_start_queue;
2320 	}
2321 
2322 	return 0;
2323 
2324 err_start_queue:
2325 	spi_destroy_queue(ctlr);
2326 err_init_queue:
2327 	return ret;
2328 }
2329 
2330 /**
2331  * spi_flush_queue - Send all pending messages in the queue from the caller's
2332  *		     context
2333  * @ctlr: controller to process queue for
2334  *
2335  * This should be used when one wants to ensure all pending messages have been
2336  * sent before doing something. It is used by the spi-mem code to make sure SPI
2337  * memory operations do not preempt regular SPI transfers that have been queued
2338  * before the spi-mem operation.
2339  */
2340 void spi_flush_queue(struct spi_controller *ctlr)
2341 {
2342 	if (ctlr->transfer == spi_queued_transfer)
2343 		__spi_pump_messages(ctlr, false);
2344 }
2345 
2346 /*-------------------------------------------------------------------------*/
2347 
2348 #if defined(CONFIG_OF)
2349 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2350 				     struct spi_delay *delay, const char *prop)
2351 {
2352 	u32 value;
2353 
2354 	if (!of_property_read_u32(nc, prop, &value)) {
2355 		if (value > U16_MAX) {
2356 			delay->value = DIV_ROUND_UP(value, 1000);
2357 			delay->unit = SPI_DELAY_UNIT_USECS;
2358 		} else {
2359 			delay->value = value;
2360 			delay->unit = SPI_DELAY_UNIT_NSECS;
2361 		}
2362 	}
2363 }
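/*
 * Example device tree fragment (illustrative): values above U16_MAX
 * nanoseconds are rounded up and stored in microseconds, so these parse to
 * 50 ns (SPI_DELAY_UNIT_NSECS) and 100 us (SPI_DELAY_UNIT_USECS):
 *
 *	spi-cs-setup-delay-ns = <50>;
 *	spi-cs-hold-delay-ns = <100000>;
 */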
2364 
2365 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2366 			   struct device_node *nc)
2367 {
2368 	u32 value, cs[SPI_DEVICE_CS_CNT_MAX], map[SPI_DEVICE_DATA_LANE_CNT_MAX];
2369 	int rc, idx, max_num_data_lanes;
2370 
2371 	/* Mode (clock phase/polarity/etc.) */
2372 	if (of_property_read_bool(nc, "spi-cpha"))
2373 		spi->mode |= SPI_CPHA;
2374 	if (of_property_read_bool(nc, "spi-cpol"))
2375 		spi->mode |= SPI_CPOL;
2376 	if (of_property_read_bool(nc, "spi-3wire"))
2377 		spi->mode |= SPI_3WIRE;
2378 	if (of_property_read_bool(nc, "spi-lsb-first"))
2379 		spi->mode |= SPI_LSB_FIRST;
2380 	if (of_property_read_bool(nc, "spi-cs-high"))
2381 		spi->mode |= SPI_CS_HIGH;
2382 
2383 	/* Device DUAL/QUAD mode */
2384 
2385 	rc = of_property_read_variable_u32_array(nc, "spi-tx-lane-map", map, 1,
2386 						 ARRAY_SIZE(map));
2387 	if (rc >= 0) {
2388 		max_num_data_lanes = rc;
2389 		for (idx = 0; idx < max_num_data_lanes; idx++)
2390 			spi->tx_lane_map[idx] = map[idx];
2391 	} else if (rc == -EINVAL) {
2392 		/* Default lane map is identity mapping. */
2393 		max_num_data_lanes = ARRAY_SIZE(spi->tx_lane_map);
2394 		for (idx = 0; idx < max_num_data_lanes; idx++)
2395 			spi->tx_lane_map[idx] = idx;
2396 	} else {
2397 		dev_err(&ctlr->dev,
2398 			"failed to read spi-tx-lane-map property: %d\n", rc);
2399 		return rc;
2400 	}
2401 
2402 	rc = of_property_count_u32_elems(nc, "spi-tx-bus-width");
2403 	if (rc < 0 && rc != -EINVAL) {
2404 		dev_err(&ctlr->dev,
2405 			"failed to read spi-tx-bus-width property: %d\n", rc);
2406 		return rc;
2407 	}
2408 	if (rc > max_num_data_lanes) {
2409 		dev_err(&ctlr->dev,
2410 			"spi-tx-bus-width has more elements (%d) than spi-tx-lane-map (%d)\n",
2411 			rc, max_num_data_lanes);
2412 		return -EINVAL;
2413 	}
2414 
2415 	if (rc == -EINVAL) {
2416 		/* Default when property is not present. */
2417 		spi->num_tx_lanes = 1;
2418 	} else {
2419 		u32 first_value;
2420 
2421 		spi->num_tx_lanes = rc;
2422 
2423 		for (idx = 0; idx < spi->num_tx_lanes; idx++) {
2424 			rc = of_property_read_u32_index(nc, "spi-tx-bus-width",
2425 							idx, &value);
2426 			if (rc)
2427 				return rc;
2428 
2429 			/*
2430 			 * For now, we only support all lanes having the same
2431 			 * width so we can keep using the existing mode flags.
2432 			 */
2433 			if (!idx)
2434 				first_value = value;
2435 			else if (first_value != value) {
2436 				dev_err(&ctlr->dev,
2437 					"spi-tx-bus-width has inconsistent values: first %d vs later %d\n",
2438 					first_value, value);
2439 				return -EINVAL;
2440 			}
2441 		}
2442 
2443 		switch (value) {
2444 		case 0:
2445 			spi->mode |= SPI_NO_TX;
2446 			break;
2447 		case 1:
2448 			break;
2449 		case 2:
2450 			spi->mode |= SPI_TX_DUAL;
2451 			break;
2452 		case 4:
2453 			spi->mode |= SPI_TX_QUAD;
2454 			break;
2455 		case 8:
2456 			spi->mode |= SPI_TX_OCTAL;
2457 			break;
2458 		default:
2459 			dev_warn(&ctlr->dev,
2460 				"spi-tx-bus-width %d not supported\n",
2461 				value);
2462 			break;
2463 		}
2464 	}
2465 
2466 	for (idx = 0; idx < spi->num_tx_lanes; idx++) {
2467 		if (spi->tx_lane_map[idx] >= spi->controller->num_data_lanes) {
2468 			dev_err(&ctlr->dev,
2469 				"spi-tx-lane-map has invalid value %d (num_data_lanes=%d)\n",
2470 				spi->tx_lane_map[idx],
2471 				spi->controller->num_data_lanes);
2472 			return -EINVAL;
2473 		}
2474 	}
2475 
2476 	rc = of_property_read_variable_u32_array(nc, "spi-rx-lane-map", map, 1,
2477 						 ARRAY_SIZE(map));
2478 	if (rc >= 0) {
2479 		max_num_data_lanes = rc;
2480 		for (idx = 0; idx < max_num_data_lanes; idx++)
2481 			spi->rx_lane_map[idx] = map[idx];
2482 	} else if (rc == -EINVAL) {
2483 		/* Default lane map is identity mapping. */
2484 		max_num_data_lanes = ARRAY_SIZE(spi->rx_lane_map);
2485 		for (idx = 0; idx < max_num_data_lanes; idx++)
2486 			spi->rx_lane_map[idx] = idx;
2487 	} else {
2488 		dev_err(&ctlr->dev,
2489 			"failed to read spi-rx-lane-map property: %d\n", rc);
2490 		return rc;
2491 	}
2492 
2493 	rc = of_property_count_u32_elems(nc, "spi-rx-bus-width");
2494 	if (rc < 0 && rc != -EINVAL) {
2495 		dev_err(&ctlr->dev,
2496 			"failed to read spi-rx-bus-width property: %d\n", rc);
2497 		return rc;
2498 	}
2499 	if (rc > max_num_data_lanes) {
2500 		dev_err(&ctlr->dev,
2501 			"spi-rx-bus-width has more elements (%d) than spi-rx-lane-map (%d)\n",
2502 			rc, max_num_data_lanes);
2503 		return -EINVAL;
2504 	}
2505 
2506 	if (rc == -EINVAL) {
2507 		/* Default when property is not present. */
2508 		spi->num_rx_lanes = 1;
2509 	} else {
2510 		u32 first_value;
2511 
2512 		spi->num_rx_lanes = rc;
2513 
2514 		for (idx = 0; idx < spi->num_rx_lanes; idx++) {
2515 			rc = of_property_read_u32_index(nc, "spi-rx-bus-width",
2516 							idx, &value);
2517 			if (rc)
2518 				return rc;
2519 
2520 			/*
2521 			 * For now, we only support all lanes having the same
2522 			 * width so we can keep using the existing mode flags.
2523 			 */
2524 			if (!idx)
2525 				first_value = value;
2526 			else if (first_value != value) {
2527 				dev_err(&ctlr->dev,
2528 					"spi-rx-bus-width has inconsistent values: first %d vs later %d\n",
2529 					first_value, value);
2530 				return -EINVAL;
2531 			}
2532 		}
2533 
2534 		switch (value) {
2535 		case 0:
2536 			spi->mode |= SPI_NO_RX;
2537 			break;
2538 		case 1:
2539 			break;
2540 		case 2:
2541 			spi->mode |= SPI_RX_DUAL;
2542 			break;
2543 		case 4:
2544 			spi->mode |= SPI_RX_QUAD;
2545 			break;
2546 		case 8:
2547 			spi->mode |= SPI_RX_OCTAL;
2548 			break;
2549 		default:
2550 			dev_warn(&ctlr->dev,
2551 				"spi-rx-bus-width %d not supported\n",
2552 				value);
2553 			break;
2554 		}
2555 	}
2556 
2557 	for (idx = 0; idx < spi->num_rx_lanes; idx++) {
2558 		if (spi->rx_lane_map[idx] >= spi->controller->num_data_lanes) {
2559 			dev_err(&ctlr->dev,
2560 				"spi-rx-lane-map has invalid value %d (num_data_lanes=%d)\n",
2561 				spi->rx_lane_map[idx],
2562 				spi->controller->num_data_lanes);
2563 			return -EINVAL;
2564 		}
2565 	}
2566 
2567 	if (spi_controller_is_target(ctlr)) {
2568 		if (!of_node_name_eq(nc, "slave")) {
2569 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2570 				nc);
2571 			return -EINVAL;
2572 		}
2573 		return 0;
2574 	}
2575 
2576 	/* Device address */
2577 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2578 						 SPI_DEVICE_CS_CNT_MAX);
2579 	if (rc < 0) {
2580 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2581 			nc, rc);
2582 		return rc;
2583 	}
2584 
2585 	if ((of_property_present(nc, "parallel-memories")) &&
2586 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2587 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2588 		return -EINVAL;
2589 	}
2590 
2591 	spi->num_chipselect = rc;
2592 	for (idx = 0; idx < rc; idx++)
2593 		spi_set_chipselect(spi, idx, cs[idx]);
2594 
2595 	/*
2596 	 * By default spi->chip_select[0] will hold the physical CS number,
2597 	 * so set bit 0 in spi->cs_index_mask.
2598 	 */
2599 	spi->cs_index_mask = BIT(0);
2600 
2601 	/* Device speed */
2602 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2603 		spi->max_speed_hz = value;
2604 
2605 	/* Device CS delays */
2606 	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2607 	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2608 	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2609 
2610 	return 0;
2611 }
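/*
 * Example device tree node (illustrative) exercising the properties parsed
 * above; the controller must declare enough data lanes for the bus-width
 * and lane-map values to pass validation:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <20>;
 *	};
 */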
2612 
2613 static struct spi_device *
2614 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2615 {
2616 	struct spi_device *spi;
2617 	int rc;
2618 
2619 	/* Alloc an spi_device */
2620 	spi = spi_alloc_device(ctlr);
2621 	if (!spi) {
2622 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2623 		rc = -ENOMEM;
2624 		goto err_out;
2625 	}
2626 
2627 	/* Select device driver */
2628 	rc = of_alias_from_compatible(nc, spi->modalias,
2629 				      sizeof(spi->modalias));
2630 	if (rc < 0) {
2631 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2632 		goto err_out;
2633 	}
2634 
2635 	rc = of_spi_parse_dt(ctlr, spi, nc);
2636 	if (rc)
2637 		goto err_out;
2638 
2639 	/* Store a pointer to the node in the device structure */
2640 	of_node_get(nc);
2641 
2642 	device_set_node(&spi->dev, of_fwnode_handle(nc));
2643 
2644 	/* Register the new device */
2645 	rc = spi_add_device(spi);
2646 	if (rc) {
2647 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2648 		goto err_of_node_put;
2649 	}
2650 
2651 	return spi;
2652 
2653 err_of_node_put:
2654 	of_node_put(nc);
2655 err_out:
2656 	spi_dev_put(spi);
2657 	return ERR_PTR(rc);
2658 }
2659 
2660 /**
2661  * of_register_spi_devices() - Register child devices onto the SPI bus
2662  * @ctlr:	Pointer to spi_controller device
2663  *
2664  * Registers an spi_device for each child node of the controller node that
2665  * represents a valid SPI target device.
2666  */
2667 static void of_register_spi_devices(struct spi_controller *ctlr)
2668 {
2669 	struct spi_device *spi;
2670 	struct device_node *nc;
2671 
2672 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2673 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2674 			continue;
2675 		spi = of_register_spi_device(ctlr, nc);
2676 		if (IS_ERR(spi)) {
2677 			dev_warn(&ctlr->dev,
2678 				 "Failed to create SPI device for %pOF\n", nc);
2679 			of_node_clear_flag(nc, OF_POPULATED);
2680 		}
2681 	}
2682 }
2683 #else
2684 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2685 #endif
2686 
2687 /**
2688  * spi_new_ancillary_device() - Register ancillary SPI device
2689  * @spi:         Pointer to the main SPI device registering the ancillary device
2690  * @chip_select: Chip Select of the ancillary device
2691  *
2692  * Register an ancillary SPI device; for example some chips have a chip-select
2693  * for normal device usage and another one for setup/firmware upload.
2694  *
2695  * This may only be called from the main SPI device's probe routine.
2696  *
2697  * Return: pointer to the new ancillary device on success; ERR_PTR on failure
2698  */
2699 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2700 					     u8 chip_select)
2701 {
2702 	struct spi_controller *ctlr = spi->controller;
2703 	struct spi_device *ancillary;
2704 	int rc;
2705 
2706 	/* Alloc an spi_device */
2707 	ancillary = spi_alloc_device(ctlr);
2708 	if (!ancillary) {
2709 		rc = -ENOMEM;
2710 		goto err_out;
2711 	}
2712 
2713 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2714 
2715 	/* Use provided chip-select for ancillary device */
2716 	spi_set_chipselect(ancillary, 0, chip_select);
2717 
2718 	/* Take over SPI mode/speed from SPI main device */
2719 	ancillary->max_speed_hz = spi->max_speed_hz;
2720 	ancillary->mode = spi->mode;
2721 	/*
2722 	 * By default spi->chip_select[0] will hold the physical CS number,
2723 	 * so set bit 0 in spi->cs_index_mask.
2724 	 */
2725 	ancillary->cs_index_mask = BIT(0);
2726 
2727 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2728 
2729 	/* Register the new device, passing the parent to skip CS conflict check */
2730 	rc = __spi_add_device(ancillary, spi);
2731 	if (rc) {
2732 		dev_err(&spi->dev, "failed to register ancillary device\n");
2733 		goto err_out;
2734 	}
2735 
2736 	return ancillary;
2737 
2738 err_out:
2739 	spi_dev_put(ancillary);
2740 	return ERR_PTR(rc);
2741 }
2742 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
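/*
 * Example (illustrative sketch): a chip with a second chip-select used for
 * firmware upload would register it from its probe routine. The foo_*
 * names are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_spi;
 *
 *		fw_spi = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_spi))
 *			return PTR_ERR(fw_spi);
 *
 *		return foo_upload_firmware(fw_spi);
 *	}
 */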
2743 
2744 static void devm_spi_unregister_device(void *spi)
2745 {
2746 	spi_unregister_device(spi);
2747 }
2748 
2749 /**
2750  * devm_spi_new_ancillary_device() - Register managed ancillary SPI device
2751  * @spi:         Pointer to the main SPI device registering the ancillary device
2752  * @chip_select: Chip Select of the ancillary device
2753  *
2754  * Register an ancillary SPI device; for example some chips have a chip-select
2755  * for normal device usage and another one for setup/firmware upload.
2756  *
2757  * This is the managed version of spi_new_ancillary_device(). The ancillary
2758  * device will be unregistered automatically when the parent SPI device is
2759  * unregistered.
2760  *
2761  * This may only be called from the main SPI device's probe routine.
2762  *
2763  * Return: Pointer to new ancillary device on success; ERR_PTR on failure
2764  */
2765 struct spi_device *devm_spi_new_ancillary_device(struct spi_device *spi,
2766 						 u8 chip_select)
2767 {
2768 	struct spi_device *ancillary;
2769 	int ret;
2770 
2771 	ancillary = spi_new_ancillary_device(spi, chip_select);
2772 	if (IS_ERR(ancillary))
2773 		return ancillary;
2774 
2775 	ret = devm_add_action_or_reset(&spi->dev, devm_spi_unregister_device,
2776 				       ancillary);
2777 	if (ret)
2778 		return ERR_PTR(ret);
2779 
2780 	return ancillary;
2781 }
2782 EXPORT_SYMBOL_GPL(devm_spi_new_ancillary_device);
2783 
2784 #ifdef CONFIG_ACPI
2785 struct acpi_spi_lookup {
2786 	struct spi_controller 	*ctlr;
2787 	u32			max_speed_hz;
2788 	u32			mode;
2789 	int			irq;
2790 	u8			bits_per_word;
2791 	u8			chip_select;
2792 	int			n;
2793 	int			index;
2794 };
2795 
2796 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2797 {
2798 	struct acpi_resource_spi_serialbus *sb;
2799 	int *count = data;
2800 
2801 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2802 		return 1;
2803 
2804 	sb = &ares->data.spi_serial_bus;
2805 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2806 		return 1;
2807 
2808 	*count = *count + 1;
2809 
2810 	return 1;
2811 }
2812 
2813 /**
2814  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2815  * @adev:	ACPI device
2816  *
2817  * Return: the number of SpiSerialBus resources in the ACPI device's
2818  * resource list, or a negative error code.
2819  */
2820 int acpi_spi_count_resources(struct acpi_device *adev)
2821 {
2822 	LIST_HEAD(r);
2823 	int count = 0;
2824 	int ret;
2825 
2826 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2827 	if (ret < 0)
2828 		return ret;
2829 
2830 	acpi_dev_free_resource_list(&r);
2831 
2832 	return count;
2833 }
2834 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
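/*
 * Example (illustrative sketch, error handling shortened): a driver
 * enumerating a multi-instance ACPI node would count the SpiSerialBus
 * entries and allocate one SPI device per index:
 *
 *	int i, n = acpi_spi_count_resources(adev);
 *
 *	for (i = 0; i < n; i++) {
 *		struct spi_device *spi = acpi_spi_device_alloc(ctlr, adev, i);
 *
 *		if (IS_ERR(spi))
 *			return PTR_ERR(spi);
 *		// the caller still registers it with spi_add_device()
 *	}
 */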
2835 
2836 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2837 					    struct acpi_spi_lookup *lookup)
2838 {
2839 	const union acpi_object *obj;
2840 
2841 	if (!x86_apple_machine)
2842 		return;
2843 
2844 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2845 	    && obj->buffer.length >= 4)
2846 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2847 
2848 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2849 	    && obj->buffer.length == 8)
2850 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2851 
2852 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2853 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2854 		lookup->mode |= SPI_LSB_FIRST;
2855 
2856 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2857 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2858 		lookup->mode |= SPI_CPOL;
2859 
2860 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2861 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2862 		lookup->mode |= SPI_CPHA;
2863 }
2864 
2865 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2866 {
2867 	struct acpi_spi_lookup *lookup = data;
2868 	struct spi_controller *ctlr = lookup->ctlr;
2869 
2870 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2871 		struct acpi_resource_spi_serialbus *sb;
2872 		acpi_handle parent_handle;
2873 		acpi_status status;
2874 
2875 		sb = &ares->data.spi_serial_bus;
2876 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2877 
2878 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2879 				return 1;
2880 
2881 			status = acpi_get_handle(NULL,
2882 						 sb->resource_source.string_ptr,
2883 						 &parent_handle);
2884 
2885 			if (ACPI_FAILURE(status))
2886 				return -ENODEV;
2887 
2888 			if (ctlr) {
2889 				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2890 					return -ENODEV;
2891 			} else {
2892 				struct acpi_device *adev;
2893 
2894 				adev = acpi_fetch_acpi_dev(parent_handle);
2895 				if (!adev)
2896 					return -ENODEV;
2897 
2898 				ctlr = acpi_spi_find_controller_by_adev(adev);
2899 				if (!ctlr)
2900 					return -EPROBE_DEFER;
2901 
2902 				lookup->ctlr = ctlr;
2903 			}
2904 
2905 			/*
2906 			 * ACPI DeviceSelection numbering is handled by the
2907 			 * host controller driver in Windows and can vary
2908 			 * from driver to driver. In Linux we always expect
2909 			 * 0 .. max - 1 so we need to ask the driver to
2910 			 * translate between the two schemes.
2911 			 */
2912 			if (ctlr->fw_translate_cs) {
2913 				int cs = ctlr->fw_translate_cs(ctlr,
2914 						sb->device_selection);
2915 				if (cs < 0)
2916 					return cs;
2917 				lookup->chip_select = cs;
2918 			} else {
2919 				lookup->chip_select = sb->device_selection;
2920 			}
2921 
2922 			lookup->max_speed_hz = sb->connection_speed;
2923 			lookup->bits_per_word = sb->data_bit_length;
2924 
2925 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2926 				lookup->mode |= SPI_CPHA;
2927 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2928 				lookup->mode |= SPI_CPOL;
2929 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2930 				lookup->mode |= SPI_CS_HIGH;
2931 		}
2932 	} else if (lookup->irq < 0) {
2933 		struct resource r;
2934 
2935 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2936 			lookup->irq = r.start;
2937 	}
2938 
2939 	/* Always tell the ACPI core to skip this resource */
2940 	return 1;
2941 }
2942 
2943 /**
2944  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2945  * @ctlr: controller to which the spi device belongs
2946  * @adev: ACPI Device for the spi device
2947  * @index: Index of the spi resource inside the ACPI Node
2948  *
2949  * This should be used to allocate a new SPI device from an ACPI device node.
2950  * The caller is responsible for calling spi_add_device to register the SPI device.
2951  *
2952  * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2953  * using the resource.
2954  * If index is set to -1, index is not used.
2955  * Note: If index is -1, ctlr must be set.
2956  *
2957  * Return: a pointer to the new device, or ERR_PTR on error.
2958  */
2959 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2960 					 struct acpi_device *adev,
2961 					 int index)
2962 {
2963 	acpi_handle parent_handle = NULL;
2964 	struct list_head resource_list;
2965 	struct acpi_spi_lookup lookup = {};
2966 	struct spi_device *spi;
2967 	int ret;
2968 
2969 	if (!ctlr && index == -1)
2970 		return ERR_PTR(-EINVAL);
2971 
2972 	lookup.ctlr		= ctlr;
2973 	lookup.irq		= -1;
2974 	lookup.index		= index;
2975 	lookup.n		= 0;
2976 
2977 	INIT_LIST_HEAD(&resource_list);
2978 	ret = acpi_dev_get_resources(adev, &resource_list,
2979 				     acpi_spi_add_resource, &lookup);
2980 	acpi_dev_free_resource_list(&resource_list);
2981 
2982 	if (ret < 0)
2983 		/* Found SPI in _CRS but it points to another controller */
2984 		return ERR_PTR(ret);
2985 
2986 	if (!lookup.max_speed_hz &&
2987 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2988 	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2989 		/* Apple does not use _CRS but nested devices for SPI target devices */
2990 		acpi_spi_parse_apple_properties(adev, &lookup);
2991 	}
2992 
2993 	if (!lookup.max_speed_hz)
2994 		return ERR_PTR(-ENODEV);
2995 
2996 	spi = spi_alloc_device(lookup.ctlr);
2997 	if (!spi) {
2998 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2999 			dev_name(&adev->dev));
3000 		return ERR_PTR(-ENOMEM);
3001 	}
3002 
3003 	spi_set_chipselect(spi, 0, lookup.chip_select);
3004 
3005 	ACPI_COMPANION_SET(&spi->dev, adev);
3006 	spi->max_speed_hz	= lookup.max_speed_hz;
3007 	spi->mode		|= lookup.mode;
3008 	spi->irq		= lookup.irq;
3009 	spi->bits_per_word	= lookup.bits_per_word;
3010 	/*
3011 	 * By default spi->chip_select[0] will hold the physical CS number,
3012 	 * so set bit 0 in spi->cs_index_mask.
3013 	 */
3014 	spi->cs_index_mask	= BIT(0);
3015 
3016 	return spi;
3017 }
3018 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
3019 
3020 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
3021 					    struct acpi_device *adev)
3022 {
3023 	struct spi_device *spi;
3024 
3025 	if (acpi_bus_get_status(adev) || !adev->status.present ||
3026 	    acpi_device_enumerated(adev))
3027 		return AE_OK;
3028 
3029 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
3030 	if (IS_ERR(spi)) {
3031 		if (PTR_ERR(spi) == -ENOMEM)
3032 			return AE_NO_MEMORY;
3033 		else
3034 			return AE_OK;
3035 	}
3036 
3037 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
3038 			  sizeof(spi->modalias));
3039 
3040 	/*
3041 	 * This gets re-tried in spi_probe() for -EPROBE_DEFER handling in case
3042 	 * the GPIO controller does not have a driver yet. This needs to be done
3043 	 * here too, because this call sets the GPIO direction and/or bias.
3044 	 * Setting these needs to be done even if there is no driver, in which
3045 	 * case spi_probe() will never get called.
3046 	 * TODO: ideally the setup of the GPIO should be handled in a generic
3047 	 * manner in the ACPI/gpiolib core code.
3048 	 */
3049 	if (spi->irq < 0)
3050 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
3051 
3052 	acpi_device_set_enumerated(adev);
3053 
3054 	adev->power.flags.ignore_parent = true;
3055 	if (spi_add_device(spi)) {
3056 		adev->power.flags.ignore_parent = false;
3057 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
3058 			dev_name(&adev->dev));
3059 		spi_dev_put(spi);
3060 	}
3061 
3062 	return AE_OK;
3063 }
3064 
3065 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
3066 				       void *data, void **return_value)
3067 {
3068 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
3069 	struct spi_controller *ctlr = data;
3070 
3071 	if (!adev)
3072 		return AE_OK;
3073 
3074 	return acpi_register_spi_device(ctlr, adev);
3075 }
3076 
3077 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
3078 
3079 static void acpi_register_spi_devices(struct spi_controller *ctlr)
3080 {
3081 	acpi_status status;
3082 	acpi_handle handle;
3083 
3084 	handle = ACPI_HANDLE(ctlr->dev.parent);
3085 	if (!handle)
3086 		return;
3087 
3088 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
3089 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
3090 				     acpi_spi_add_device, NULL, ctlr, NULL);
3091 	if (ACPI_FAILURE(status))
3092 		dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
3093 }
3094 #else
3095 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
3096 #endif /* CONFIG_ACPI */
3097 
3098 static void spi_controller_release(struct device *dev)
3099 {
3100 	struct spi_controller *ctlr;
3101 
3102 	ctlr = container_of(dev, struct spi_controller, dev);
3103 
3104 	free_percpu(ctlr->pcpu_statistics);
3105 	kfree(ctlr);
3106 }
3107 
3108 static const struct class spi_controller_class = {
3109 	.name		= "spi_master",
3110 	.dev_release	= spi_controller_release,
3111 	.dev_groups	= spi_controller_groups,
3112 };
3113 
3114 #ifdef CONFIG_SPI_SLAVE
3115 /**
3116  * spi_target_abort - abort the ongoing transfer request on an SPI target controller
3117  * @spi: device used for the current transfer
3118  */
3119 int spi_target_abort(struct spi_device *spi)
3120 {
3121 	struct spi_controller *ctlr = spi->controller;
3122 
3123 	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
3124 		return ctlr->target_abort(ctlr);
3125 
3126 	return -ENOTSUPP;
3127 }
3128 EXPORT_SYMBOL_GPL(spi_target_abort);
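/*
 * Example (illustrative sketch, assuming a pending spi_async() request):
 * a target-mode protocol driver may abort a stalled request before
 * tearing down, e.g. in its remove path.
 *
 *	static void foo_target_remove(struct spi_device *spi)
 *	{
 *		if (spi_target_abort(spi))
 *			dev_warn(&spi->dev, "could not abort pending request\n");
 *	}
 */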
3129 
3130 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
3131 			  char *buf)
3132 {
3133 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
3134 						   dev);
3135 	struct device *child;
3136 	int ret;
3137 
3138 	child = device_find_any_child(&ctlr->dev);
3139 	ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
3140 	put_device(child);
3141 
3142 	return ret;
3143 }
3144 
3145 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
3146 			   const char *buf, size_t count)
3147 {
3148 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
3149 						   dev);
3150 	struct spi_device *spi;
3151 	struct device *child;
3152 	char name[32];
3153 	int rc;
3154 
3155 	rc = sscanf(buf, "%31s", name);
3156 	if (rc != 1 || !name[0])
3157 		return -EINVAL;
3158 
3159 	child = device_find_any_child(&ctlr->dev);
3160 	if (child) {
3161 		/* Remove registered target device */
3162 		device_unregister(child);
3163 		put_device(child);
3164 	}
3165 
3166 	if (strcmp(name, "(null)")) {
3167 		/* Register new target device */
3168 		spi = spi_alloc_device(ctlr);
3169 		if (!spi)
3170 			return -ENOMEM;
3171 
3172 		strscpy(spi->modalias, name, sizeof(spi->modalias));
3173 
3174 		rc = spi_add_device(spi);
3175 		if (rc) {
3176 			spi_dev_put(spi);
3177 			return rc;
3178 		}
3179 	}
3180 
3181 	return count;
3182 }
3183 
3184 static DEVICE_ATTR_RW(slave);
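/*
 * Usage example from userspace (assuming a registered target controller
 * named spi0): bind the generic spidev protocol driver to it, then remove
 * it again by writing "(null)".
 *
 *	echo spidev > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */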
3185 
3186 static struct attribute *spi_target_attrs[] = {
3187 	&dev_attr_slave.attr,
3188 	NULL,
3189 };
3190 
3191 static const struct attribute_group spi_target_group = {
3192 	.attrs = spi_target_attrs,
3193 };
3194 
3195 static const struct attribute_group *spi_target_groups[] = {
3196 	&spi_controller_statistics_group,
3197 	&spi_target_group,
3198 	NULL,
3199 };
3200 
3201 static const struct class spi_target_class = {
3202 	.name		= "spi_slave",
3203 	.dev_release	= spi_controller_release,
3204 	.dev_groups	= spi_target_groups,
3205 };
3206 #else
3207 extern struct class spi_target_class;	/* dummy */
3208 #endif
3209 
3210 /**
3211  * __spi_alloc_controller - allocate an SPI host or target controller
3212  * @dev: the controller, possibly using the platform_bus
3213  * @size: how much zeroed driver-private data to allocate; the pointer to this
3214  *	memory is in the driver_data field of the returned device, accessible
3215  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
3216  *	drivers granting DMA access to portions of their private data need to
3217  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
3218  * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
3219  *	controller
3220  * Context: can sleep
3221  *
3222  * This call is used only by SPI controller drivers, which are the
3223  * only ones directly touching chip registers.  It's how they allocate
3224  * an spi_controller structure, prior to calling spi_register_controller().
3225  *
3226  * This must be called from context that can sleep.
3227  *
3228  * The caller is responsible for assigning the bus number and initializing the
3229  * controller's methods before calling spi_register_controller(); and (after
3230  * errors adding the device) calling spi_controller_put() to prevent a memory
3231  * leak.
3232  *
3233  * Return: the SPI controller structure on success, else NULL.
3234  */
3235 struct spi_controller *__spi_alloc_controller(struct device *dev,
3236 					      unsigned int size, bool target)
3237 {
3238 	struct spi_controller	*ctlr;
3239 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3240 
3241 	if (!dev)
3242 		return NULL;
3243 
3244 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3245 	if (!ctlr)
3246 		return NULL;
3247 
3248 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats();
3249 	if (!ctlr->pcpu_statistics) {
3250 		kfree(ctlr);
3251 		return NULL;
3252 	}
3253 
3254 	device_initialize(&ctlr->dev);
3255 	INIT_LIST_HEAD(&ctlr->queue);
3256 	spin_lock_init(&ctlr->queue_lock);
3257 	spin_lock_init(&ctlr->bus_lock_spinlock);
3258 	mutex_init(&ctlr->bus_lock_mutex);
3259 	mutex_init(&ctlr->io_mutex);
3260 	mutex_init(&ctlr->add_lock);
3261 	ctlr->bus_num = -1;
3262 	ctlr->num_chipselect = 1;
3263 	ctlr->num_data_lanes = 1;
3264 	ctlr->target = target;
3265 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
3266 		ctlr->dev.class = &spi_target_class;
3267 	else
3268 		ctlr->dev.class = &spi_controller_class;
3269 	ctlr->dev.parent = dev;
3270 
3271 	device_set_node(&ctlr->dev, dev_fwnode(dev));
3272 
3273 	pm_suspend_ignore_children(&ctlr->dev, true);
3274 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3275 
3276 	return ctlr;
3277 }
3278 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
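/*
 * Example (illustrative sketch): drivers normally reach this through the
 * spi_alloc_host()/spi_alloc_target() wrappers, or the devm_ managed
 * variant below. The foo_spi type is hypothetical.
 *
 *	struct spi_controller *ctlr;
 *	struct foo_spi *foo;
 *
 *	ctlr = spi_alloc_host(&pdev->dev, sizeof(*foo));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	foo = spi_controller_get_devdata(ctlr);
 */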
3279 
3280 static void devm_spi_release_controller(void *ctlr)
3281 {
3282 	spi_controller_put(ctlr);
3283 }
3284 
3285 /**
3286  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3287  * @dev: physical device of SPI controller
3288  * @size: how much zeroed driver-private data to allocate
3289  * @target: whether to allocate an SPI host (false) or SPI target (true) controller
3290  * Context: can sleep
3291  *
3292  * Allocate an SPI controller and automatically release a reference on it
3293  * when @dev is unbound from its driver.  Drivers are thus relieved from
3294  * having to call spi_controller_put().
3295  *
3296  * The arguments to this function are identical to __spi_alloc_controller().
3297  *
3298  * Return: the SPI controller structure on success, else NULL.
3299  */
3300 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3301 						   unsigned int size,
3302 						   bool target)
3303 {
3304 	struct spi_controller *ctlr;
3305 	int ret;
3306 
3307 	ctlr = __spi_alloc_controller(dev, size, target);
3308 	if (!ctlr)
3309 		return NULL;
3310 
3311 	ret = devm_add_action_or_reset(dev, devm_spi_release_controller, ctlr);
3312 	if (ret)
3313 		return NULL;
3314 
3315 	ctlr->devm_allocated = true;
3316 
3317 	return ctlr;
3318 }
3319 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3320 
3321 /**
3322  * spi_get_gpio_descs() - grab chip select GPIOs for the controller
3323  * @ctlr: The SPI controller to grab GPIO descriptors for
3324  */
3325 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3326 {
3327 	int nb, i;
3328 	struct gpio_desc **cs;
3329 	struct device *dev = &ctlr->dev;
3330 	unsigned long native_cs_mask = 0;
3331 	unsigned int num_cs_gpios = 0;
3332 
3333 	nb = gpiod_count(dev, "cs");
3334 	if (nb < 0) {
3335 		/* No GPIOs at all is fine, else return the error */
3336 		if (nb == -ENOENT)
3337 			return 0;
3338 		return nb;
3339 	}
3340 
3341 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3342 
3343 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3344 			  GFP_KERNEL);
3345 	if (!cs)
3346 		return -ENOMEM;
3347 	ctlr->cs_gpiods = cs;
3348 
3349 	for (i = 0; i < nb; i++) {
3350 		/*
3351 		 * Most chipselects are active low, the inverted
3352 		 * semantics are handled by special quirks in gpiolib,
3353 		 * so initializing them GPIOD_OUT_LOW here means
3354 		 * "unasserted", in most cases this will drive the physical
3355 		 * line high.
3356 		 */
3357 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3358 						      GPIOD_OUT_LOW);
3359 		if (IS_ERR(cs[i]))
3360 			return PTR_ERR(cs[i]);
3361 
3362 		if (cs[i]) {
3363 			/*
3364 			 * If we find a CS GPIO, name it after the device and
3365 			 * chip select line.
3366 			 */
3367 			char *gpioname;
3368 
3369 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3370 						  dev_name(dev), i);
3371 			if (!gpioname)
3372 				return -ENOMEM;
3373 			gpiod_set_consumer_name(cs[i], gpioname);
3374 			num_cs_gpios++;
3375 			continue;
3376 		}
3377 
3378 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3379 			dev_err(dev, "Invalid native chip select %d\n", i);
3380 			return -EINVAL;
3381 		}
3382 		native_cs_mask |= BIT(i);
3383 	}
3384 
3385 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3386 
3387 	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3388 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3389 		dev_err(dev, "No unused native chip select available\n");
3390 		return -EINVAL;
3391 	}
3392 
3393 	return 0;
3394 }
3395 
3396 static int spi_controller_check_ops(struct spi_controller *ctlr)
3397 {
3398 	/*
3399 	 * The controller may implement only the high-level SPI-memory like
3400 	 * operations if it does not support regular SPI transfers, and this is
3401 	 * a valid use case.
3402 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we require that at least
3403 	 * one of the ->transfer_xxx() methods be implemented.
3404 	 */
3405 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3406 		if (!ctlr->transfer && !ctlr->transfer_one &&
3407 		   !ctlr->transfer_one_message) {
3408 			return -EINVAL;
3409 		}
3410 	}
3411 
3412 	return 0;
3413 }
3414 
3415 /* Allocate a dynamic bus number using the Linux IDR */
3416 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3417 {
3418 	int id;
3419 
3420 	mutex_lock(&board_lock);
3421 	id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
3422 	mutex_unlock(&board_lock);
3423 	if (WARN(id < 0, "couldn't get idr"))
3424 		return id == -ENOSPC ? -EBUSY : id;
3425 	ctlr->bus_num = id;
3426 	return 0;
3427 }
3428 
3429 /**
3430  * spi_register_controller - register SPI host or target controller
3431  * @ctlr: initialized controller, originally from spi_alloc_host() or
3432  *	spi_alloc_target()
3433  * Context: can sleep
3434  *
3435  * SPI controllers connect to their drivers using some non-SPI bus,
3436  * such as the platform bus.  The final stage of probe() in that code
3437  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3438  *
3439  * SPI controllers use board-specific (often SoC-specific) bus numbers,
3440  * and board-specific addressing for SPI devices combines those numbers
3441  * with chip select numbers.  Since SPI does not directly support dynamic
3442  * device identification, boards need configuration tables telling which
3443  * chip is at which address.
3444  *
3445  * This must be called from context that can sleep.
3446  *
3447  * After a successful return, the caller is responsible for calling
3448  * spi_unregister_controller().
3449  *
3450  * Return: zero on success, else a negative error code.
3451  */
3452 int spi_register_controller(struct spi_controller *ctlr)
3453 {
3454 	struct device		*dev = ctlr->dev.parent;
3455 	struct boardinfo	*bi;
3456 	int			first_dynamic;
3457 	int			status;
3458 	int			idx;
3459 
3460 	if (!dev)
3461 		return -ENODEV;
3462 
3463 	/*
3464 	 * Make sure all necessary hooks are implemented before registering
3465 	 * the SPI controller.
3466 	 */
3467 	status = spi_controller_check_ops(ctlr);
3468 	if (status)
3469 		return status;
3470 
3471 	if (ctlr->bus_num < 0)
3472 		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3473 	if (ctlr->bus_num >= 0) {
3474 		/* Controllers with a fixed bus number must claim that exact number */
3475 		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3476 		if (status)
3477 			return status;
3478 	}
3479 	if (ctlr->bus_num < 0) {
3480 		first_dynamic = of_alias_get_highest_id("spi");
3481 		if (first_dynamic < 0)
3482 			first_dynamic = 0;
3483 		else
3484 			first_dynamic++;
3485 
3486 		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3487 		if (status)
3488 			return status;
3489 	}
3490 	ctlr->bus_lock_flag = 0;
3491 	init_completion(&ctlr->xfer_completion);
3492 	init_completion(&ctlr->cur_msg_completion);
3493 	if (!ctlr->max_dma_len)
3494 		ctlr->max_dma_len = INT_MAX;
3495 
3496 	/*
3497 	 * Register the device, then userspace will see it.
3498 	 * Registration fails if the bus ID is in use.
3499 	 */
3500 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3501 
3502 	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3503 		status = spi_get_gpio_descs(ctlr);
3504 		if (status)
3505 			goto free_bus_id;
3506 		/*
3507 		 * A controller using GPIO descriptors always
3508 		 * supports SPI_CS_HIGH if need be.
3509 		 */
3510 		ctlr->mode_bits |= SPI_CS_HIGH;
3511 	}
3512 
3513 	/*
3514 	 * Even if it's just one always-selected device, there must
3515 	 * be at least one chipselect.
3516 	 */
3517 	if (!ctlr->num_chipselect) {
3518 		status = -EINVAL;
3519 		goto free_bus_id;
3520 	}
3521 
3522 	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
3523 	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
3524 		ctlr->last_cs[idx] = SPI_INVALID_CS;
3525 
3526 	status = device_add(&ctlr->dev);
3527 	if (status < 0)
3528 		goto free_bus_id;
3529 	dev_dbg(dev, "registered %s %s\n",
3530 			spi_controller_is_target(ctlr) ? "target" : "host",
3531 			dev_name(&ctlr->dev));
3532 
3533 	/*
3534 	 * If we're using a queued driver, start the queue. Note that we don't
3535 	 * need the queueing logic if the driver is only supporting high-level
3536 	 * memory operations.
3537 	 */
3538 	if (ctlr->transfer) {
3539 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3540 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3541 		status = spi_controller_initialize_queue(ctlr);
3542 		if (status)
3543 			goto del_ctrl;
3544 	}
3545 
3546 	mutex_lock(&board_lock);
3547 	list_add_tail(&ctlr->list, &spi_controller_list);
3548 	list_for_each_entry(bi, &board_list, list)
3549 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3550 	mutex_unlock(&board_lock);
3551 
3552 	/* Register devices from the device tree and ACPI */
3553 	of_register_spi_devices(ctlr);
3554 	acpi_register_spi_devices(ctlr);
3555 	return status;
3556 
3557 del_ctrl:
3558 	device_del(&ctlr->dev);
3559 free_bus_id:
3560 	mutex_lock(&board_lock);
3561 	idr_remove(&spi_controller_idr, ctlr->bus_num);
3562 	mutex_unlock(&board_lock);
3563 	return status;
3564 }
3565 EXPORT_SYMBOL_GPL(spi_register_controller);
3566 
3567 static void devm_spi_unregister_controller(void *ctlr)
3568 {
3569 	spi_unregister_controller(ctlr);
3570 }
3571 
3572 /**
3573  * devm_spi_register_controller - register managed SPI host or target controller
3574  * @dev:    device managing SPI controller
3575  * @ctlr: initialized controller, originally from spi_alloc_host() or
3576  *	spi_alloc_target()
3577  * Context: can sleep
3578  *
3579  * Register an SPI controller as with spi_register_controller(); it will
3580  * automatically be unregistered (and freed, unless it has been allocated
3581  * using devm_spi_alloc_host/target()).
3582  *
3583  * Return: zero on success, else a negative error code.
3584  */
3585 int devm_spi_register_controller(struct device *dev,
3586 				 struct spi_controller *ctlr)
3587 {
3588 	int ret;
3589 
3590 	ret = spi_register_controller(ctlr);
3591 	if (ret)
3592 		return ret;
3593 
3594 	/*
3595 	 * Prevent controller from being freed by spi_unregister_controller()
3596 	 * if devm_add_action_or_reset() fails for a non-devres allocated
3597 	 * controller.
3598 	 */
3599 	spi_controller_get(ctlr);
3600 
3601 	ret = devm_add_action_or_reset(dev, devm_spi_unregister_controller, ctlr);
3602 
3603 	if (ret == 0 || ctlr->devm_allocated)
3604 		spi_controller_put(ctlr);
3605 
3606 	return ret;
3607 }
3608 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
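/*
 * Editorial example: completing the hypothetical foo_probe() sketch above,
 * registration can be made managed as well, so unbinding the platform device
 * both unregisters and releases the controller. The foo_transfer_one/foo_set_cs
 * hooks are invented names standing in for the driver's real callbacks.
 */
static int foo_register(struct platform_device *pdev,
			struct spi_controller *ctlr)
{
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
	ctlr->num_chipselect = 2;
	ctlr->transfer_one = foo_transfer_one;	/* hypothetical */
	ctlr->set_cs = foo_set_cs;		/* hypothetical */

	/* Unregistered (and, for devm allocations, freed) automatically */
	return devm_spi_register_controller(&pdev->dev, ctlr);
}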
3609 
3610 static int __unregister(struct device *dev, void *null)
3611 {
3612 	spi_unregister_device(to_spi_device(dev));
3613 	return 0;
3614 }
3615 
3616 /**
3617  * spi_unregister_controller - unregister SPI host or target controller
3618  * @ctlr: the controller being unregistered
3619  * Context: can sleep
3620  *
3621  * This call is used only by SPI controller drivers, which are the
3622  * only ones directly touching chip registers.
3623  *
3624  * This must be called from context that can sleep.
3625  *
3626  * Note that this function also drops a reference to the controller unless it
3627  * has been allocated using devm_spi_alloc_host/target().
3628  */
3629 void spi_unregister_controller(struct spi_controller *ctlr)
3630 {
3631 	struct spi_controller *found;
3632 	int id = ctlr->bus_num;
3633 
3634 	/* Prevent addition of new devices, unregister existing ones */
3635 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3636 		mutex_lock(&ctlr->add_lock);
3637 
3638 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3639 
3640 	/* First make sure that this controller was ever added */
3641 	mutex_lock(&board_lock);
3642 	found = idr_find(&spi_controller_idr, id);
3643 	mutex_unlock(&board_lock);
3644 	if (ctlr->queued) {
3645 		if (spi_destroy_queue(ctlr))
3646 			dev_err(&ctlr->dev, "queue remove failed\n");
3647 	}
3648 	mutex_lock(&board_lock);
3649 	list_del(&ctlr->list);
3650 	mutex_unlock(&board_lock);
3651 
3652 	device_del(&ctlr->dev);
3653 
3654 	/* Free bus id */
3655 	mutex_lock(&board_lock);
3656 	if (found == ctlr)
3657 		idr_remove(&spi_controller_idr, id);
3658 	mutex_unlock(&board_lock);
3659 
3660 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3661 		mutex_unlock(&ctlr->add_lock);
3662 
3663 	/*
3664 	 * Release the last reference on the controller if its driver
3665 	 * has not yet been converted to devm_spi_alloc_host/target().
3666 	 */
3667 	if (!ctlr->devm_allocated)
3668 		put_device(&ctlr->dev);
3669 }
3670 EXPORT_SYMBOL_GPL(spi_unregister_controller);
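/*
 * Editorial example: a driver that registered with plain
 * spi_register_controller() undoes it in its remove path. A minimal sketch;
 * "foo_remove" is an invented name, and the driver is assumed to have stored
 * the controller as platform drvdata.
 */
static void foo_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);

	/* Unregisters the child SPI devices, then drops the last reference */
	spi_unregister_controller(ctlr);
}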
3671 
3672 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3673 {
3674 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3675 }
3676 
3677 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3678 {
3679 	mutex_lock(&ctlr->bus_lock_mutex);
3680 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3681 	mutex_unlock(&ctlr->bus_lock_mutex);
3682 }
3683 
3684 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3685 {
3686 	mutex_lock(&ctlr->bus_lock_mutex);
3687 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3688 	mutex_unlock(&ctlr->bus_lock_mutex);
3689 }
3690 
3691 int spi_controller_suspend(struct spi_controller *ctlr)
3692 {
3693 	int ret = 0;
3694 
3695 	/* Basically no-ops for non-queued controllers */
3696 	if (ctlr->queued) {
3697 		ret = spi_stop_queue(ctlr);
3698 		if (ret)
3699 			dev_err(&ctlr->dev, "queue stop failed\n");
3700 	}
3701 
3702 	__spi_mark_suspended(ctlr);
3703 	return ret;
3704 }
3705 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3706 
3707 int spi_controller_resume(struct spi_controller *ctlr)
3708 {
3709 	int ret = 0;
3710 
3711 	__spi_mark_resumed(ctlr);
3712 
3713 	if (ctlr->queued) {
3714 		ret = spi_start_queue(ctlr);
3715 		if (ret)
3716 			dev_err(&ctlr->dev, "queue restart failed\n");
3717 	}
3718 	return ret;
3719 }
3720 EXPORT_SYMBOL_GPL(spi_controller_resume);
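/*
 * Editorial example: a controller driver's system-sleep hooks typically just
 * bracket their hardware handling with these two helpers. A minimal sketch;
 * the foo_* names are invented, and the controller is assumed to have been
 * stored as drvdata.
 */
static int foo_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	/* Stops the queue and sets the SPI_CONTROLLER_SUSPENDED flag */
	return spi_controller_suspend(ctlr);
}

static int foo_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	/* Clears the suspended flag and restarts the message queue */
	return spi_controller_resume(ctlr);
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);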
3721 
3722 /*-------------------------------------------------------------------------*/
3723 
3724 /* Core methods for spi_message alterations */
3725 
3726 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3727 					    struct spi_message *msg,
3728 					    void *res)
3729 {
3730 	struct spi_replaced_transfers *rxfer = res;
3731 	size_t i;
3732 
3733 	/* Call extra callback if requested */
3734 	if (rxfer->release)
3735 		rxfer->release(ctlr, msg, res);
3736 
3737 	/* Insert replaced transfers back into the message */
3738 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3739 
3740 	/* Remove the formerly inserted entries */
3741 	for (i = 0; i < rxfer->inserted; i++)
3742 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3743 }
3744 
3745 /**
3746  * spi_replace_transfers - replace transfers with several transfers
3747  *                         and register change with spi_message.resources
3748  * @msg:           the spi_message we work upon
3749  * @xfer_first:    the first spi_transfer we want to replace
3750  * @remove:        number of transfers to remove
3751  * @insert:        the number of transfers we want to insert instead
3752  * @release:       extra release code necessary in some circumstances
3753  * @extradatasize: extra data to allocate (with alignment guarantees
3754  *                 of struct @spi_transfer)
3755  * @gfp:           gfp flags
3756  *
3757  * Returns: pointer to @spi_replaced_transfers,
3758  *          PTR_ERR(...) in case of errors.
3759  */
3760 static struct spi_replaced_transfers *spi_replace_transfers(
3761 	struct spi_message *msg,
3762 	struct spi_transfer *xfer_first,
3763 	size_t remove,
3764 	size_t insert,
3765 	spi_replaced_release_t release,
3766 	size_t extradatasize,
3767 	gfp_t gfp)
3768 {
3769 	struct spi_replaced_transfers *rxfer;
3770 	struct spi_transfer *xfer;
3771 	size_t i;
3772 
3773 	/* Allocate the structure using spi_res */
3774 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3775 			      struct_size(rxfer, inserted_transfers, insert)
3776 			      + extradatasize,
3777 			      gfp);
3778 	if (!rxfer)
3779 		return ERR_PTR(-ENOMEM);
3780 
3781 	/* The release code to invoke before running the generic release */
3782 	rxfer->release = release;
3783 
3784 	/* Assign extradata */
3785 	if (extradatasize)
3786 		rxfer->extradata =
3787 			&rxfer->inserted_transfers[insert];
3788 
3789 	/* Init the replaced_transfers list */
3790 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3791 
3792 	/*
3793 	 * Assign the list_entry after which we should reinsert
3794 	 * the @replaced_transfers - it may be spi_message.messages!
3795 	 */
3796 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3797 
3798 	/* Remove the requested number of transfers */
3799 	for (i = 0; i < remove; i++) {
3800 		/*
3801 		 * If the entry after replaced_after is msg->transfers,
3802 		 * then we have been requested to remove more transfers
3803 		 * than are in the list.
3804 		 */
3805 		if (rxfer->replaced_after->next == &msg->transfers) {
3806 			dev_err(&msg->spi->dev,
3807 				"requested to remove more spi_transfers than are available\n");
3808 			/* Insert replaced transfers back into the message */
3809 			list_splice(&rxfer->replaced_transfers,
3810 				    rxfer->replaced_after);
3811 
3812 			/* Free the spi_replace_transfer structure... */
3813 			spi_res_free(rxfer);
3814 
3815 			/* ...and return with an error */
3816 			return ERR_PTR(-EINVAL);
3817 		}
3818 
3819 		/*
3820 		 * Remove the entry after replaced_after from list of
3821 		 * transfers and add it to list of replaced_transfers.
3822 		 */
3823 		list_move_tail(rxfer->replaced_after->next,
3824 			       &rxfer->replaced_transfers);
3825 	}
3826 
3827 	/*
3828 	 * Create copies of the given xfer with identical settings,
3829 	 * based on the first transfer that gets removed.
3830 	 */
3831 	for (i = 0; i < insert; i++) {
3832 		/* We need to run in reverse order */
3833 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3834 
3835 		/* Copy all spi_transfer data */
3836 		memcpy(xfer, xfer_first, sizeof(*xfer));
3837 
3838 		/* Add to list */
3839 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3840 
3841 		/* Clear cs_change and delay for all but the last */
3842 		if (i) {
3843 			xfer->cs_change = false;
3844 			xfer->delay.value = 0;
3845 		}
3846 	}
3847 
3848 	/* Set up inserted... */
3849 	rxfer->inserted = insert;
3850 
3851 	/* ...and register it with spi_res/spi_message */
3852 	spi_res_add(msg, rxfer);
3853 
3854 	return rxfer;
3855 }
3856 
3857 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3858 					struct spi_message *msg,
3859 					struct spi_transfer **xferp,
3860 					size_t maxsize)
3861 {
3862 	struct spi_transfer *xfer = *xferp, *xfers;
3863 	struct spi_replaced_transfers *srt;
3864 	size_t offset;
3865 	size_t count, i;
3866 
3867 	/* Calculate how many we have to replace */
3868 	count = DIV_ROUND_UP(xfer->len, maxsize);
3869 
3870 	/* Create replacement */
3871 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3872 	if (IS_ERR(srt))
3873 		return PTR_ERR(srt);
3874 	xfers = srt->inserted_transfers;
3875 
3876 	/*
3877 	 * Now handle each of those newly inserted spi_transfers.
3878 	 * Note that the replacement spi_transfers are all preset
3879 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3880 	 * are all identical (as well as most others),
3881 	 * so we just have to fix up len and the pointers.
3882 	 */
3883 
3884 	/*
3885 	 * The first transfer just needs the length modified, so we
3886 	 * run it outside the loop.
3887 	 */
3888 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3889 
3890 	/* All the others need rx_buf/tx_buf also set */
3891 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3892 		/* Update rx_buf, tx_buf and DMA */
3893 		if (xfers[i].rx_buf)
3894 			xfers[i].rx_buf += offset;
3895 		if (xfers[i].tx_buf)
3896 			xfers[i].tx_buf += offset;
3897 
3898 		/* Update length */
3899 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3900 	}
3901 
3902 	/*
3903 	 * We set up xferp to the last entry we have inserted,
3904 	 * so that we skip those already split transfers.
3905 	 */
3906 	*xferp = &xfers[count - 1];
3907 
3908 	/* Increment statistics counters */
3909 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3910 				       transfers_split_maxsize);
3911 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3912 				       transfers_split_maxsize);
3913 
3914 	return 0;
3915 }
3916 
3917 /**
3918  * spi_split_transfers_maxsize - split SPI transfers into multiple transfers
3919  *                               when an individual transfer exceeds a
3920  *                               certain size
3921  * @ctlr:    the @spi_controller for this transfer
3922  * @msg:   the @spi_message to transform
3923  * @maxsize:  the maximum size, in bytes, of an individual transfer
3924  *
3925  * This function allocates resources that are automatically freed during the
3926  * spi message unoptimize phase so this function should only be called from
3927  * optimize_message callbacks.
3928  *
3929  * Return: status of transformation
3930  */
3931 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3932 				struct spi_message *msg,
3933 				size_t maxsize)
3934 {
3935 	struct spi_transfer *xfer;
3936 	int ret;
3937 
3938 	/*
3939 	 * Iterate over the transfer_list,
3940 	 * but note that xfer is advanced to the last transfer inserted
3941 	 * to avoid checking sizes again unnecessarily (also xfer does
3942 	 * potentially belong to a different list by the time the
3943 	 * replacement has happened).
3944 	 */
3945 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3946 		if (xfer->len > maxsize) {
3947 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3948 							   maxsize);
3949 			if (ret)
3950 				return ret;
3951 		}
3952 	}
3953 
3954 	return 0;
3955 }
3956 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
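/*
 * Editorial example: a controller whose FIFO cannot move more than, say,
 * 256 bytes per transfer could split oversized messages from its
 * optimize_message() callback. A minimal sketch with an invented name and an
 * arbitrary limit; spi_split_transfers_maxwords() below is used the same way
 * when the limit is expressed in words rather than bytes.
 */
static int foo_optimize_message(struct spi_message *msg)
{
	/* Resources allocated here are freed when the message is unoptimized */
	return spi_split_transfers_maxsize(msg->spi->controller, msg, 256);
}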
3957 
3958 
3959 /**
3960  * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3961  *                                when an individual transfer exceeds a
3962  *                                certain number of SPI words
3963  * @ctlr:     the @spi_controller for this transfer
3964  * @msg:      the @spi_message to transform
3965  * @maxwords: the number of words to limit each transfer to
3966  *
3967  * This function allocates resources that are automatically freed during the
3968  * spi message unoptimize phase so this function should only be called from
3969  * optimize_message callbacks.
3970  *
3971  * Return: status of transformation
3972  */
3973 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3974 				 struct spi_message *msg,
3975 				 size_t maxwords)
3976 {
3977 	struct spi_transfer *xfer;
3978 
3979 	/*
3980 	 * Iterate over the transfer_list,
3981 	 * but note that xfer is advanced to the last transfer inserted
3982 	 * to avoid checking sizes again unnecessarily (also xfer does
3983 	 * potentially belong to a different list by the time the
3984 	 * replacement has happened).
3985 	 */
3986 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3987 		size_t maxsize;
3988 		int ret;
3989 
3990 		maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
3991 		if (xfer->len > maxsize) {
3992 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3993 							   maxsize);
3994 			if (ret)
3995 				return ret;
3996 		}
3997 	}
3998 
3999 	return 0;
4000 }
4001 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
4002 
4003 /*-------------------------------------------------------------------------*/
4004 
4005 /*
4006  * Core methods for SPI controller protocol drivers. Some of the
4007  * other core methods are currently defined as inline functions.
4008  */
4009 
4010 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
4011 					u8 bits_per_word)
4012 {
4013 	if (ctlr->bits_per_word_mask) {
4014 		/* Only 32 bits fit in the mask */
4015 		if (bits_per_word > 32)
4016 			return -EINVAL;
4017 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
4018 			return -EINVAL;
4019 	}
4020 
4021 	return 0;
4022 }
4023 
4024 /**
4025  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
4026  * @spi: the device that requires specific CS timing configuration
4027  *
4028  * Return: zero on success, else a negative error code.
4029  */
4030 static int spi_set_cs_timing(struct spi_device *spi)
4031 {
4032 	struct device *parent = spi->controller->dev.parent;
4033 	int status = 0;
4034 
4035 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
4036 		if (spi->controller->auto_runtime_pm) {
4037 			status = pm_runtime_get_sync(parent);
4038 			if (status < 0) {
4039 				pm_runtime_put_noidle(parent);
4040 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
4041 					status);
4042 				return status;
4043 			}
4044 
4045 			status = spi->controller->set_cs_timing(spi);
4046 			pm_runtime_put_autosuspend(parent);
4047 		} else {
4048 			status = spi->controller->set_cs_timing(spi);
4049 		}
4050 	}
4051 	return status;
4052 }
4053 
4054 static int __spi_setup(struct spi_device *spi, bool initial_setup)
4055 {
4056 	unsigned	bad_bits, ugly_bits;
4057 	int		status;
4058 
4059 	/*
4060 	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
4061 	 * from being set at the same time.
4062 	 */
4063 	if ((hweight_long(spi->mode &
4064 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
4065 	    (hweight_long(spi->mode &
4066 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
4067 		dev_err(&spi->dev,
4068 		"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
4069 		return -EINVAL;
4070 	}
4071 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
4072 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
4073 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
4074 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
4075 		return -EINVAL;
4076 	/* Check against conflicting MOSI idle configuration */
4077 	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
4078 		dev_err(&spi->dev,
4079 			"setup: MOSI configured to idle low and high at the same time.\n");
4080 		return -EINVAL;
4081 	}
4082 	/*
4083 	 * Help drivers fail *cleanly* when they need options
4084 	 * that aren't supported with their current controller.
4085 	 * SPI_CS_WORD has a fallback software implementation,
4086 	 * so it is ignored here.
4087 	 */
4088 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
4089 				 SPI_NO_TX | SPI_NO_RX);
4090 	ugly_bits = bad_bits &
4091 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
4092 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
4093 	if (ugly_bits) {
4094 		dev_warn(&spi->dev,
4095 			 "setup: ignoring unsupported mode bits %x\n",
4096 			 ugly_bits);
4097 		spi->mode &= ~ugly_bits;
4098 		bad_bits &= ~ugly_bits;
4099 	}
4100 	if (bad_bits) {
4101 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
4102 			bad_bits);
4103 		return -EINVAL;
4104 	}
4105 
4106 	if (!spi->bits_per_word) {
4107 		spi->bits_per_word = 8;
4108 	} else {
4109 		/*
4110 		 * Some controllers may not support the default 8 bits-per-word
4111 		 * so only perform the check when this is explicitly provided.
4112 		 */
4113 		status = __spi_validate_bits_per_word(spi->controller,
4114 						      spi->bits_per_word);
4115 		if (status)
4116 			return status;
4117 	}
4118 
4119 	if (spi->controller->max_speed_hz &&
4120 	    (!spi->max_speed_hz ||
4121 	     spi->max_speed_hz > spi->controller->max_speed_hz))
4122 		spi->max_speed_hz = spi->controller->max_speed_hz;
4123 
4124 	mutex_lock(&spi->controller->io_mutex);
4125 
4126 	if (spi->controller->setup) {
4127 		status = spi->controller->setup(spi);
4128 		if (status) {
4129 			mutex_unlock(&spi->controller->io_mutex);
4130 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
4131 				status);
4132 			return status;
4133 		}
4134 	}
4135 
4136 	status = spi_set_cs_timing(spi);
4137 	if (status) {
4138 		mutex_unlock(&spi->controller->io_mutex);
4139 		goto err_cleanup;
4140 	}
4141 
4142 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
4143 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
4144 		if (status < 0) {
4145 			mutex_unlock(&spi->controller->io_mutex);
4146 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
4147 				status);
4148 			goto err_cleanup;
4149 		}
4150 
4151 		/*
4152 		 * We do not want to return a positive value from pm_runtime_get;
4153 		 * there are many instances of devices calling spi_setup() and
4154 		 * checking for a non-zero return value instead of a negative
4155 		 * return value.
4156 		 */
4157 		status = 0;
4158 
4159 		spi_set_cs(spi, false, true);
4160 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
4161 	} else {
4162 		spi_set_cs(spi, false, true);
4163 	}
4164 
4165 	mutex_unlock(&spi->controller->io_mutex);
4166 
4167 	if (spi->rt && !spi->controller->rt) {
4168 		spi->controller->rt = true;
4169 		spi_set_thread_rt(spi->controller);
4170 	}
4171 
4172 	trace_spi_setup(spi, status);
4173 
4174 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4175 			spi->mode & SPI_MODE_X_MASK,
4176 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4177 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4178 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4179 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
4180 			spi->bits_per_word, spi->max_speed_hz,
4181 			status);
4182 
4183 	return status;
4184 
4185 err_cleanup:
4186 	if (initial_setup)
4187 		spi_cleanup(spi);
4188 
4189 	return status;
4190 }
4191 
4192 /**
4193  * spi_setup - setup SPI mode and clock rate
4194  * @spi: the device whose settings are being modified
4195  * Context: can sleep, and no requests are queued to the device
4196  *
4197  * SPI protocol drivers may need to update the transfer mode if the
4198  * device doesn't work with its default.  They may likewise need
4199  * to update clock rates or word sizes from initial values.  This function
4200  * changes those settings, and must be called from a context that can sleep.
4201  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
4202  * effect the next time the device is selected and data is transferred to
4203  * or from it.  When this function returns, the SPI device is deselected.
4204  *
4205  * Note that this call will fail if the protocol driver specifies an option
4206  * that the underlying controller or its driver does not support.  For
4207  * example, not all hardware supports wire transfers using nine bit words,
4208  * LSB-first wire encoding, or active-high chipselects.
4209  *
4210  * Return: zero on success, else a negative error code.
4211  */
4212 int spi_setup(struct spi_device *spi)
4213 {
4214 	return __spi_setup(spi, false);
4215 }
4216 EXPORT_SYMBOL_GPL(spi_setup);
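/*
 * Editorial example: a peripheral driver overriding the defaults before its
 * first transfer. A minimal sketch; "bar_probe" is an invented name and the
 * chosen mode, word size and clock cap are arbitrary.
 */
static int bar_probe(struct spi_device *spi)
{
	/* Request mode 3, 16-bit words, and cap the clock at 1 MHz */
	spi->mode |= SPI_MODE_3;
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;

	/* Fails if the controller cannot honour one of these options */
	return spi_setup(spi);
}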
4217 
4218 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4219 				       struct spi_device *spi)
4220 {
4221 	int delay1, delay2;
4222 
4223 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4224 	if (delay1 < 0)
4225 		return delay1;
4226 
4227 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4228 	if (delay2 < 0)
4229 		return delay2;
4230 
4231 	if (delay1 < delay2)
4232 		memcpy(&xfer->word_delay, &spi->word_delay,
4233 		       sizeof(xfer->word_delay));
4234 
4235 	return 0;
4236 }
4237 
4238 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4239 {
4240 	struct spi_controller *ctlr = spi->controller;
4241 	struct spi_transfer *xfer;
4242 	int w_size;
4243 
4244 	if (list_empty(&message->transfers))
4245 		return -EINVAL;
4246 
4247 	message->spi = spi;
4248 
4249 	/*
4250 	 * Half-duplex links include original MicroWire, and ones with
4251 	 * only one data pin like SPI_3WIRE (switches direction) or where
4252 	 * either MOSI or MISO is missing.  They can also be caused by
4253 	 * software limitations.
4254 	 */
4255 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4256 	    (spi->mode & SPI_3WIRE)) {
4257 		unsigned flags = ctlr->flags;
4258 
4259 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4260 			if (xfer->rx_buf && xfer->tx_buf)
4261 				return -EINVAL;
4262 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4263 				return -EINVAL;
4264 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4265 				return -EINVAL;
4266 		}
4267 	}
4268 
4269 	/*
4270 	 * Set transfer bits_per_word and max speed as spi device default if
4271 	 * it is not set for this transfer.
4272 	 * Set transfer tx_nbits and rx_nbits as single transfer default
4273 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4274 	 * Ensure transfer word_delay is at least as long as that required by
4275 	 * device itself.
4276 	 */
4277 	message->frame_length = 0;
4278 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4279 		xfer->effective_speed_hz = 0;
4280 		message->frame_length += xfer->len;
4281 		if (!xfer->bits_per_word)
4282 			xfer->bits_per_word = spi->bits_per_word;
4283 
4284 		if (!xfer->speed_hz)
4285 			xfer->speed_hz = spi->max_speed_hz;
4286 
4287 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4288 			xfer->speed_hz = ctlr->max_speed_hz;
4289 
4290 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4291 			return -EINVAL;
4292 
4293 		/* DDR mode is supported only if the controller has dtr_caps=true;
4294 		 * otherwise SDR mode is assumed.
4295 		 * Note: this is applicable only to QSPI controllers.
4296 		 */
4297 		if (xfer->dtr_mode && !ctlr->dtr_caps)
4298 			return -EINVAL;
4299 
4300 		/*
4301 		 * SPI transfer length should be multiple of SPI word size
4302 		 * where SPI word size should be power-of-two multiple.
4303 		 */
4304 		w_size = spi_bpw_to_bytes(xfer->bits_per_word);
4305 
4306 		/* No partial transfers accepted */
4307 		if (xfer->len % w_size)
4308 			return -EINVAL;
4309 
4310 		if (xfer->speed_hz && ctlr->min_speed_hz &&
4311 		    xfer->speed_hz < ctlr->min_speed_hz)
4312 			return -EINVAL;
4313 
4314 		if (xfer->tx_buf && !xfer->tx_nbits)
4315 			xfer->tx_nbits = SPI_NBITS_SINGLE;
4316 		if (xfer->rx_buf && !xfer->rx_nbits)
4317 			xfer->rx_nbits = SPI_NBITS_SINGLE;
4318 		/*
4319 		 * Check transfer tx/rx_nbits:
4320 		 * 1. check the value matches one of single, dual and quad
4321 		 * 2. check tx/rx_nbits match the mode in spi_device
4322 		 */
4323 		if (xfer->tx_buf) {
4324 			if (spi->mode & SPI_NO_TX)
4325 				return -EINVAL;
4326 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4327 				xfer->tx_nbits != SPI_NBITS_DUAL &&
4328 				xfer->tx_nbits != SPI_NBITS_QUAD &&
4329 				xfer->tx_nbits != SPI_NBITS_OCTAL)
4330 				return -EINVAL;
4331 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4332 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
4333 				return -EINVAL;
4334 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4335 				!(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
4336 				return -EINVAL;
4337 			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
4338 				!(spi->mode & SPI_TX_OCTAL))
4339 				return -EINVAL;
4340 		}
4341 		/* Check transfer rx_nbits */
4342 		if (xfer->rx_buf) {
4343 			if (spi->mode & SPI_NO_RX)
4344 				return -EINVAL;
4345 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4346 				xfer->rx_nbits != SPI_NBITS_DUAL &&
4347 				xfer->rx_nbits != SPI_NBITS_QUAD &&
4348 				xfer->rx_nbits != SPI_NBITS_OCTAL)
4349 				return -EINVAL;
4350 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4351 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
4352 				return -EINVAL;
4353 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4354 				!(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
4355 				return -EINVAL;
4356 			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
4357 				!(spi->mode & SPI_RX_OCTAL))
4358 				return -EINVAL;
4359 		}
4360 
4361 		if (_spi_xfer_word_delay_update(xfer, spi))
4362 			return -EINVAL;
4363 
4364 		/* Make sure controller supports required offload features. */
4365 		if (xfer->offload_flags) {
4366 			if (!message->offload)
4367 				return -EINVAL;
4368 
4369 			if (xfer->offload_flags & ~message->offload->xfer_flags)
4370 				return -EINVAL;
4371 		}
4372 	}
4373 
4374 	message->status = -EINPROGRESS;
4375 
4376 	return 0;
4377 }
4378 
4379 /*
4380  * spi_split_transfers - generic handling of transfer splitting
4381  * @msg: the message to split
4382  *
4383  * Under certain conditions, a SPI controller may not support arbitrary
4384  * transfer sizes or other features required by a peripheral. This function
4385  * will split the transfers in the message into smaller transfers that are
4386  * supported by the controller.
4387  *
4388  * Controllers with special requirements not covered here can also split
4389  * transfers in the optimize_message() callback.
4390  *
4391  * Context: can sleep
4392  * Return: zero on success, else a negative error code
4393  */
4394 static int spi_split_transfers(struct spi_message *msg)
4395 {
4396 	struct spi_controller *ctlr = msg->spi->controller;
4397 	struct spi_transfer *xfer;
4398 	int ret;
4399 
4400 	/*
4401 	 * If an SPI controller does not support toggling the CS line on each
4402 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4403 	 * for the CS line, we can emulate the CS-per-word hardware function by
4404 	 * splitting transfers into one-word transfers and ensuring that
4405 	 * cs_change is set for each transfer.
4406 	 */
4407 	if ((msg->spi->mode & SPI_CS_WORD) &&
4408 	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4409 		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4410 		if (ret)
4411 			return ret;
4412 
4413 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4414 			/* Don't change cs_change on the last entry in the list */
4415 			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4416 				break;
4417 
4418 			xfer->cs_change = 1;
4419 		}
4420 	} else {
4421 		ret = spi_split_transfers_maxsize(ctlr, msg,
4422 						  spi_max_transfer_size(msg->spi));
4423 		if (ret)
4424 			return ret;
4425 	}
4426 
4427 	return 0;
4428 }
4429 
4430 /*
4431  * __spi_optimize_message - shared implementation for spi_optimize_message()
4432  *                          and spi_maybe_optimize_message()
4433  * @spi: the device that will be used for the message
4434  * @msg: the message to optimize
4435  *
4436  * Peripheral drivers will call spi_optimize_message() and the spi core will
4437  * call spi_maybe_optimize_message() instead of calling this directly.
4438  *
4439  * It is not valid to call this on a message that has already been optimized.
4440  *
4441  * Return: zero on success, else a negative error code
4442  */
4443 static int __spi_optimize_message(struct spi_device *spi,
4444 				  struct spi_message *msg)
4445 {
4446 	struct spi_controller *ctlr = spi->controller;
4447 	int ret;
4448 
4449 	ret = __spi_validate(spi, msg);
4450 	if (ret)
4451 		return ret;
4452 
4453 	ret = spi_split_transfers(msg);
4454 	if (ret)
4455 		return ret;
4456 
4457 	if (ctlr->optimize_message) {
4458 		ret = ctlr->optimize_message(msg);
4459 		if (ret) {
4460 			spi_res_release(ctlr, msg);
4461 			return ret;
4462 		}
4463 	}
4464 
4465 	msg->optimized = true;
4466 
4467 	return 0;
4468 }
4469 
4470 /*
4471  * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4472  * @spi: the device that will be used for the message
4473  * @msg: the message to optimize
4474  * Return: zero on success, else a negative error code
4475  */
4476 static int spi_maybe_optimize_message(struct spi_device *spi,
4477 				      struct spi_message *msg)
4478 {
4479 	if (spi->controller->defer_optimize_message) {
4480 		msg->spi = spi;
4481 		return 0;
4482 	}
4483 
4484 	if (msg->pre_optimized)
4485 		return 0;
4486 
4487 	return __spi_optimize_message(spi, msg);
4488 }
4489 
4490 /**
4491  * spi_optimize_message - do any one-time validation and setup for a SPI message
4492  * @spi: the device that will be used for the message
4493  * @msg: the message to optimize
4494  *
4495  * Peripheral drivers that reuse the same message repeatedly may call this to
4496  * perform as much message prep as possible once, rather than repeating it
4497  * each time a message transfer is performed; this improves throughput and
4498  * reduces CPU usage.
4499  *
4500  * Once a message has been optimized, it cannot be modified with the exception
4501  * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4502  * only the data in the memory it points to).
4503  *
4504  * Calls to this function must be balanced with calls to spi_unoptimize_message()
4505  * to avoid leaking resources.
4506  *
4507  * Context: can sleep
4508  * Return: zero on success, else a negative error code
4509  */
4510 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4511 {
4512 	int ret;
4513 
4514 	/*
4515 	 * Pre-optimization is not supported and optimization is deferred, e.g.
4516 	 * when using spi-mux.
4517 	 */
4518 	if (spi->controller->defer_optimize_message)
4519 		return 0;
4520 
4521 	ret = __spi_optimize_message(spi, msg);
4522 	if (ret)
4523 		return ret;
4524 
4525 	/*
4526 	 * This flag indicates that the peripheral driver called spi_optimize_message()
4527 	 * and therefore we shouldn't unoptimize message automatically when finalizing
4528 	 * the message but rather wait until spi_unoptimize_message() is called
4529 	 * by the peripheral driver.
4530 	 */
4531 	msg->pre_optimized = true;
4532 
4533 	return 0;
4534 }
4535 EXPORT_SYMBOL_GPL(spi_optimize_message);
4536 
4537 /**
4538  * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4539  * @msg: the message to unoptimize
4540  *
4541  * Calls to this function must be balanced with calls to spi_optimize_message().
4542  *
4543  * Context: can sleep
4544  */
4545 void spi_unoptimize_message(struct spi_message *msg)
4546 {
4547 	if (msg->spi->controller->defer_optimize_message)
4548 		return;
4549 
4550 	__spi_unoptimize_message(msg);
4551 	msg->pre_optimized = false;
4552 }
4553 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
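/*
 * Editorial example: balancing spi_optimize_message() with
 * spi_unoptimize_message() around a burst of identical transfers. A minimal
 * sketch; "bar_stream" is an invented name and @msg is assumed to be fully
 * initialized by the caller.
 */
static int bar_stream(struct spi_device *spi, struct spi_message *msg, int n)
{
	int i, ret;

	/* Validate and split once, not on every iteration */
	ret = spi_optimize_message(spi, msg);
	if (ret)
		return ret;

	for (i = 0; i < n; i++) {
		ret = spi_sync(spi, msg);
		if (ret)
			break;
	}

	/* Must be balanced with the spi_optimize_message() above */
	spi_unoptimize_message(msg);
	return ret;
}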
4554 
4555 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4556 {
4557 	struct spi_controller *ctlr = spi->controller;
4558 	struct spi_transfer *xfer;
4559 
4560 	/*
4561 	 * Some controllers do not support doing regular SPI transfers. Return
4562 	 * ENOTSUPP when this is the case.
4563 	 */
4564 	if (!ctlr->transfer)
4565 		return -ENOTSUPP;
4566 
4567 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4568 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4569 
4570 	trace_spi_message_submit(message);
4571 
4572 	if (!ctlr->ptp_sts_supported) {
4573 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4574 			xfer->ptp_sts_word_pre = 0;
4575 			ptp_read_system_prets(xfer->ptp_sts);
4576 		}
4577 	}
4578 
4579 	return ctlr->transfer(spi, message);
4580 }
4581 
4582 static void devm_spi_unoptimize_message(void *msg)
4583 {
4584 	spi_unoptimize_message(msg);
4585 }
4586 
4587 /**
4588  * devm_spi_optimize_message - managed version of spi_optimize_message()
4589  * @dev: the device that manages @msg (usually @spi->dev)
4590  * @spi: the device that will be used for the message
4591  * @msg: the message to optimize
4592  * Return: zero on success, else a negative error code
4593  *
4594  * spi_unoptimize_message() will automatically be called when the device is
4595  * removed.
4596  */
4597 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4598 			      struct spi_message *msg)
4599 {
4600 	int ret;
4601 
4602 	ret = spi_optimize_message(spi, msg);
4603 	if (ret)
4604 		return ret;
4605 
4606 	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4607 }
4608 EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
4609 
4610 /**
4611  * spi_async - asynchronous SPI transfer
4612  * @spi: device with which data will be exchanged
4613  * @message: describes the data transfers, including completion callback
4614  * Context: any (IRQs may be blocked, etc)
4615  *
4616  * This call may be used in_irq and other contexts which can't sleep,
4617  * as well as from task contexts which can sleep.
4618  *
4619  * The completion callback is invoked in a context which can't sleep.
4620  * Before that invocation, the value of message->status is undefined.
4621  * When the callback is issued, message->status holds either zero (to
4622  * indicate complete success) or a negative error code.  After that
4623  * callback returns, the driver which issued the transfer request may
4624  * deallocate the associated memory; it's no longer in use by any SPI
4625  * core or controller driver code.
4626  *
4627  * Note that although all messages to a spi_device are handled in
4628  * FIFO order, messages may go to different devices in other orders.
4629  * Some device might be higher priority, or have various "hard" access
4630  * time requirements, for example.
4631  *
4632  * On detection of any fault during the transfer, processing of
4633  * the entire message is aborted, and the device is deselected.
4634  * Until returning from the associated message completion callback,
4635  * no other spi_message queued to that device will be processed.
4636  * (This rule applies equally to all the synchronous transfer calls,
4637  * which are wrappers around this core asynchronous primitive.)
4638  *
4639  * Return: zero on success, else a negative error code.
4640  */
4641 int spi_async(struct spi_device *spi, struct spi_message *message)
4642 {
4643 	struct spi_controller *ctlr = spi->controller;
4644 	int ret;
4645 	unsigned long flags;
4646 
4647 	ret = spi_maybe_optimize_message(spi, message);
4648 	if (ret)
4649 		return ret;
4650 
4651 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4652 
4653 	if (ctlr->bus_lock_flag)
4654 		ret = -EBUSY;
4655 	else
4656 		ret = __spi_async(spi, message);
4657 
4658 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4659 
4660 	return ret;
4661 }
4662 EXPORT_SYMBOL_GPL(spi_async);
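/*
 * Editorial example: submitting a message asynchronously and waiting for the
 * completion callback, which runs in a context that cannot sleep. A minimal
 * sketch; "bar_complete" and "bar_xfer_async" are invented names and @msg is
 * assumed to be fully initialized by the caller.
 */
static void bar_complete(void *arg)
{
	complete(arg);	/* called from a context that may not sleep */
}

static int bar_xfer_async(struct spi_device *spi, struct spi_message *msg)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	msg->complete = bar_complete;
	msg->context = &done;

	ret = spi_async(spi, msg);
	if (!ret)
		wait_for_completion(&done);

	/* On successful submission, report the message's final status */
	return ret ?: msg->status;
}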
4663 
4664 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4665 {
4666 	bool was_busy;
4667 	int ret;
4668 
4669 	mutex_lock(&ctlr->io_mutex);
4670 
4671 	was_busy = ctlr->busy;
4672 
4673 	ctlr->cur_msg = msg;
4674 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4675 	if (ret)
4676 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4677 	ctlr->cur_msg = NULL;
4678 	ctlr->fallback = false;
4679 
4680 	if (!was_busy) {
4681 		kfree(ctlr->dummy_rx);
4682 		ctlr->dummy_rx = NULL;
4683 		kfree(ctlr->dummy_tx);
4684 		ctlr->dummy_tx = NULL;
4685 		if (ctlr->unprepare_transfer_hardware &&
4686 		    ctlr->unprepare_transfer_hardware(ctlr))
4687 			dev_err(&ctlr->dev,
4688 				"failed to unprepare transfer hardware\n");
4689 		spi_idle_runtime_pm(ctlr);
4690 	}
4691 
4692 	mutex_unlock(&ctlr->io_mutex);
4693 }
4694 
4695 /*-------------------------------------------------------------------------*/
4696 
4697 /*
4698  * Utility methods for SPI protocol drivers, layered on
4699  * top of the core.  Some other utility methods are defined as
4700  * inline functions.
4701  */
4702 
4703 static void spi_complete(void *arg)
4704 {
4705 	complete(arg);
4706 }
4707 
4708 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4709 {
4710 	DECLARE_COMPLETION_ONSTACK(done);
4711 	unsigned long flags;
4712 	int status;
4713 	struct spi_controller *ctlr = spi->controller;
4714 
4715 	if (__spi_check_suspended(ctlr)) {
4716 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4717 		return -ESHUTDOWN;
4718 	}
4719 
4720 	status = spi_maybe_optimize_message(spi, message);
4721 	if (status)
4722 		return status;
4723 
4724 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4725 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4726 
4727 	/*
4728 	 * Checking queue_empty here only guarantees async/sync message
4729 	 * ordering when coming from the same context. It does not need to
4730 	 * guard against reentrancy from a different context. The io_mutex
4731 	 * will catch those cases.
4732 	 */
4733 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4734 		message->actual_length = 0;
4735 		message->status = -EINPROGRESS;
4736 
4737 		trace_spi_message_submit(message);
4738 
4739 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4740 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4741 
4742 		__spi_transfer_message_noqueue(ctlr, message);
4743 
4744 		return message->status;
4745 	}
4746 
4747 	/*
4748 	 * There are messages in the async queue that could have originated
4749 	 * from the same context, so we need to preserve ordering.
4750 	 * Therefore we send the message to the async queue and wait until it
4751 	 * has completed.
4752 	 */
4753 	message->complete = spi_complete;
4754 	message->context = &done;
4755 
4756 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4757 	status = __spi_async(spi, message);
4758 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4759 
4760 	if (status == 0) {
4761 		wait_for_completion(&done);
4762 		status = message->status;
4763 	}
4764 	message->complete = NULL;
4765 	message->context = NULL;
4766 
4767 	return status;
4768 }
4769 
4770 /**
4771  * spi_sync - blocking/synchronous SPI data transfers
4772  * @spi: device with which data will be exchanged
4773  * @message: describes the data transfers
4774  * Context: can sleep
4775  *
4776  * This call may only be used from a context that may sleep.  The sleep
4777  * is non-interruptible, and has no timeout.  Low-overhead controller
4778  * drivers may DMA directly into and out of the message buffers.
4779  *
4780  * Note that the SPI device's chip select is active during the message,
4781  * and then is normally disabled between messages.  Drivers for some
4782  * frequently-used devices may want to minimize costs of selecting a chip,
4783  * by leaving it selected in anticipation that the next message will go
4784  * to the same chip.  (That may increase power usage.)
4785  *
4786  * Also, the caller is guaranteeing that the memory associated with the
4787  * message will not be freed before this call returns.
4788  *
4789  * Return: zero on success, else a negative error code.
4790  */
4791 int spi_sync(struct spi_device *spi, struct spi_message *message)
4792 {
4793 	int ret;
4794 
4795 	mutex_lock(&spi->controller->bus_lock_mutex);
4796 	ret = __spi_sync(spi, message);
4797 	mutex_unlock(&spi->controller->bus_lock_mutex);
4798 
4799 	return ret;
4800 }
4801 EXPORT_SYMBOL_GPL(spi_sync);
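/*
 * Editorial example: a classic command-then-read exchange built from two
 * transfers and run synchronously. A minimal sketch; "bar_read_block" is an
 * invented name, and in a real driver @cmd and @data must point to DMA-safe
 * (heap, not stack) memory if the controller uses DMA.
 */
static int bar_read_block(struct spi_device *spi, u8 *cmd, u8 *data,
			  size_t len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd,  .len = 1   },
		{ .rx_buf = data, .len = len },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);
}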
4802 
4803 /**
4804  * spi_sync_locked - version of spi_sync with exclusive bus usage
4805  * @spi: device with which data will be exchanged
4806  * @message: describes the data transfers
4807  * Context: can sleep
4808  *
4809  * This call may only be used from a context that may sleep.  The sleep
4810  * is non-interruptible, and has no timeout.  Low-overhead controller
4811  * drivers may DMA directly into and out of the message buffers.
4812  *
4813  * This call should be used by drivers that require exclusive access to the
4814  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4815  * be released by a spi_bus_unlock call when the exclusive access is over.
4816  *
4817  * Return: zero on success, else a negative error code.
4818  */
4819 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4820 {
4821 	return __spi_sync(spi, message);
4822 }
4823 EXPORT_SYMBOL_GPL(spi_sync_locked);
4824 
4825 /**
4826  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4827  * @ctlr: SPI bus controller that should be locked for exclusive bus access
4828  * Context: can sleep
4829  *
4830  * This call may only be used from a context that may sleep.  The sleep
4831  * is non-interruptible, and has no timeout.
4832  *
4833  * This call should be used by drivers that require exclusive access to the
4834  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4835  * exclusive access is over. Data transfer must be done by spi_sync_locked
4836  * and spi_async_locked calls when the SPI bus lock is held.
4837  *
4838  * Return: always zero.
4839  */
4840 int spi_bus_lock(struct spi_controller *ctlr)
4841 {
4842 	unsigned long flags;
4843 
4844 	mutex_lock(&ctlr->bus_lock_mutex);
4845 
4846 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4847 	ctlr->bus_lock_flag = 1;
4848 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4849 
4850 	/* Mutex remains locked until spi_bus_unlock() is called */
4851 
4852 	return 0;
4853 }
4854 EXPORT_SYMBOL_GPL(spi_bus_lock);
4855 
4856 /**
4857  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4858  * @ctlr: SPI bus controller that was locked for exclusive bus access
4859  * Context: can sleep
4860  *
4861  * This call may only be used from a context that may sleep.  The sleep
4862  * is non-interruptible, and has no timeout.
4863  *
4864  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4865  * call.
4866  *
4867  * Return: always zero.
4868  */
4869 int spi_bus_unlock(struct spi_controller *ctlr)
4870 {
4871 	ctlr->bus_lock_flag = 0;
4872 
4873 	mutex_unlock(&ctlr->bus_lock_mutex);
4874 
4875 	return 0;
4876 }
4877 EXPORT_SYMBOL_GPL(spi_bus_unlock);
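/*
 * Editorial example: two dependent messages issued back to back with no other
 * traffic allowed on the bus in between. A minimal sketch; "bar_atomic_pair"
 * is an invented name.
 */
static int bar_atomic_pair(struct spi_device *spi, struct spi_message *m1,
			   struct spi_message *m2)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);

	/* Only the *_locked transfer calls may be used while the bus is held */
	ret = spi_sync_locked(spi, m1);
	if (!ret)
		ret = spi_sync_locked(spi, m2);

	spi_bus_unlock(ctlr);

	return ret;
}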
4878 
4879 /* Portable code must never pass more than 32 bytes */
4880 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4881 
4882 static u8	*buf;
4883 
4884 /**
4885  * spi_write_then_read - SPI synchronous write followed by read
4886  * @spi: device with which data will be exchanged
4887  * @txbuf: data to be written (need not be DMA-safe)
4888  * @n_tx: size of txbuf, in bytes
4889  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4890  * @n_rx: size of rxbuf, in bytes
4891  * Context: can sleep
4892  *
4893  * This performs a half-duplex MicroWire-style transaction with the
4894  * device, sending txbuf and then reading rxbuf.  The return value
4895  * is zero for success, else a negative errno status code.
4896  * This call may only be used from a context that may sleep.
4897  *
4898  * Parameters to this routine are always copied using a small buffer.
4899  * Performance-sensitive or bulk transfer code should instead use
4900  * spi_{async,sync}() calls with DMA-safe buffers.
4901  *
4902  * Return: zero on success, else a negative error code.
4903  */
4904 int spi_write_then_read(struct spi_device *spi,
4905 		const void *txbuf, unsigned n_tx,
4906 		void *rxbuf, unsigned n_rx)
4907 {
4908 	static DEFINE_MUTEX(lock);
4909 
4910 	int			status;
4911 	struct spi_message	message;
4912 	struct spi_transfer	x[2];
4913 	u8			*local_buf;
4914 
4915 	/*
4916 	 * Use preallocated DMA-safe buffer if we can. We can't avoid
4917 	 * copying here (as a pure convenience thing), but we can
4918 	 * keep heap costs out of the hot path unless someone else is
4919 	 * using the pre-allocated buffer or the transfer is too large.
4920 	 */
4921 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4922 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4923 				    GFP_KERNEL | GFP_DMA);
4924 		if (!local_buf)
4925 			return -ENOMEM;
4926 	} else {
4927 		local_buf = buf;
4928 	}
4929 
4930 	spi_message_init(&message);
4931 	memset(x, 0, sizeof(x));
4932 	if (n_tx) {
4933 		x[0].len = n_tx;
4934 		spi_message_add_tail(&x[0], &message);
4935 	}
4936 	if (n_rx) {
4937 		x[1].len = n_rx;
4938 		spi_message_add_tail(&x[1], &message);
4939 	}
4940 
4941 	memcpy(local_buf, txbuf, n_tx);
4942 	x[0].tx_buf = local_buf;
4943 	x[1].rx_buf = local_buf + n_tx;
4944 
4945 	/* Do the I/O */
4946 	status = spi_sync(spi, &message);
4947 	if (status == 0)
4948 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4949 
4950 	if (x[0].tx_buf == buf)
4951 		mutex_unlock(&lock);
4952 	else
4953 		kfree(local_buf);
4954 
4955 	return status;
4956 }
4957 EXPORT_SYMBOL_GPL(spi_write_then_read);
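/*
 * Editorial example: reading one register over a half-duplex link. Because
 * spi_write_then_read() copies through its own DMA-safe bounce buffer, stack
 * variables are fine here. "bar_read_reg" is an invented name.
 */
static int bar_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	/* Send the 1-byte register address, then clock in the 1-byte value */
	return spi_write_then_read(spi, &reg, 1, val, 1);
}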
4958 
4959 /*-------------------------------------------------------------------------*/
4960 
4961 #if IS_ENABLED(CONFIG_OF)
4962 /* The SPI controllers are not on spi_bus, so we find them another way */
4963 struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4964 {
4965 	struct device *dev;
4966 
4967 	dev = class_find_device_by_of_node(&spi_controller_class, node);
4968 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4969 		dev = class_find_device_by_of_node(&spi_target_class, node);
4970 	if (!dev)
4971 		return NULL;
4972 
4973 	/* Reference got in class_find_device */
4974 	return container_of(dev, struct spi_controller, dev);
4975 }
4976 EXPORT_SYMBOL_GPL(of_find_spi_controller_by_node);
4977 #endif
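
/*
 * Editorial example: a hedged sketch of looking up a controller from a
 * devicetree node with the helper above. The node pointer is assumed
 * to come from the caller; the reference taken by the lookup must be
 * dropped with put_device() when done.
 */
#if 0	/* illustrative only, not compiled */
static void example_node_lookup(struct device_node *np)
{
	struct spi_controller *ctlr;

	ctlr = of_find_spi_controller_by_node(np);
	if (!ctlr)
		return;

	dev_info(&ctlr->dev, "controller found for %pOF\n", np);
	put_device(&ctlr->dev);		/* balance the lookup's reference */
}
#endif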

#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* The returned spi_device must be released with put_device() when done */
static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);

	return dev ? to_spi_device(dev) : NULL;
}

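/*
 * Notifier for dynamic devicetree changes (e.g. overlays): register an
 * SPI device when a child node appears under a known controller, and
 * unregister it when the node is removed.
 */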
static int of_spi_notify(struct notifier_block *nb, unsigned long action,
			 void *arg)
{
	struct of_reconfig_data *rd = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (of_reconfig_get_state_change(action, arg)) {
	case OF_RECONFIG_CHANGE_ADD:
		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
		if (ctlr == NULL)
			return NOTIFY_OK;	/* Not for us */

		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
			put_device(&ctlr->dev);
			return NOTIFY_OK;
		}

		/*
		 * Clear the flag before adding the device so that fw_devlink
		 * doesn't skip adding consumers to this device.
		 */
		fwnode_clear_flag(&rd->dn->fwnode, FWNODE_FLAG_NOT_DEVICE);
		spi = of_register_spi_device(ctlr, rd->dn);
		put_device(&ctlr->dev);

		if (IS_ERR(spi)) {
			pr_err("%s: failed to create for '%pOF'\n",
					__func__, rd->dn);
			of_node_clear_flag(rd->dn, OF_POPULATED);
			return notifier_from_errno(PTR_ERR(spi));
		}
		break;

	case OF_RECONFIG_CHANGE_REMOVE:
		/* Already depopulated? */
		if (!of_node_check_flag(rd->dn, OF_POPULATED))
			return NOTIFY_OK;

		/* Find our device by node */
		spi = of_find_spi_device_by_node(rd->dn);
		if (spi == NULL)
			return NOTIFY_OK;	/* Not meant for us */

		/* Unregistering drops one reference */
		spi_unregister_device(spi);

		/* Drop the reference taken by the find above */
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_of_notifier = {
	.notifier_call = of_spi_notify,
};
#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
extern struct notifier_block spi_of_notifier;
#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
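
/*
 * Note: the declaration-only fallback above never needs a definition.
 * When CONFIG_OF_DYNAMIC is off, the IS_ENABLED() guard in spi_init()
 * makes the registration call dead code, so the compiler drops the
 * symbol reference before the linker could miss it.
 */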

#if IS_ENABLED(CONFIG_ACPI)
static int spi_acpi_controller_match(struct device *dev, const void *data)
{
	return device_match_acpi_dev(dev->parent, data);
}

struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = class_find_device(&spi_controller_class, NULL, adev,
				spi_acpi_controller_match);
	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
		dev = class_find_device(&spi_target_class, NULL, adev,
					spi_acpi_controller_match);
	if (!dev)
		return NULL;

	return container_of(dev, struct spi_controller, dev);
}
EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);

static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
{
	struct device *dev;

	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
	return to_spi_device(dev);
}

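/*
 * Notifier for ACPI hotplug: enumerate an SPI device when its ACPI
 * companion appears under a known controller, and unregister it on
 * removal.
 */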
static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
			   void *arg)
{
	struct acpi_device *adev = arg;
	struct spi_controller *ctlr;
	struct spi_device *spi;

	switch (value) {
	case ACPI_RECONFIG_DEVICE_ADD:
		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
		if (!ctlr)
			break;

		acpi_register_spi_device(ctlr, adev);
		put_device(&ctlr->dev);
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		if (!acpi_device_enumerated(adev))
			break;

		spi = acpi_spi_find_device_by_adev(adev);
		if (!spi)
			break;

		spi_unregister_device(spi);
		put_device(&spi->dev);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block spi_acpi_notifier = {
	.notifier_call = acpi_spi_notify,
};
#else
extern struct notifier_block spi_acpi_notifier;
#endif

static int __init spi_init(void)
{
	int	status;

	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
	if (!buf) {
		status = -ENOMEM;
		goto err0;
	}

	status = bus_register(&spi_bus_type);
	if (status < 0)
		goto err1;

	status = class_register(&spi_controller_class);
	if (status < 0)
		goto err2;

	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
		status = class_register(&spi_target_class);
		if (status < 0)
			goto err3;
	}

	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
	if (IS_ENABLED(CONFIG_ACPI))
		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));

	return 0;

err3:
	class_unregister(&spi_controller_class);
err2:
	bus_unregister(&spi_bus_type);
err1:
	kfree(buf);
	buf = NULL;
err0:
	return status;
}

/*
 * A board_info is normally registered in arch_initcall(),
 * but even essential drivers wait till later.
 *
 * REVISIT only boardinfo really needs static linking. The rest (device and
 * driver registration) _could_ be dynamically linked (modular) ... Costs
 * include needing to make the boardinfo data structures much more public.
 */
postcore_initcall(spi_init);