xref: /linux/drivers/spi/spi.c (revision 88d324e69ea9f3ae1c1905ea75d717c08bdb8e15)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6 
7 #include <linux/acpi.h>
8 #include <linux/cache.h>
9 #include <linux/clk/clk-conf.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/export.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/highmem.h>
17 #include <linux/idr.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/kernel.h>
21 #include <linux/kthread.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/mutex.h>
24 #include <linux/of_device.h>
25 #include <linux/of_irq.h>
26 #include <linux/percpu.h>
27 #include <linux/platform_data/x86/apple.h>
28 #include <linux/pm_domain.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/property.h>
31 #include <linux/ptp_clock_kernel.h>
32 #include <linux/sched/rt.h>
33 #include <linux/slab.h>
34 #include <linux/spi/spi.h>
35 #include <linux/spi/spi-mem.h>
36 #include <uapi/linux/sched/types.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/spi.h>
40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
41 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
42 
43 #include "internals.h"
44 
45 static DEFINE_IDR(spi_master_idr);
46 
47 static void spidev_release(struct device *dev)
48 {
49 	struct spi_device	*spi = to_spi_device(dev);
50 
51 	spi_controller_put(spi->controller);
52 	kfree(spi->driver_override);
53 	free_percpu(spi->pcpu_statistics);
54 	kfree(spi);
55 }
56 
57 static ssize_t
58 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
59 {
60 	const struct spi_device	*spi = to_spi_device(dev);
61 	int len;
62 
63 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 	if (len != -ENODEV)
65 		return len;
66 
67 	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
68 }
69 static DEVICE_ATTR_RO(modalias);
70 
71 static ssize_t driver_override_store(struct device *dev,
72 				     struct device_attribute *a,
73 				     const char *buf, size_t count)
74 {
75 	struct spi_device *spi = to_spi_device(dev);
76 	int ret;
77 
78 	ret = driver_set_override(dev, &spi->driver_override, buf, count);
79 	if (ret)
80 		return ret;
81 
82 	return count;
83 }
84 
85 static ssize_t driver_override_show(struct device *dev,
86 				    struct device_attribute *a, char *buf)
87 {
88 	const struct spi_device *spi = to_spi_device(dev);
89 	ssize_t len;
90 
91 	device_lock(dev);
92 	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
93 	device_unlock(dev);
94 	return len;
95 }
96 static DEVICE_ATTR_RW(driver_override);
97 
98 static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
99 {
100 	struct spi_statistics __percpu *pcpu_stats;
101 
102 	if (dev)
103 		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
104 	else
105 		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
106 
107 	if (pcpu_stats) {
108 		int cpu;
109 
110 		for_each_possible_cpu(cpu) {
111 			struct spi_statistics *stat;
112 
113 			stat = per_cpu_ptr(pcpu_stats, cpu);
114 			u64_stats_init(&stat->syncp);
115 		}
116 	}
117 	return pcpu_stats;
118 }
119 
120 static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
121 				   char *buf, size_t offset)
122 {
123 	u64 val = 0;
124 	int i;
125 
126 	for_each_possible_cpu(i) {
127 		const struct spi_statistics *pcpu_stats;
128 		u64_stats_t *field;
129 		unsigned int start;
130 		u64 inc;
131 
132 		pcpu_stats = per_cpu_ptr(stat, i);
133 		field = (void *)pcpu_stats + offset;
134 		do {
135 			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
136 			inc = u64_stats_read(field);
137 		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
138 		val += inc;
139 	}
140 	return sysfs_emit(buf, "%llu\n", val);
141 }
142 
143 #define SPI_STATISTICS_ATTRS(field, file)				\
144 static ssize_t spi_controller_##field##_show(struct device *dev,	\
145 					     struct device_attribute *attr, \
146 					     char *buf)			\
147 {									\
148 	struct spi_controller *ctlr = container_of(dev,			\
149 					 struct spi_controller, dev);	\
150 	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
151 }									\
152 static struct device_attribute dev_attr_spi_controller_##field = {	\
153 	.attr = { .name = file, .mode = 0444 },				\
154 	.show = spi_controller_##field##_show,				\
155 };									\
156 static ssize_t spi_device_##field##_show(struct device *dev,		\
157 					 struct device_attribute *attr,	\
158 					char *buf)			\
159 {									\
160 	struct spi_device *spi = to_spi_device(dev);			\
161 	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
162 }									\
163 static struct device_attribute dev_attr_spi_device_##field = {		\
164 	.attr = { .name = file, .mode = 0444 },				\
165 	.show = spi_device_##field##_show,				\
166 }
167 
168 #define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
169 static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
170 					    char *buf)			\
171 {									\
172 	return spi_emit_pcpu_stats(stat, buf,				\
173 			offsetof(struct spi_statistics, field));	\
174 }									\
175 SPI_STATISTICS_ATTRS(name, file)
176 
177 #define SPI_STATISTICS_SHOW(field)					\
178 	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
179 				 field)
180 
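/*
 * As a sketch of the macro plumbing above, SPI_STATISTICS_SHOW(messages)
 * expands (roughly) to:
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics __percpu *stat,
 *						    char *buf)
 *	{
 *		return spi_emit_pcpu_stats(stat, buf,
 *				offsetof(struct spi_statistics, messages));
 *	}
 *
 * plus, via SPI_STATISTICS_ATTRS(messages, "messages"), the controller
 * and device show functions and their read-only sysfs attributes.
 */
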
181 SPI_STATISTICS_SHOW(messages);
182 SPI_STATISTICS_SHOW(transfers);
183 SPI_STATISTICS_SHOW(errors);
184 SPI_STATISTICS_SHOW(timedout);
185 
186 SPI_STATISTICS_SHOW(spi_sync);
187 SPI_STATISTICS_SHOW(spi_sync_immediate);
188 SPI_STATISTICS_SHOW(spi_async);
189 
190 SPI_STATISTICS_SHOW(bytes);
191 SPI_STATISTICS_SHOW(bytes_rx);
192 SPI_STATISTICS_SHOW(bytes_tx);
193 
194 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
195 	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
196 				 "transfer_bytes_histo_" number,	\
197 				 transfer_bytes_histo[index])
198 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
208 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
209 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
210 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
211 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
212 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
213 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
214 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
215 
216 SPI_STATISTICS_SHOW(transfers_split_maxsize);
217 
218 static struct attribute *spi_dev_attrs[] = {
219 	&dev_attr_modalias.attr,
220 	&dev_attr_driver_override.attr,
221 	NULL,
222 };
223 
224 static const struct attribute_group spi_dev_group = {
225 	.attrs  = spi_dev_attrs,
226 };
227 
228 static struct attribute *spi_device_statistics_attrs[] = {
229 	&dev_attr_spi_device_messages.attr,
230 	&dev_attr_spi_device_transfers.attr,
231 	&dev_attr_spi_device_errors.attr,
232 	&dev_attr_spi_device_timedout.attr,
233 	&dev_attr_spi_device_spi_sync.attr,
234 	&dev_attr_spi_device_spi_sync_immediate.attr,
235 	&dev_attr_spi_device_spi_async.attr,
236 	&dev_attr_spi_device_bytes.attr,
237 	&dev_attr_spi_device_bytes_rx.attr,
238 	&dev_attr_spi_device_bytes_tx.attr,
239 	&dev_attr_spi_device_transfer_bytes_histo0.attr,
240 	&dev_attr_spi_device_transfer_bytes_histo1.attr,
241 	&dev_attr_spi_device_transfer_bytes_histo2.attr,
242 	&dev_attr_spi_device_transfer_bytes_histo3.attr,
243 	&dev_attr_spi_device_transfer_bytes_histo4.attr,
244 	&dev_attr_spi_device_transfer_bytes_histo5.attr,
245 	&dev_attr_spi_device_transfer_bytes_histo6.attr,
246 	&dev_attr_spi_device_transfer_bytes_histo7.attr,
247 	&dev_attr_spi_device_transfer_bytes_histo8.attr,
248 	&dev_attr_spi_device_transfer_bytes_histo9.attr,
249 	&dev_attr_spi_device_transfer_bytes_histo10.attr,
250 	&dev_attr_spi_device_transfer_bytes_histo11.attr,
251 	&dev_attr_spi_device_transfer_bytes_histo12.attr,
252 	&dev_attr_spi_device_transfer_bytes_histo13.attr,
253 	&dev_attr_spi_device_transfer_bytes_histo14.attr,
254 	&dev_attr_spi_device_transfer_bytes_histo15.attr,
255 	&dev_attr_spi_device_transfer_bytes_histo16.attr,
256 	&dev_attr_spi_device_transfers_split_maxsize.attr,
257 	NULL,
258 };
259 
260 static const struct attribute_group spi_device_statistics_group = {
261 	.name  = "statistics",
262 	.attrs  = spi_device_statistics_attrs,
263 };
264 
265 static const struct attribute_group *spi_dev_groups[] = {
266 	&spi_dev_group,
267 	&spi_device_statistics_group,
268 	NULL,
269 };
270 
271 static struct attribute *spi_controller_statistics_attrs[] = {
272 	&dev_attr_spi_controller_messages.attr,
273 	&dev_attr_spi_controller_transfers.attr,
274 	&dev_attr_spi_controller_errors.attr,
275 	&dev_attr_spi_controller_timedout.attr,
276 	&dev_attr_spi_controller_spi_sync.attr,
277 	&dev_attr_spi_controller_spi_sync_immediate.attr,
278 	&dev_attr_spi_controller_spi_async.attr,
279 	&dev_attr_spi_controller_bytes.attr,
280 	&dev_attr_spi_controller_bytes_rx.attr,
281 	&dev_attr_spi_controller_bytes_tx.attr,
282 	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
283 	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
284 	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
285 	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
286 	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
287 	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
288 	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
289 	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
290 	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
291 	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
292 	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
293 	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
294 	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
295 	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
296 	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
297 	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
298 	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
299 	&dev_attr_spi_controller_transfers_split_maxsize.attr,
300 	NULL,
301 };
302 
303 static const struct attribute_group spi_controller_statistics_group = {
304 	.name  = "statistics",
305 	.attrs  = spi_controller_statistics_attrs,
306 };
307 
308 static const struct attribute_group *spi_master_groups[] = {
309 	&spi_controller_statistics_group,
310 	NULL,
311 };
312 
313 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
314 					      struct spi_transfer *xfer,
315 					      struct spi_message *msg)
316 {
317 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
318 	struct spi_statistics *stats;
319 
320 	if (l2len < 0)
321 		l2len = 0;
322 
323 	get_cpu();
324 	stats = this_cpu_ptr(pcpu_stats);
325 	u64_stats_update_begin(&stats->syncp);
326 
327 	u64_stats_inc(&stats->transfers);
328 	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
329 
330 	u64_stats_add(&stats->bytes, xfer->len);
331 	if (spi_valid_txbuf(msg, xfer))
332 		u64_stats_add(&stats->bytes_tx, xfer->len);
333 	if (spi_valid_rxbuf(msg, xfer))
334 		u64_stats_add(&stats->bytes_rx, xfer->len);
335 
336 	u64_stats_update_end(&stats->syncp);
337 	put_cpu();
338 }
339 
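/*
 * Worked example of the histogram bucketing above: for xfer->len == 100,
 * fls(100) == 7, so l2len == 6 and the transfer is counted in
 * transfer_bytes_histo[6], i.e. the "64-127" bucket exposed in sysfs.
 * A zero-length transfer yields fls(0) - 1 == -1, which is clamped to
 * bucket 0 ("0-1").
 */
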
340 /*
341  * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
342  * and the sysfs version makes coldplug work too.
343  */
344 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
345 {
346 	while (id->name[0]) {
347 		if (!strcmp(name, id->name))
348 			return id;
349 		id++;
350 	}
351 	return NULL;
352 }
353 
354 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
355 {
356 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
357 
358 	return spi_match_id(sdrv->id_table, sdev->modalias);
359 }
360 EXPORT_SYMBOL_GPL(spi_get_device_id);
361 
362 const void *spi_get_device_match_data(const struct spi_device *sdev)
363 {
364 	const void *match;
365 
366 	match = device_get_match_data(&sdev->dev);
367 	if (match)
368 		return match;
369 
370 	return (const void *)spi_get_device_id(sdev)->driver_data;
371 }
372 EXPORT_SYMBOL_GPL(spi_get_device_match_data);
373 
374 static int spi_match_device(struct device *dev, const struct device_driver *drv)
375 {
376 	const struct spi_device	*spi = to_spi_device(dev);
377 	const struct spi_driver	*sdrv = to_spi_driver(drv);
378 
379 	/* Check override first, and if set, only use the named driver */
380 	if (spi->driver_override)
381 		return strcmp(spi->driver_override, drv->name) == 0;
382 
383 	/* Attempt an OF style match */
384 	if (of_driver_match_device(dev, drv))
385 		return 1;
386 
387 	/* Then try ACPI */
388 	if (acpi_driver_match_device(dev, drv))
389 		return 1;
390 
391 	if (sdrv->id_table)
392 		return !!spi_match_id(sdrv->id_table, spi->modalias);
393 
394 	return strcmp(spi->modalias, drv->name) == 0;
395 }
396 
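/*
 * Match precedence in spi_match_device(), in order: an explicit
 * driver_override (set via sysfs), an OF compatible match, an ACPI
 * match, the driver's id_table, and finally a bare comparison of the
 * device modalias against the driver name.
 */
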
397 static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
398 {
399 	const struct spi_device		*spi = to_spi_device(dev);
400 	int rc;
401 
402 	rc = acpi_device_uevent_modalias(dev, env);
403 	if (rc != -ENODEV)
404 		return rc;
405 
406 	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
407 }
408 
409 static int spi_probe(struct device *dev)
410 {
411 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
412 	struct spi_device		*spi = to_spi_device(dev);
413 	struct fwnode_handle		*fwnode = dev_fwnode(dev);
414 	int ret;
415 
416 	ret = of_clk_set_defaults(dev->of_node, false);
417 	if (ret)
418 		return ret;
419 
420 	if (is_of_node(fwnode))
421 		spi->irq = of_irq_get(dev->of_node, 0);
422 	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
423 		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
424 	if (spi->irq == -EPROBE_DEFER)
425 		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
426 	if (spi->irq < 0)
427 		spi->irq = 0;
428 
429 	ret = dev_pm_domain_attach(dev, true);
430 	if (ret)
431 		return ret;
432 
433 	if (sdrv->probe) {
434 		ret = sdrv->probe(spi);
435 		if (ret)
436 			dev_pm_domain_detach(dev, true);
437 	}
438 
439 	return ret;
440 }
441 
442 static void spi_remove(struct device *dev)
443 {
444 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
445 
446 	if (sdrv->remove)
447 		sdrv->remove(to_spi_device(dev));
448 
449 	dev_pm_domain_detach(dev, true);
450 }
451 
452 static void spi_shutdown(struct device *dev)
453 {
454 	if (dev->driver) {
455 		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
456 
457 		if (sdrv->shutdown)
458 			sdrv->shutdown(to_spi_device(dev));
459 	}
460 }
461 
462 const struct bus_type spi_bus_type = {
463 	.name		= "spi",
464 	.dev_groups	= spi_dev_groups,
465 	.match		= spi_match_device,
466 	.uevent		= spi_uevent,
467 	.probe		= spi_probe,
468 	.remove		= spi_remove,
469 	.shutdown	= spi_shutdown,
470 };
471 EXPORT_SYMBOL_GPL(spi_bus_type);
472 
473 /**
474  * __spi_register_driver - register a SPI driver
475  * @owner: owner module of the driver to register
476  * @sdrv: the driver to register
477  * Context: can sleep
478  *
479  * Return: zero on success, else a negative error code.
480  */
481 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
482 {
483 	sdrv->driver.owner = owner;
484 	sdrv->driver.bus = &spi_bus_type;
485 
486 	/*
487 	 * For Really Good Reasons we use spi: modaliases not of:
488 	 * modaliases for DT so module autoloading won't work if we
489 	 * don't have a spi_device_id as well as a compatible string.
490 	 */
491 	if (sdrv->driver.of_match_table) {
492 		const struct of_device_id *of_id;
493 
494 		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
495 		     of_id++) {
496 			const char *of_name;
497 
498 			/* Strip off any vendor prefix */
499 			of_name = strnchr(of_id->compatible,
500 					  sizeof(of_id->compatible), ',');
501 			if (of_name)
502 				of_name++;
503 			else
504 				of_name = of_id->compatible;
505 
506 			if (sdrv->id_table) {
507 				const struct spi_device_id *spi_id;
508 
509 				spi_id = spi_match_id(sdrv->id_table, of_name);
510 				if (spi_id)
511 					continue;
512 			} else {
513 				if (strcmp(sdrv->driver.name, of_name) == 0)
514 					continue;
515 			}
516 
517 			pr_warn("SPI driver %s has no spi_device_id for %s\n",
518 				sdrv->driver.name, of_id->compatible);
519 		}
520 	}
521 
522 	return driver_register(&sdrv->driver);
523 }
524 EXPORT_SYMBOL_GPL(__spi_register_driver);
525 
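/*
 * A minimal sketch (all names hypothetical) of a driver that satisfies
 * the check above by pairing each compatible with a spi_device_id whose
 * name matches the part after the vendor prefix:
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */
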
526 /*-------------------------------------------------------------------------*/
527 
528 /*
529  * SPI devices should normally not be created by SPI device drivers; that
530  * would make them board-specific.  Similarly with SPI controller drivers.
531  * Device registration normally goes into a board-specific file such as
532  * arch/.../mach.../board-YYY.c, with other read-only (flashable) mainboard device information.
533  */
534 
535 struct boardinfo {
536 	struct list_head	list;
537 	struct spi_board_info	board_info;
538 };
539 
540 static LIST_HEAD(board_list);
541 static LIST_HEAD(spi_controller_list);
542 
543 /*
544  * Used to protect add/del operations on the board_info list and the
545  * spi_controller list, as well as their matching process. Also used
546  * to protect objects of type struct idr.
547  */
548 static DEFINE_MUTEX(board_lock);
549 
550 /**
551  * spi_alloc_device - Allocate a new SPI device
552  * @ctlr: Controller to which device is connected
553  * Context: can sleep
554  *
555  * Allows a driver to allocate and initialize a spi_device without
556  * registering it immediately, so the driver can fill in the
557  * spi_device's parameters directly before calling spi_add_device()
558  * on it.
559  *
560  * The caller is responsible for calling spi_add_device() on the returned
561  * spi_device structure to add it to the SPI controller.  If the caller
562  * needs to discard the spi_device without adding it, then it should
563  * call spi_dev_put() on it.
564  *
565  * Return: a pointer to the new device, or NULL.
566  */
567 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
568 {
569 	struct spi_device	*spi;
570 
571 	if (!spi_controller_get(ctlr))
572 		return NULL;
573 
574 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
575 	if (!spi) {
576 		spi_controller_put(ctlr);
577 		return NULL;
578 	}
579 
580 	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
581 	if (!spi->pcpu_statistics) {
582 		kfree(spi);
583 		spi_controller_put(ctlr);
584 		return NULL;
585 	}
586 
587 	spi->controller = ctlr;
588 	spi->dev.parent = &ctlr->dev;
589 	spi->dev.bus = &spi_bus_type;
590 	spi->dev.release = spidev_release;
591 	spi->mode = ctlr->buswidth_override_bits;
592 
593 	device_initialize(&spi->dev);
594 	return spi;
595 }
596 EXPORT_SYMBOL_GPL(spi_alloc_device);
597 
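/*
 * A minimal sketch of the allocate-then-add flow described above (the
 * controller pointer, modalias and parameters are assumptions):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	spi_set_chipselect(spi, 0, 0);
 *	spi->max_speed_hz = 1000000;
 *
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);	// discard without adding
 */
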
598 static void spi_dev_set_name(struct spi_device *spi)
599 {
600 	struct device *dev = &spi->dev;
601 	struct fwnode_handle *fwnode = dev_fwnode(dev);
602 
603 	if (is_acpi_device_node(fwnode)) {
604 		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
605 		return;
606 	}
607 
608 	if (is_software_node(fwnode)) {
609 		dev_set_name(dev, "spi-%pfwP", fwnode);
610 		return;
611 	}
612 
613 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
614 		     spi_get_chipselect(spi, 0));
615 }
616 
617 /*
618  * Zero (0) is a valid physical CS value and can appear at any logical
619  * CS in spi->chip_select[]. If every entry were initialized to 0, it
620  * would be impossible to tell a valid physical CS 0 apart from an
621  * unused logical CS whose physical CS happens to be 0. To solve this,
622  * initialize all entries to -1: unused logical CS slots then carry a
623  * physical CS value of -1 and can be ignored during physical CS
624  * validity checks.
625  */
626 #define SPI_INVALID_CS		((s8)-1)
627 
628 static inline bool is_valid_cs(s8 chip_select)
629 {
630 	return chip_select != SPI_INVALID_CS;
631 }
632 
633 static inline int spi_dev_check_cs(struct device *dev,
634 				   struct spi_device *spi, u8 idx,
635 				   struct spi_device *new_spi, u8 new_idx)
636 {
637 	u8 cs, cs_new;
638 	u8 idx_new;
639 
640 	cs = spi_get_chipselect(spi, idx);
641 	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
642 		cs_new = spi_get_chipselect(new_spi, idx_new);
643 		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
644 			dev_err(dev, "chipselect %u already in use\n", cs_new);
645 			return -EBUSY;
646 		}
647 	}
648 	return 0;
649 }
650 
651 static int spi_dev_check(struct device *dev, void *data)
652 {
653 	struct spi_device *spi = to_spi_device(dev);
654 	struct spi_device *new_spi = data;
655 	int status, idx;
656 
657 	if (spi->controller == new_spi->controller) {
658 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
659 			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
660 			if (status)
661 				return status;
662 		}
663 	}
664 	return 0;
665 }
666 
667 static void spi_cleanup(struct spi_device *spi)
668 {
669 	if (spi->controller->cleanup)
670 		spi->controller->cleanup(spi);
671 }
672 
673 static int __spi_add_device(struct spi_device *spi)
674 {
675 	struct spi_controller *ctlr = spi->controller;
676 	struct device *dev = ctlr->dev.parent;
677 	int status, idx;
678 	u8 cs;
679 
680 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
681 		/* Chipselects are numbered 0..max; validate. */
682 		cs = spi_get_chipselect(spi, idx);
683 		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
684 			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
685 				ctlr->num_chipselect);
686 			return -EINVAL;
687 		}
688 	}
689 
690 	/*
691 	 * Make sure that multiple logical CS don't map to the same physical CS.
692 	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
693 	 */
694 	if (!spi_controller_is_target(ctlr)) {
695 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
696 			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
697 			if (status)
698 				return status;
699 		}
700 	}
701 
702 	/* Set the bus ID string */
703 	spi_dev_set_name(spi);
704 
705 	/*
706 	 * We need to make sure there's no other device with this
707 	 * chipselect **BEFORE** we call setup(), else we'll trash
708 	 * its configuration.
709 	 */
710 	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
711 	if (status)
712 		return status;
713 
714 	/* Controller may unregister concurrently */
715 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
716 	    !device_is_registered(&ctlr->dev)) {
717 		return -ENODEV;
718 	}
719 
720 	if (ctlr->cs_gpiods) {
721 		u8 cs;
722 
723 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
724 			cs = spi_get_chipselect(spi, idx);
725 			if (is_valid_cs(cs))
726 				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
727 		}
728 	}
729 
730 	/*
731 	 * Drivers may modify this initial i/o setup, but will
732 	 * normally rely on the device being setup.  Devices
733 	 * using SPI_CS_HIGH can't coexist well otherwise...
734 	 */
735 	status = spi_setup(spi);
736 	if (status < 0) {
737 		dev_err(dev, "can't setup %s, status %d\n",
738 				dev_name(&spi->dev), status);
739 		return status;
740 	}
741 
742 	/* Device may be bound to an active driver when this returns */
743 	status = device_add(&spi->dev);
744 	if (status < 0) {
745 		dev_err(dev, "can't add %s, status %d\n",
746 				dev_name(&spi->dev), status);
747 		spi_cleanup(spi);
748 	} else {
749 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
750 	}
751 
752 	return status;
753 }
754 
755 /**
756  * spi_add_device - Add spi_device allocated with spi_alloc_device
757  * @spi: spi_device to register
758  *
759  * Companion function to spi_alloc_device.  Devices allocated with
760  * spi_alloc_device can be added onto the SPI bus with this function.
761  *
762  * Return: 0 on success; negative errno on failure
763  */
764 int spi_add_device(struct spi_device *spi)
765 {
766 	struct spi_controller *ctlr = spi->controller;
767 	int status;
768 
769 	/* Set the bus ID string */
770 	spi_dev_set_name(spi);
771 
772 	mutex_lock(&ctlr->add_lock);
773 	status = __spi_add_device(spi);
774 	mutex_unlock(&ctlr->add_lock);
775 	return status;
776 }
777 EXPORT_SYMBOL_GPL(spi_add_device);
778 
779 static void spi_set_all_cs_unused(struct spi_device *spi)
780 {
781 	u8 idx;
782 
783 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
784 		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
785 }
786 
787 /**
788  * spi_new_device - instantiate one new SPI device
789  * @ctlr: Controller to which device is connected
790  * @chip: Describes the SPI device
791  * Context: can sleep
792  *
793  * On typical mainboards, this is purely internal; and it's not needed
794  * after board init creates the hard-wired devices.  Some development
795  * platforms may not be able to use spi_register_board_info though, and
796  * this is exported so that for example a USB or parport based adapter
797  * driver could add devices (which it would learn about out-of-band).
798  *
799  * Return: the new device, or NULL.
800  */
801 struct spi_device *spi_new_device(struct spi_controller *ctlr,
802 				  struct spi_board_info *chip)
803 {
804 	struct spi_device	*proxy;
805 	int			status;
806 
807 	/*
808 	 * NOTE:  caller did any chip->bus_num checks necessary.
809 	 *
810 	 * Also, unless we change the return value convention to use
811 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
812 	 * suggests syslogged diagnostics are best here (ugh).
813 	 */
814 
815 	proxy = spi_alloc_device(ctlr);
816 	if (!proxy)
817 		return NULL;
818 
819 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
820 
821 	/* Use provided chip-select for proxy device */
822 	spi_set_all_cs_unused(proxy);
823 	spi_set_chipselect(proxy, 0, chip->chip_select);
824 
825 	proxy->max_speed_hz = chip->max_speed_hz;
826 	proxy->mode = chip->mode;
827 	proxy->irq = chip->irq;
828 	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
829 	proxy->dev.platform_data = (void *) chip->platform_data;
830 	proxy->controller_data = chip->controller_data;
831 	proxy->controller_state = NULL;
832 	/*
833 	 * By default spi->chip_select[0] will hold the physical CS number,
834 	 * so set bit 0 in spi->cs_index_mask.
835 	 */
836 	proxy->cs_index_mask = BIT(0);
837 
838 	if (chip->swnode) {
839 		status = device_add_software_node(&proxy->dev, chip->swnode);
840 		if (status) {
841 			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
842 				chip->modalias, status);
843 			goto err_dev_put;
844 		}
845 	}
846 
847 	status = spi_add_device(proxy);
848 	if (status < 0)
849 		goto err_dev_put;
850 
851 	return proxy;
852 
853 err_dev_put:
854 	device_remove_software_node(&proxy->dev);
855 	spi_dev_put(proxy);
856 	return NULL;
857 }
858 EXPORT_SYMBOL_GPL(spi_new_device);
859 
860 /**
861  * spi_unregister_device - unregister a single SPI device
862  * @spi: spi_device to unregister
863  *
864  * Start making the passed SPI device vanish. Normally this would be handled
865  * by spi_unregister_controller().
866  */
867 void spi_unregister_device(struct spi_device *spi)
868 {
869 	struct fwnode_handle *fwnode;
870 
871 	if (!spi)
872 		return;
873 
874 	fwnode = dev_fwnode(&spi->dev);
875 	if (is_of_node(fwnode)) {
876 		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
877 		of_node_put(to_of_node(fwnode));
878 	} else if (is_acpi_device_node(fwnode)) {
879 		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
880 	}
881 	device_remove_software_node(&spi->dev);
882 	device_del(&spi->dev);
883 	spi_cleanup(spi);
884 	put_device(&spi->dev);
885 }
886 EXPORT_SYMBOL_GPL(spi_unregister_device);
887 
888 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
889 					      struct spi_board_info *bi)
890 {
891 	struct spi_device *dev;
892 
893 	if (ctlr->bus_num != bi->bus_num)
894 		return;
895 
896 	dev = spi_new_device(ctlr, bi);
897 	if (!dev)
898 		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
899 			bi->modalias);
900 }
901 
902 /**
903  * spi_register_board_info - register SPI devices for a given board
904  * @info: array of chip descriptors
905  * @n: how many descriptors are provided
906  * Context: can sleep
907  *
908  * Board-specific early init code calls this (probably during arch_initcall)
909  * with segments of the SPI device table.  Any device nodes are created later,
910  * after the relevant parent SPI controller (bus_num) is defined.  We keep
911  * this table of devices forever, so that reloading a controller driver will
912  * not make Linux forget about these hard-wired devices.
913  *
914  * Other code can also call this, e.g. a particular add-on board might provide
915  * SPI devices through its expansion connector, so code initializing that board
916  * would naturally declare its SPI devices.
917  *
918  * The board info passed can safely be __initdata ... but be careful of
919  * any embedded pointers (platform_data, etc); they're copied as-is.
920  *
921  * Return: zero on success, else a negative error code.
922  */
923 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
924 {
925 	struct boardinfo *bi;
926 	int i;
927 
928 	if (!n)
929 		return 0;
930 
931 	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
932 	if (!bi)
933 		return -ENOMEM;
934 
935 	for (i = 0; i < n; i++, bi++, info++) {
936 		struct spi_controller *ctlr;
937 
938 		memcpy(&bi->board_info, info, sizeof(*info));
939 
940 		mutex_lock(&board_lock);
941 		list_add_tail(&bi->list, &board_list);
942 		list_for_each_entry(ctlr, &spi_controller_list, list)
943 			spi_match_controller_to_boardinfo(ctlr,
944 							  &bi->board_info);
945 		mutex_unlock(&board_lock);
946 	}
947 
948 	return 0;
949 }
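
/*
 * A minimal sketch of a board file registering one device this way (all
 * values hypothetical):
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.max_speed_hz	= 5000000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 */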
950 
951 /*-------------------------------------------------------------------------*/
952 
953 /* Core methods for SPI resource management */
954 
955 /**
956  * spi_res_alloc - allocate a spi resource that is life-cycle managed
957  *                 during the processing of a spi_message while using
958  *                 spi_transfer_one
959  * @spi:     the SPI device for which we allocate memory
960  * @release: the release code to execute for this resource
961  * @size:    size to alloc and return
962  * @gfp:     GFP allocation flags
963  *
964  * Return: the pointer to the allocated data
965  *
966  * This may get enhanced in the future to allocate from a memory pool
967  * of the @spi_device or @spi_controller to avoid repeated allocations.
968  */
969 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
970 			   size_t size, gfp_t gfp)
971 {
972 	struct spi_res *sres;
973 
974 	sres = kzalloc(sizeof(*sres) + size, gfp);
975 	if (!sres)
976 		return NULL;
977 
978 	INIT_LIST_HEAD(&sres->entry);
979 	sres->release = release;
980 
981 	return sres->data;
982 }
983 
984 /**
985  * spi_res_free - free an SPI resource
986  * @res: pointer to the custom data of a resource
987  */
988 static void spi_res_free(void *res)
989 {
990 	struct spi_res *sres = container_of(res, struct spi_res, data);
991 
992 	WARN_ON(!list_empty(&sres->entry));
993 	kfree(sres);
994 }
995 
996 /**
997  * spi_res_add - add a spi_res to the spi_message
998  * @message: the SPI message
999  * @res:     the spi_resource
1000  */
1001 static void spi_res_add(struct spi_message *message, void *res)
1002 {
1003 	struct spi_res *sres = container_of(res, struct spi_res, data);
1004 
1005 	WARN_ON(!list_empty(&sres->entry));
1006 	list_add_tail(&sres->entry, &message->resources);
1007 }
1008 
1009 /**
1010  * spi_res_release - release all SPI resources for this message
1011  * @ctlr:  the @spi_controller
1012  * @message: the @spi_message
1013  */
1014 static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
1015 {
1016 	struct spi_res *res, *tmp;
1017 
1018 	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1019 		if (res->release)
1020 			res->release(ctlr, message, res->data);
1021 
1022 		list_del(&res->entry);
1023 
1024 		kfree(res);
1025 	}
1026 }
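
/*
 * Sketch of the resource lifecycle implemented above (the release
 * callback name is hypothetical): a helper allocates with
 * spi_res_alloc(spi, foo_release, size, GFP_KERNEL), ties the returned
 * data to a message with spi_res_add(msg, ptr), and once the message
 * completes spi_res_release() invokes foo_release() and frees every
 * entry in reverse order of addition.
 */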
1027 
1028 /*-------------------------------------------------------------------------*/
1029 #define spi_for_each_valid_cs(spi, idx)				\
1030 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
1031 		if (!(spi->cs_index_mask & BIT(idx))) {} else
1032 
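/*
 * Note the "if (!cond) {} else" form in the macro above: it keeps the
 * construct safe against a dangling else at the call site while still
 * restricting the loop body to chip selects enabled in
 * spi->cs_index_mask.
 */
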
1033 static inline bool spi_is_last_cs(struct spi_device *spi)
1034 {
1035 	u8 idx;
1036 	bool last = false;
1037 
1038 	spi_for_each_valid_cs(spi, idx) {
1039 		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1040 			last = true;
1041 	}
1042 	return last;
1043 }
1044 
1045 static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
1046 {
1047 	/*
1048 	 * Historically ACPI has had no means of expressing the GPIO
1049 	 * polarity, so the SPISerialBus() resource defines it on a
1050 	 * per-chip basis. To avoid a chain of negations, the GPIO
1051 	 * polarity is considered to be Active High. Even when _DSD()
1052 	 * is involved (in updated versions of ACPI), the GPIO CS
1053 	 * polarity must be defined Active High to avoid ambiguity.
1054 	 * That's why we use @enable, which takes SPI_CS_HIGH
1055 	 * into account.
1056 	 */
1057 	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
1058 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
1059 	else
1060 		/* Polarity handled by GPIO library */
1061 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
1062 
1063 	if (activate)
1064 		spi_delay_exec(&spi->cs_setup, NULL);
1065 	else
1066 		spi_delay_exec(&spi->cs_inactive, NULL);
1067 }
1068 
1069 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1070 {
1071 	bool activate = enable;
1072 	u8 idx;
1073 
1074 	/*
1075 	 * Avoid calling into the driver (or doing delays) if the chip select
1076 	 * isn't actually changing from the last time this was called.
1077 	 */
1078 	if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1079 			spi_is_last_cs(spi)) ||
1080 		       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1081 			!spi_is_last_cs(spi))) &&
1082 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1083 		return;
1084 
1085 	trace_spi_set_cs(spi, activate);
1086 
1087 	spi->controller->last_cs_index_mask = spi->cs_index_mask;
1088 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
1089 		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
1090 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1091 
1092 	if (spi->mode & SPI_CS_HIGH)
1093 		enable = !enable;
1094 
1095 	/*
1096 	 * Handle chip select delays for GPIO based CS or controllers without
1097 	 * programmable chip select timing.
1098 	 */
1099 	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1100 		spi_delay_exec(&spi->cs_hold, NULL);
1101 
1102 	if (spi_is_csgpiod(spi)) {
1103 		if (!(spi->mode & SPI_NO_CS)) {
1104 			spi_for_each_valid_cs(spi, idx) {
1105 				if (spi_get_csgpiod(spi, idx))
1106 					spi_toggle_csgpiod(spi, idx, enable, activate);
1107 			}
1108 		}
1109 		/* Some SPI masters need both GPIO CS & slave_select */
1110 		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1111 		    spi->controller->set_cs)
1112 			spi->controller->set_cs(spi, !enable);
1113 	} else if (spi->controller->set_cs) {
1114 		spi->controller->set_cs(spi, !enable);
1115 	}
1116 
1117 	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1118 		if (activate)
1119 			spi_delay_exec(&spi->cs_setup, NULL);
1120 		else
1121 			spi_delay_exec(&spi->cs_inactive, NULL);
1122 	}
1123 }
1124 
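/*
 * Note on the two booleans used above: @activate preserves the caller's
 * assert/deassert intent and drives the cs_setup/cs_hold/cs_inactive
 * delays, while @enable is inverted for SPI_CS_HIGH devices so that the
 * !enable handed to the controller's set_cs() is always the logic level
 * to drive on the chip select line.
 */
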
1125 #ifdef CONFIG_HAS_DMA
1126 static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1127 			     struct sg_table *sgt, void *buf, size_t len,
1128 			     enum dma_data_direction dir, unsigned long attrs)
1129 {
1130 	const bool vmalloced_buf = is_vmalloc_addr(buf);
1131 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
1132 #ifdef CONFIG_HIGHMEM
1133 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1134 				(unsigned long)buf < (PKMAP_BASE +
1135 					(LAST_PKMAP * PAGE_SIZE)));
1136 #else
1137 	const bool kmap_buf = false;
1138 #endif
1139 	int desc_len;
1140 	int sgs;
1141 	struct page *vm_page;
1142 	struct scatterlist *sg;
1143 	void *sg_buf;
1144 	size_t min;
1145 	int i, ret;
1146 
1147 	if (vmalloced_buf || kmap_buf) {
1148 		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1149 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1150 	} else if (virt_addr_valid(buf)) {
1151 		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1152 		sgs = DIV_ROUND_UP(len, desc_len);
1153 	} else {
1154 		return -EINVAL;
1155 	}
1156 
1157 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1158 	if (ret != 0)
1159 		return ret;
1160 
1161 	sg = &sgt->sgl[0];
1162 	for (i = 0; i < sgs; i++) {
1163 
1164 		if (vmalloced_buf || kmap_buf) {
1165 			/*
1166 			 * Next scatterlist entry size is the minimum between
1167 			 * the desc_len and the remaining buffer length that
1168 			 * fits in a page.
1169 			 */
1170 			min = min_t(size_t, desc_len,
1171 				    min_t(size_t, len,
1172 					  PAGE_SIZE - offset_in_page(buf)));
1173 			if (vmalloced_buf)
1174 				vm_page = vmalloc_to_page(buf);
1175 			else
1176 				vm_page = kmap_to_page(buf);
1177 			if (!vm_page) {
1178 				sg_free_table(sgt);
1179 				return -ENOMEM;
1180 			}
1181 			sg_set_page(sg, vm_page,
1182 				    min, offset_in_page(buf));
1183 		} else {
1184 			min = min_t(size_t, len, desc_len);
1185 			sg_buf = buf;
1186 			sg_set_buf(sg, sg_buf, min);
1187 		}
1188 
1189 		buf += min;
1190 		len -= min;
1191 		sg = sg_next(sg);
1192 	}
1193 
1194 	ret = dma_map_sgtable(dev, sgt, dir, attrs);
1195 	if (ret < 0) {
1196 		sg_free_table(sgt);
1197 		return ret;
1198 	}
1199 
1200 	return 0;
1201 }
1202 
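/*
 * Worked example of the segment math above: a vmalloc'ed buffer of
 * 10240 bytes starting 256 bytes into a page, with 4 KiB pages and a
 * max segment size of at least PAGE_SIZE, gives desc_len == 4096 and
 * sgs == DIV_ROUND_UP(10240 + 256, 4096) == 3 scatterlist entries.
 */
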
1203 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1204 		struct sg_table *sgt, void *buf, size_t len,
1205 		enum dma_data_direction dir)
1206 {
1207 	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1208 }
1209 
1210 static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1211 				struct device *dev, struct sg_table *sgt,
1212 				enum dma_data_direction dir,
1213 				unsigned long attrs)
1214 {
1215 	dma_unmap_sgtable(dev, sgt, dir, attrs);
1216 	sg_free_table(sgt);
1217 	sgt->orig_nents = 0;
1218 	sgt->nents = 0;
1219 }
1220 
1221 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1222 		   struct sg_table *sgt, enum dma_data_direction dir)
1223 {
1224 	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1225 }
1226 
1227 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1228 {
1229 	struct device *tx_dev, *rx_dev;
1230 	struct spi_transfer *xfer;
1231 	int ret;
1232 
1233 	if (!ctlr->can_dma)
1234 		return 0;
1235 
1236 	if (ctlr->dma_tx)
1237 		tx_dev = ctlr->dma_tx->device->dev;
1238 	else if (ctlr->dma_map_dev)
1239 		tx_dev = ctlr->dma_map_dev;
1240 	else
1241 		tx_dev = ctlr->dev.parent;
1242 
1243 	if (ctlr->dma_rx)
1244 		rx_dev = ctlr->dma_rx->device->dev;
1245 	else if (ctlr->dma_map_dev)
1246 		rx_dev = ctlr->dma_map_dev;
1247 	else
1248 		rx_dev = ctlr->dev.parent;
1249 
1250 	ret = -ENOMSG;
1251 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1252 		/* The sync is done before each transfer. */
1253 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1254 
1255 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1256 			continue;
1257 
1258 		if (xfer->tx_buf != NULL) {
1259 			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1260 						(void *)xfer->tx_buf,
1261 						xfer->len, DMA_TO_DEVICE,
1262 						attrs);
1263 			if (ret != 0)
1264 				return ret;
1265 
1266 			xfer->tx_sg_mapped = true;
1267 		}
1268 
1269 		if (xfer->rx_buf != NULL) {
1270 			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1271 						xfer->rx_buf, xfer->len,
1272 						DMA_FROM_DEVICE, attrs);
1273 			if (ret != 0) {
1274 				spi_unmap_buf_attrs(ctlr, tx_dev,
1275 						&xfer->tx_sg, DMA_TO_DEVICE,
1276 						attrs);
1277 
1278 				return ret;
1279 			}
1280 
1281 			xfer->rx_sg_mapped = true;
1282 		}
1283 	}
1284 	/* No transfer has been mapped, bail out with success */
1285 	if (ret)
1286 		return 0;
1287 
1288 	ctlr->cur_rx_dma_dev = rx_dev;
1289 	ctlr->cur_tx_dma_dev = tx_dev;
1290 
1291 	return 0;
1292 }
1293 
1294 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1295 {
1296 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1297 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1298 	struct spi_transfer *xfer;
1299 
1300 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1301 		/* The sync has already been done after each transfer. */
1302 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1303 
1304 		if (xfer->rx_sg_mapped)
1305 			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1306 					    DMA_FROM_DEVICE, attrs);
1307 		xfer->rx_sg_mapped = false;
1308 
1309 		if (xfer->tx_sg_mapped)
1310 			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1311 					    DMA_TO_DEVICE, attrs);
1312 		xfer->tx_sg_mapped = false;
1313 	}
1314 
1315 	return 0;
1316 }
1317 
1318 static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1319 				    struct spi_transfer *xfer)
1320 {
1321 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1322 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1323 
1324 	if (xfer->tx_sg_mapped)
1325 		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1326 	if (xfer->rx_sg_mapped)
1327 		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1328 }
1329 
1330 static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1331 				 struct spi_transfer *xfer)
1332 {
1333 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1334 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1335 
1336 	if (xfer->rx_sg_mapped)
1337 		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1338 	if (xfer->tx_sg_mapped)
1339 		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1340 }
1341 #else /* !CONFIG_HAS_DMA */
1342 static inline int __spi_map_msg(struct spi_controller *ctlr,
1343 				struct spi_message *msg)
1344 {
1345 	return 0;
1346 }
1347 
1348 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1349 				  struct spi_message *msg)
1350 {
1351 	return 0;
1352 }
1353 
1354 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1355 				    struct spi_transfer *xfer)
1356 {
1357 }
1358 
1359 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1360 				 struct spi_transfer *xfer)
1361 {
1362 }
1363 #endif /* !CONFIG_HAS_DMA */
1364 
1365 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1366 				struct spi_message *msg)
1367 {
1368 	struct spi_transfer *xfer;
1369 
1370 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1371 		/*
1372 		 * Restore tx_buf and rx_buf to NULL if they were replaced
1373 		 * by the controller's dummy buffers in spi_map_msg().
1374 		 */
1375 		if (xfer->tx_buf == ctlr->dummy_tx)
1376 			xfer->tx_buf = NULL;
1377 		if (xfer->rx_buf == ctlr->dummy_rx)
1378 			xfer->rx_buf = NULL;
1379 	}
1380 
1381 	return __spi_unmap_msg(ctlr, msg);
1382 }
1383 
1384 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1385 {
1386 	struct spi_transfer *xfer;
1387 	void *tmp;
1388 	unsigned int max_tx, max_rx;
1389 
1390 	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1391 		&& !(msg->spi->mode & SPI_3WIRE)) {
1392 		max_tx = 0;
1393 		max_rx = 0;
1394 
1395 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1396 			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1397 			    !xfer->tx_buf)
1398 				max_tx = max(xfer->len, max_tx);
1399 			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1400 			    !xfer->rx_buf)
1401 				max_rx = max(xfer->len, max_rx);
1402 		}
1403 
1404 		if (max_tx) {
1405 			tmp = krealloc(ctlr->dummy_tx, max_tx,
1406 				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1407 			if (!tmp)
1408 				return -ENOMEM;
1409 			ctlr->dummy_tx = tmp;
1410 		}
1411 
1412 		if (max_rx) {
1413 			tmp = krealloc(ctlr->dummy_rx, max_rx,
1414 				       GFP_KERNEL | GFP_DMA);
1415 			if (!tmp)
1416 				return -ENOMEM;
1417 			ctlr->dummy_rx = tmp;
1418 		}
1419 
1420 		if (max_tx || max_rx) {
1421 			list_for_each_entry(xfer, &msg->transfers,
1422 					    transfer_list) {
1423 				if (!xfer->len)
1424 					continue;
1425 				if (!xfer->tx_buf)
1426 					xfer->tx_buf = ctlr->dummy_tx;
1427 				if (!xfer->rx_buf)
1428 					xfer->rx_buf = ctlr->dummy_rx;
1429 			}
1430 		}
1431 	}
1432 
1433 	return __spi_map_msg(ctlr, msg);
1434 }
1435 
1436 static int spi_transfer_wait(struct spi_controller *ctlr,
1437 			     struct spi_message *msg,
1438 			     struct spi_transfer *xfer)
1439 {
1440 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1441 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1442 	u32 speed_hz = xfer->speed_hz;
1443 	unsigned long long ms;
1444 
1445 	if (spi_controller_is_target(ctlr)) {
1446 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1447 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1448 			return -EINTR;
1449 		}
1450 	} else {
1451 		if (!speed_hz)
1452 			speed_hz = 100000;
1453 
1454 		/*
1455 		 * For each byte we wait for 8 cycles of the SPI clock.
1456 		 * Since speed is defined in Hz and we want milliseconds,
1457 		 * apply the multiplier before the division; otherwise
1458 		 * we may get 0 for short transfers.
1459 		 */
1460 		ms = 8LL * MSEC_PER_SEC * xfer->len;
1461 		do_div(ms, speed_hz);
1462 
1463 		/*
1464 		 * Double it and add a 200 ms tolerance; use the
1465 		 * predefined maximum in case of overflow.
1466 		 */
1467 		ms += ms + 200;
1468 		if (ms > UINT_MAX)
1469 			ms = UINT_MAX;
1470 
1471 		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1472 						 msecs_to_jiffies(ms));
1473 
1474 		if (ms == 0) {
1475 			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1476 			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1477 			dev_err(&msg->spi->dev,
1478 				"SPI transfer timed out\n");
1479 			return -ETIMEDOUT;
1480 		}
1481 
1482 		if (xfer->error & SPI_TRANS_FAIL_IO)
1483 			return -EIO;
1484 	}
1485 
1486 	return 0;
1487 }
1488 
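/*
 * Worked example of the timeout above: a 100-byte transfer at the
 * 100 kHz fallback rate gives ms = 8 * 1000 * 100 / 100000 = 8, which
 * becomes 2 * 8 + 200 = 216 ms of waiting before -ETIMEDOUT.
 */
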
1489 static void _spi_transfer_delay_ns(u32 ns)
1490 {
1491 	if (!ns)
1492 		return;
1493 	if (ns <= NSEC_PER_USEC) {
1494 		ndelay(ns);
1495 	} else {
1496 		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1497 
1498 		if (us <= 10)
1499 			udelay(us);
1500 		else
1501 			usleep_range(us, us + DIV_ROUND_UP(us, 10));
1502 	}
1503 }
1504 
1505 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1506 {
1507 	u32 delay = _delay->value;
1508 	u32 unit = _delay->unit;
1509 	u32 hz;
1510 
1511 	if (!delay)
1512 		return 0;
1513 
1514 	switch (unit) {
1515 	case SPI_DELAY_UNIT_USECS:
1516 		delay *= NSEC_PER_USEC;
1517 		break;
1518 	case SPI_DELAY_UNIT_NSECS:
1519 		/* Nothing to do here */
1520 		break;
1521 	case SPI_DELAY_UNIT_SCK:
1522 		/* Clock cycles need to be obtained from spi_transfer */
1523 		if (!xfer)
1524 			return -EINVAL;
1525 		/*
1526 		 * If the effective speed is unknown, approximate it by
1527 		 * underestimating with half of the requested Hz.
1528 		 */
1529 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1530 		if (!hz)
1531 			return -EINVAL;
1532 
1533 		/* Convert delay to nanoseconds */
1534 		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1535 		break;
1536 	default:
1537 		return -EINVAL;
1538 	}
1539 
1540 	return delay;
1541 }
1542 EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1543 
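/*
 * Worked example for SPI_DELAY_UNIT_SCK: with a delay value of 4 and
 * xfer->effective_speed_hz == 10000000 (10 MHz), one clock cycle is
 * DIV_ROUND_UP(NSEC_PER_SEC, hz) == 100 ns, so the function returns
 * 400 (nanoseconds).
 */
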
1544 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1545 {
1546 	int delay;
1547 
1548 	might_sleep();
1549 
1550 	if (!_delay)
1551 		return -EINVAL;
1552 
1553 	delay = spi_delay_to_ns(_delay, xfer);
1554 	if (delay < 0)
1555 		return delay;
1556 
1557 	_spi_transfer_delay_ns(delay);
1558 
1559 	return 0;
1560 }
1561 EXPORT_SYMBOL_GPL(spi_delay_exec);
1562 
1563 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1564 					  struct spi_transfer *xfer)
1565 {
1566 	u32 default_delay_ns = 10 * NSEC_PER_USEC;
1567 	u32 delay = xfer->cs_change_delay.value;
1568 	u32 unit = xfer->cs_change_delay.unit;
1569 	int ret;
1570 
1571 	/* Return early on "fast" mode - for everything but USECS */
1572 	if (!delay) {
1573 		if (unit == SPI_DELAY_UNIT_USECS)
1574 			_spi_transfer_delay_ns(default_delay_ns);
1575 		return;
1576 	}
1577 
1578 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1579 	if (ret) {
1580 		dev_err_once(&msg->spi->dev,
1581 			     "Use of unsupported delay unit %i, using default of %luus\n",
1582 			     unit, default_delay_ns / NSEC_PER_USEC);
1583 		_spi_transfer_delay_ns(default_delay_ns);
1584 	}
1585 }
1586 
1587 void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1588 						  struct spi_transfer *xfer)
1589 {
1590 	_spi_transfer_cs_change_delay(msg, xfer);
1591 }
1592 EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1593 
1594 /*
1595  * spi_transfer_one_message - Default implementation of transfer_one_message()
1596  *
1597  * This is a standard implementation of transfer_one_message() for
1598  * drivers which implement a transfer_one() operation.  It provides
1599  * standard handling of delays and chip select management.
1600  */
1601 static int spi_transfer_one_message(struct spi_controller *ctlr,
1602 				    struct spi_message *msg)
1603 {
1604 	struct spi_transfer *xfer;
1605 	bool keep_cs = false;
1606 	int ret = 0;
1607 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1608 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1609 
1610 	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1611 	spi_set_cs(msg->spi, !xfer->cs_off, false);
1612 
1613 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1614 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1615 
1616 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1617 		trace_spi_transfer_start(msg, xfer);
1618 
1619 		spi_statistics_add_transfer_stats(statm, xfer, msg);
1620 		spi_statistics_add_transfer_stats(stats, xfer, msg);
1621 
1622 		if (!ctlr->ptp_sts_supported) {
1623 			xfer->ptp_sts_word_pre = 0;
1624 			ptp_read_system_prets(xfer->ptp_sts);
1625 		}
1626 
1627 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1628 			reinit_completion(&ctlr->xfer_completion);
1629 
1630 fallback_pio:
1631 			spi_dma_sync_for_device(ctlr, xfer);
1632 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1633 			if (ret < 0) {
1634 				spi_dma_sync_for_cpu(ctlr, xfer);
1635 
1636 				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1637 				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1638 					__spi_unmap_msg(ctlr, msg);
1639 					ctlr->fallback = true;
1640 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1641 					goto fallback_pio;
1642 				}
1643 
1644 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1645 							       errors);
1646 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1647 							       errors);
1648 				dev_err(&msg->spi->dev,
1649 					"SPI transfer failed: %d\n", ret);
1650 				goto out;
1651 			}
1652 
1653 			if (ret > 0) {
1654 				ret = spi_transfer_wait(ctlr, msg, xfer);
1655 				if (ret < 0)
1656 					msg->status = ret;
1657 			}
1658 
1659 			spi_dma_sync_for_cpu(ctlr, xfer);
1660 		} else {
1661 			if (xfer->len)
1662 				dev_err(&msg->spi->dev,
1663 					"Bufferless transfer has length %u\n",
1664 					xfer->len);
1665 		}
1666 
1667 		if (!ctlr->ptp_sts_supported) {
1668 			ptp_read_system_postts(xfer->ptp_sts);
1669 			xfer->ptp_sts_word_post = xfer->len;
1670 		}
1671 
1672 		trace_spi_transfer_stop(msg, xfer);
1673 
1674 		if (msg->status != -EINPROGRESS)
1675 			goto out;
1676 
1677 		spi_transfer_delay_exec(xfer);
1678 
1679 		if (xfer->cs_change) {
1680 			if (list_is_last(&xfer->transfer_list,
1681 					 &msg->transfers)) {
1682 				keep_cs = true;
1683 			} else {
1684 				if (!xfer->cs_off)
1685 					spi_set_cs(msg->spi, false, false);
1686 				_spi_transfer_cs_change_delay(msg, xfer);
1687 				if (!list_next_entry(xfer, transfer_list)->cs_off)
1688 					spi_set_cs(msg->spi, true, false);
1689 			}
1690 		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1691 			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1692 			spi_set_cs(msg->spi, xfer->cs_off, false);
1693 		}
1694 
1695 		msg->actual_length += xfer->len;
1696 	}
1697 
1698 out:
1699 	if (ret != 0 || !keep_cs)
1700 		spi_set_cs(msg->spi, false, false);
1701 
1702 	if (msg->status == -EINPROGRESS)
1703 		msg->status = ret;
1704 
1705 	if (msg->status && ctlr->handle_err)
1706 		ctlr->handle_err(ctlr, msg);
1707 
1708 	spi_finalize_current_message(ctlr);
1709 
1710 	return ret;
1711 }
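
/*
 * Illustrative sketch, not part of this file: a controller driver that
 * wants the default message handling above typically implements only
 * transfer_one() and returns a positive value when the transfer will
 * complete asynchronously. All "foo_*" names below are hypothetical.
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_program_fifo(ctlr, xfer);	// hypothetical HW setup
 *		foo_start(ctlr);		// hypothetical kick-off
 *		return 1;	// ask the core to wait for completion
 *	}
 */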
1712 
1713 /**
1714  * spi_finalize_current_transfer - report completion of a transfer
1715  * @ctlr: the controller reporting completion
1716  *
1717  * Called by SPI drivers using the core transfer_one_message()
1718  * implementation to notify it that the current interrupt-driven
1719  * transfer has finished and the next one may be scheduled.
1720  */
1721 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1722 {
1723 	complete(&ctlr->xfer_completion);
1724 }
1725 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
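
/*
 * Illustrative sketch (hypothetical "foo_*" names): drivers built on the
 * core transfer_one() flow usually call this from their completion
 * interrupt handler once the hardware signals end of transfer.
 *
 *	static irqreturn_t foo_spi_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		foo_ack_irq(ctlr);		// hypothetical helper
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */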
1726 
1727 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1728 {
1729 	if (ctlr->auto_runtime_pm) {
1730 		pm_runtime_mark_last_busy(ctlr->dev.parent);
1731 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1732 	}
1733 }
1734 
1735 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1736 		struct spi_message *msg, bool was_busy)
1737 {
1738 	struct spi_transfer *xfer;
1739 	int ret;
1740 
1741 	if (!was_busy && ctlr->auto_runtime_pm) {
1742 		ret = pm_runtime_get_sync(ctlr->dev.parent);
1743 		if (ret < 0) {
1744 			pm_runtime_put_noidle(ctlr->dev.parent);
1745 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
1746 				ret);
1747 
1748 			msg->status = ret;
1749 			spi_finalize_current_message(ctlr);
1750 
1751 			return ret;
1752 		}
1753 	}
1754 
1755 	if (!was_busy)
1756 		trace_spi_controller_busy(ctlr);
1757 
1758 	if (!was_busy && ctlr->prepare_transfer_hardware) {
1759 		ret = ctlr->prepare_transfer_hardware(ctlr);
1760 		if (ret) {
1761 			dev_err(&ctlr->dev,
1762 				"failed to prepare transfer hardware: %d\n",
1763 				ret);
1764 
1765 			if (ctlr->auto_runtime_pm)
1766 				pm_runtime_put(ctlr->dev.parent);
1767 
1768 			msg->status = ret;
1769 			spi_finalize_current_message(ctlr);
1770 
1771 			return ret;
1772 		}
1773 	}
1774 
1775 	trace_spi_message_start(msg);
1776 
1777 	if (ctlr->prepare_message) {
1778 		ret = ctlr->prepare_message(ctlr, msg);
1779 		if (ret) {
1780 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1781 				ret);
1782 			msg->status = ret;
1783 			spi_finalize_current_message(ctlr);
1784 			return ret;
1785 		}
1786 		msg->prepared = true;
1787 	}
1788 
1789 	ret = spi_map_msg(ctlr, msg);
1790 	if (ret) {
1791 		msg->status = ret;
1792 		spi_finalize_current_message(ctlr);
1793 		return ret;
1794 	}
1795 
1796 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1797 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1798 			xfer->ptp_sts_word_pre = 0;
1799 			ptp_read_system_prets(xfer->ptp_sts);
1800 		}
1801 	}
1802 
1803 	/*
1804 	 * A driver's implementation of transfer_one_message() must arrange for
1805 	 * spi_finalize_current_message() to get called. Most drivers will do
1806 	 * this in the calling context, but some don't. For those cases, a
1807 	 * completion is used to guarantee that this function does not return
1808 	 * until spi_finalize_current_message() is done accessing
1809 	 * ctlr->cur_msg.
1810 	 * The following two flags allow the completion to be skipped
1811 	 * opportunistically, since using it involves expensive spin locks.
1812 	 * In case of a race with the context that calls
1813 	 * spi_finalize_current_message(), the completion will always be used,
1814 	 * due to strict ordering of these flags using barriers.
1815 	 */
1816 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1817 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1818 	reinit_completion(&ctlr->cur_msg_completion);
1819 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1820 
1821 	ret = ctlr->transfer_one_message(ctlr, msg);
1822 	if (ret) {
1823 		dev_err(&ctlr->dev,
1824 			"failed to transfer one message from queue\n");
1825 		return ret;
1826 	}
1827 
1828 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1829 	smp_mb(); /* See spi_finalize_current_message()... */
1830 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1831 		wait_for_completion(&ctlr->cur_msg_completion);
1832 
1833 	return 0;
1834 }
1835 
1836 /**
1837  * __spi_pump_messages - function which processes SPI message queue
1838  * @ctlr: controller to process queue for
1839  * @in_kthread: true if we are in the context of the message pump thread
1840  *
1841  * This function checks if there is any SPI message in the queue that
1842  * needs processing and if so call out to the driver to initialize hardware
1843  * and transfer each message.
1844  *
1845  * Note that it is called both from the kthread itself and also from
1846  * inside spi_sync(); the queue extraction handling at the top of the
1847  * function should deal with this safely.
1848  */
1849 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1850 {
1851 	struct spi_message *msg;
1852 	bool was_busy = false;
1853 	unsigned long flags;
1854 	int ret;
1855 
1856 	/* Take the I/O mutex */
1857 	mutex_lock(&ctlr->io_mutex);
1858 
1859 	/* Lock queue */
1860 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1861 
1862 	/* Make sure we are not already running a message */
1863 	if (ctlr->cur_msg)
1864 		goto out_unlock;
1865 
1866 	/* Check if the queue is idle */
1867 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1868 		if (!ctlr->busy)
1869 			goto out_unlock;
1870 
1871 		/* Defer any non-atomic teardown to the thread */
1872 		if (!in_kthread) {
1873 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1874 			    !ctlr->unprepare_transfer_hardware) {
1875 				spi_idle_runtime_pm(ctlr);
1876 				ctlr->busy = false;
1877 				ctlr->queue_empty = true;
1878 				trace_spi_controller_idle(ctlr);
1879 			} else {
1880 				kthread_queue_work(ctlr->kworker,
1881 						   &ctlr->pump_messages);
1882 			}
1883 			goto out_unlock;
1884 		}
1885 
1886 		ctlr->busy = false;
1887 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1888 
1889 		kfree(ctlr->dummy_rx);
1890 		ctlr->dummy_rx = NULL;
1891 		kfree(ctlr->dummy_tx);
1892 		ctlr->dummy_tx = NULL;
1893 		if (ctlr->unprepare_transfer_hardware &&
1894 		    ctlr->unprepare_transfer_hardware(ctlr))
1895 			dev_err(&ctlr->dev,
1896 				"failed to unprepare transfer hardware\n");
1897 		spi_idle_runtime_pm(ctlr);
1898 		trace_spi_controller_idle(ctlr);
1899 
1900 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1901 		ctlr->queue_empty = true;
1902 		goto out_unlock;
1903 	}
1904 
1905 	/* Extract head of queue */
1906 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1907 	ctlr->cur_msg = msg;
1908 
1909 	list_del_init(&msg->queue);
1910 	if (ctlr->busy)
1911 		was_busy = true;
1912 	else
1913 		ctlr->busy = true;
1914 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1915 
1916 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1917 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1918 
1919 	ctlr->cur_msg = NULL;
1920 	ctlr->fallback = false;
1921 
1922 	mutex_unlock(&ctlr->io_mutex);
1923 
1924 	/* Prod the scheduler in case transfer_one() was busy waiting */
1925 	if (!ret)
1926 		cond_resched();
1927 	return;
1928 
1929 out_unlock:
1930 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1931 	mutex_unlock(&ctlr->io_mutex);
1932 }
1933 
1934 /**
1935  * spi_pump_messages - kthread work function which processes spi message queue
1936  * @work: pointer to kthread work struct contained in the controller struct
1937  */
1938 static void spi_pump_messages(struct kthread_work *work)
1939 {
1940 	struct spi_controller *ctlr =
1941 		container_of(work, struct spi_controller, pump_messages);
1942 
1943 	__spi_pump_messages(ctlr, true);
1944 }
1945 
1946 /**
1947  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1948  * @ctlr: Pointer to the spi_controller structure of the driver
1949  * @xfer: Pointer to the transfer being timestamped
1950  * @progress: How many words (not bytes) have been transferred so far
1951  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1952  *	      transfer, for less jitter in time measurement. Only compatible
1953  *	      with PIO drivers. If true, the call must be followed by
1954  *	      spi_take_timestamp_post or otherwise the system will crash.
1955  *	      WARNING: for fully predictable results, the CPU frequency must
1956  *	      also be under control (governor).
1957  *
1958  * This is a helper for drivers to collect the beginning of the TX timestamp
1959  * for the requested byte from the SPI transfer. The frequency with which this
1960  * function must be called (once per word, once for the whole transfer, once
1961  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1962  * greater than or equal to the requested byte at the time of the call. The
1963  * timestamp is only taken once, at the first such call. It is assumed that
1964  * the driver advances its @tx buffer pointer monotonically.
1965  */
1966 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1967 			    struct spi_transfer *xfer,
1968 			    size_t progress, bool irqs_off)
1969 {
1970 	if (!xfer->ptp_sts)
1971 		return;
1972 
1973 	if (xfer->timestamped)
1974 		return;
1975 
1976 	if (progress > xfer->ptp_sts_word_pre)
1977 		return;
1978 
1979 	/* Capture the resolution of the timestamp */
1980 	xfer->ptp_sts_word_pre = progress;
1981 
1982 	if (irqs_off) {
1983 		local_irq_save(ctlr->irq_flags);
1984 		preempt_disable();
1985 	}
1986 
1987 	ptp_read_system_prets(xfer->ptp_sts);
1988 }
1989 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1990 
1991 /**
1992  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1993  * @ctlr: Pointer to the spi_controller structure of the driver
1994  * @xfer: Pointer to the transfer being timestamped
1995  * @progress: How many words (not bytes) have been transferred so far
1996  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1997  *
1998  * This is a helper for drivers to collect the end of the TX timestamp for
1999  * the requested byte from the SPI transfer. Can be called with an arbitrary
2000  * frequency: only the first call where @progress exceeds or is equal to the
2001  * requested word will be timestamped.
2002  */
2003 void spi_take_timestamp_post(struct spi_controller *ctlr,
2004 			     struct spi_transfer *xfer,
2005 			     size_t progress, bool irqs_off)
2006 {
2007 	if (!xfer->ptp_sts)
2008 		return;
2009 
2010 	if (xfer->timestamped)
2011 		return;
2012 
2013 	if (progress < xfer->ptp_sts_word_post)
2014 		return;
2015 
2016 	ptp_read_system_postts(xfer->ptp_sts);
2017 
2018 	if (irqs_off) {
2019 		local_irq_restore(ctlr->irq_flags);
2020 		preempt_enable();
2021 	}
2022 
2023 	/* Capture the resolution of the timestamp */
2024 	xfer->ptp_sts_word_post = progress;
2025 
2026 	xfer->timestamped = 1;
2027 }
2028 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
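
/*
 * Illustrative sketch (hypothetical "foo_*" names): a PIO driver brackets
 * each word it pushes with the pre/post helpers; @progress counts words,
 * and only the snapshot bracketing the requested word is kept.
 *
 *	for (i = 0; i < xfer->len / foo_bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_push_word(ctlr, i);		// hypothetical FIFO write
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */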
2029 
2030 /**
2031  * spi_set_thread_rt - set the controller to pump at realtime priority
2032  * @ctlr: controller to boost priority of
2033  *
2034  * This can be called because the controller requested realtime priority
2035  * (by setting the ->rt value before calling spi_register_controller()) or
2036  * because a device on the bus said that its transfers needed realtime
2037  * priority.
2038  *
2039  * NOTE: at the moment if any device on a bus says it needs realtime then
2040  * the thread will be at realtime priority for all transfers on that
2041  * controller.  If this eventually becomes a problem we may see if we can
2042  * find a way to boost the priority only temporarily during relevant
2043  * transfers.
2044  */
2045 static void spi_set_thread_rt(struct spi_controller *ctlr)
2046 {
2047 	dev_info(&ctlr->dev,
2048 		"will run message pump with realtime priority\n");
2049 	sched_set_fifo(ctlr->kworker->task);
2050 }
2051 
2052 static int spi_init_queue(struct spi_controller *ctlr)
2053 {
2054 	ctlr->running = false;
2055 	ctlr->busy = false;
2056 	ctlr->queue_empty = true;
2057 
2058 	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2059 	if (IS_ERR(ctlr->kworker)) {
2060 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2061 		return PTR_ERR(ctlr->kworker);
2062 	}
2063 
2064 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2065 
2066 	/*
2067 	 * Controller config will indicate if this controller should run the
2068 	 * message pump with high (realtime) priority to reduce the transfer
2069 	 * latency on the bus by minimising the delay between a transfer
2070 	 * request and the scheduling of the message pump thread. Without this
2071 	 * setting the message pump thread will remain at default priority.
2072 	 */
2073 	if (ctlr->rt)
2074 		spi_set_thread_rt(ctlr);
2075 
2076 	return 0;
2077 }
2078 
2079 /**
2080  * spi_get_next_queued_message() - called by driver to check for queued
2081  * messages
2082  * @ctlr: the controller to check for queued messages
2083  *
2084  * If there are more messages in the queue, the next message is returned from
2085  * this call.
2086  *
2087  * Return: the next message in the queue, else NULL if the queue is empty.
2088  */
2089 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2090 {
2091 	struct spi_message *next;
2092 	unsigned long flags;
2093 
2094 	/* Get a pointer to the next message, if any */
2095 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2096 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2097 					queue);
2098 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2099 
2100 	return next;
2101 }
2102 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
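
/*
 * Illustrative sketch (hypothetical "foo_*" name): a driver may peek at
 * the queue, e.g. to decide whether powering down between messages is
 * worthwhile.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_enter_low_power(ctlr);
 */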
2103 
2104 /*
2105  * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2106  *                            and spi_maybe_unoptimize_message()
2107  * @msg: the message to unoptimize
2108  *
2109  * Peripheral drivers should use spi_unoptimize_message() and callers inside
2110  * core should use spi_maybe_unoptimize_message() rather than calling this
2111  * function directly.
2112  *
2113  * It is not valid to call this on a message that is not currently optimized.
2114  */
2115 static void __spi_unoptimize_message(struct spi_message *msg)
2116 {
2117 	struct spi_controller *ctlr = msg->spi->controller;
2118 
2119 	if (ctlr->unoptimize_message)
2120 		ctlr->unoptimize_message(msg);
2121 
2122 	spi_res_release(ctlr, msg);
2123 
2124 	msg->optimized = false;
2125 	msg->opt_state = NULL;
2126 }
2127 
2128 /*
2129  * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2130  * @msg: the message to unoptimize
2131  *
2132  * This function is used to unoptimize a message if and only if it was
2133  * optimized by the core (via spi_maybe_optimize_message()).
2134  */
2135 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2136 {
2137 	if (!msg->pre_optimized && msg->optimized &&
2138 	    !msg->spi->controller->defer_optimize_message)
2139 		__spi_unoptimize_message(msg);
2140 }
2141 
2142 /**
2143  * spi_finalize_current_message() - the current message is complete
2144  * @ctlr: the controller to return the message to
2145  *
2146  * Called by the driver to notify the core that the message at the front of
2147  * the queue is complete and can be removed from the queue.
2148  */
2149 void spi_finalize_current_message(struct spi_controller *ctlr)
2150 {
2151 	struct spi_transfer *xfer;
2152 	struct spi_message *mesg;
2153 	int ret;
2154 
2155 	mesg = ctlr->cur_msg;
2156 
2157 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2158 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2159 			ptp_read_system_postts(xfer->ptp_sts);
2160 			xfer->ptp_sts_word_post = xfer->len;
2161 		}
2162 	}
2163 
2164 	if (unlikely(ctlr->ptp_sts_supported))
2165 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2166 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2167 
2168 	spi_unmap_msg(ctlr, mesg);
2169 
2170 	if (mesg->prepared && ctlr->unprepare_message) {
2171 		ret = ctlr->unprepare_message(ctlr, mesg);
2172 		if (ret) {
2173 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2174 				ret);
2175 		}
2176 	}
2177 
2178 	mesg->prepared = false;
2179 
2180 	spi_maybe_unoptimize_message(mesg);
2181 
2182 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2183 	smp_mb(); /* See __spi_pump_transfer_message()... */
2184 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2185 		complete(&ctlr->cur_msg_completion);
2186 
2187 	trace_spi_message_done(mesg);
2188 
2189 	mesg->state = NULL;
2190 	if (mesg->complete)
2191 		mesg->complete(mesg->context);
2192 }
2193 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
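
/*
 * Illustrative sketch (hypothetical "foo_*" names): a driver providing its
 * own transfer_one_message() must arrange for this finalization itself,
 * typically right after executing all transfers of the message.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		msg->status = foo_do_transfers(ctlr, msg);	// hypothetical
 *		spi_finalize_current_message(ctlr);
 *		return msg->status;
 *	}
 */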
2194 
2195 static int spi_start_queue(struct spi_controller *ctlr)
2196 {
2197 	unsigned long flags;
2198 
2199 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2200 
2201 	if (ctlr->running || ctlr->busy) {
2202 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2203 		return -EBUSY;
2204 	}
2205 
2206 	ctlr->running = true;
2207 	ctlr->cur_msg = NULL;
2208 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2209 
2210 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2211 
2212 	return 0;
2213 }
2214 
2215 static int spi_stop_queue(struct spi_controller *ctlr)
2216 {
2217 	unsigned int limit = 500;
2218 	unsigned long flags;
2219 
2220 	/*
2221 	 * This is a bit lame, but is optimized for the common execution path.
2222 	 * A wait_queue on the ctlr->busy could be used, but then the common
2223 	 * execution path (pump_messages) would be required to call wake_up or
2224 	 * friends on every SPI message. Do this instead.
2225 	 */
2226 	do {
2227 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2228 		if (list_empty(&ctlr->queue) && !ctlr->busy) {
2229 			ctlr->running = false;
2230 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2231 			return 0;
2232 		}
2233 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2234 		usleep_range(10000, 11000);
2235 	} while (--limit);
2236 
2237 	return -EBUSY;
2238 }
2239 
2240 static int spi_destroy_queue(struct spi_controller *ctlr)
2241 {
2242 	int ret;
2243 
2244 	ret = spi_stop_queue(ctlr);
2245 
2246 	/*
2247 	 * kthread_flush_worker will block until all work is done.
2248 	 * If the reason that stop_queue timed out is that the work will never
2249 	 * finish, then it does no good to flush or stop the thread, so
2250 	 * return anyway.
2251 	 */
2252 	if (ret) {
2253 		dev_err(&ctlr->dev, "problem destroying queue\n");
2254 		return ret;
2255 	}
2256 
2257 	kthread_destroy_worker(ctlr->kworker);
2258 
2259 	return 0;
2260 }
2261 
2262 static int __spi_queued_transfer(struct spi_device *spi,
2263 				 struct spi_message *msg,
2264 				 bool need_pump)
2265 {
2266 	struct spi_controller *ctlr = spi->controller;
2267 	unsigned long flags;
2268 
2269 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2270 
2271 	if (!ctlr->running) {
2272 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2273 		return -ESHUTDOWN;
2274 	}
2275 	msg->actual_length = 0;
2276 	msg->status = -EINPROGRESS;
2277 
2278 	list_add_tail(&msg->queue, &ctlr->queue);
2279 	ctlr->queue_empty = false;
2280 	if (!ctlr->busy && need_pump)
2281 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2282 
2283 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2284 	return 0;
2285 }
2286 
2287 /**
2288  * spi_queued_transfer - transfer function for queued transfers
2289  * @spi: SPI device which is requesting transfer
2290  * @msg: SPI message which is to be handled; it is queued onto the driver queue
2291  *
2292  * Return: zero on success, else a negative error code.
2293  */
2294 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2295 {
2296 	return __spi_queued_transfer(spi, msg, true);
2297 }
2298 
2299 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2300 {
2301 	int ret;
2302 
2303 	ctlr->transfer = spi_queued_transfer;
2304 	if (!ctlr->transfer_one_message)
2305 		ctlr->transfer_one_message = spi_transfer_one_message;
2306 
2307 	/* Initialize and start queue */
2308 	ret = spi_init_queue(ctlr);
2309 	if (ret) {
2310 		dev_err(&ctlr->dev, "problem initializing queue\n");
2311 		goto err_init_queue;
2312 	}
2313 	ctlr->queued = true;
2314 	ret = spi_start_queue(ctlr);
2315 	if (ret) {
2316 		dev_err(&ctlr->dev, "problem starting queue\n");
2317 		goto err_start_queue;
2318 	}
2319 
2320 	return 0;
2321 
2322 err_start_queue:
2323 	spi_destroy_queue(ctlr);
2324 err_init_queue:
2325 	return ret;
2326 }
2327 
2328 /**
2329  * spi_flush_queue - Send all pending messages in the queue from the caller's
2330  *		     context
2331  * @ctlr: controller to process queue for
2332  *
2333  * This should be used when one wants to ensure all pending messages have been
2334  * sent before doing something. It is used by the spi-mem code to make sure SPI
2335  * memory operations do not preempt regular SPI transfers that have been queued
2336  * before the spi-mem operation.
2337  */
2338 void spi_flush_queue(struct spi_controller *ctlr)
2339 {
2340 	if (ctlr->transfer == spi_queued_transfer)
2341 		__spi_pump_messages(ctlr, false);
2342 }
2343 
2344 /*-------------------------------------------------------------------------*/
2345 
2346 #if defined(CONFIG_OF)
2347 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2348 				     struct spi_delay *delay, const char *prop)
2349 {
2350 	u32 value;
2351 
2352 	if (!of_property_read_u32(nc, prop, &value)) {
2353 		if (value > U16_MAX) {
2354 			delay->value = DIV_ROUND_UP(value, 1000);
2355 			delay->unit = SPI_DELAY_UNIT_USECS;
2356 		} else {
2357 			delay->value = value;
2358 			delay->unit = SPI_DELAY_UNIT_NSECS;
2359 		}
2360 	}
2361 }
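
/*
 * Illustrative device tree fragment (not from this file): a value that
 * fits in u16 is kept in nanoseconds, anything larger is rounded up to
 * microseconds, so the two properties below are stored differently.
 *
 *	spi-cs-setup-delay-ns = <50000>;	// kept as 50000 ns
 *	spi-cs-hold-delay-ns = <100000>;	// > U16_MAX, becomes 100 us
 */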
2362 
2363 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2364 			   struct device_node *nc)
2365 {
2366 	u32 value, cs[SPI_CS_CNT_MAX];
2367 	int rc, idx;
2368 
2369 	/* Mode (clock phase/polarity/etc.) */
2370 	if (of_property_read_bool(nc, "spi-cpha"))
2371 		spi->mode |= SPI_CPHA;
2372 	if (of_property_read_bool(nc, "spi-cpol"))
2373 		spi->mode |= SPI_CPOL;
2374 	if (of_property_read_bool(nc, "spi-3wire"))
2375 		spi->mode |= SPI_3WIRE;
2376 	if (of_property_read_bool(nc, "spi-lsb-first"))
2377 		spi->mode |= SPI_LSB_FIRST;
2378 	if (of_property_read_bool(nc, "spi-cs-high"))
2379 		spi->mode |= SPI_CS_HIGH;
2380 
2381 	/* Device DUAL/QUAD mode */
2382 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2383 		switch (value) {
2384 		case 0:
2385 			spi->mode |= SPI_NO_TX;
2386 			break;
2387 		case 1:
2388 			break;
2389 		case 2:
2390 			spi->mode |= SPI_TX_DUAL;
2391 			break;
2392 		case 4:
2393 			spi->mode |= SPI_TX_QUAD;
2394 			break;
2395 		case 8:
2396 			spi->mode |= SPI_TX_OCTAL;
2397 			break;
2398 		default:
2399 			dev_warn(&ctlr->dev,
2400 				"spi-tx-bus-width %d not supported\n",
2401 				value);
2402 			break;
2403 		}
2404 	}
2405 
2406 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2407 		switch (value) {
2408 		case 0:
2409 			spi->mode |= SPI_NO_RX;
2410 			break;
2411 		case 1:
2412 			break;
2413 		case 2:
2414 			spi->mode |= SPI_RX_DUAL;
2415 			break;
2416 		case 4:
2417 			spi->mode |= SPI_RX_QUAD;
2418 			break;
2419 		case 8:
2420 			spi->mode |= SPI_RX_OCTAL;
2421 			break;
2422 		default:
2423 			dev_warn(&ctlr->dev,
2424 				"spi-rx-bus-width %d not supported\n",
2425 				value);
2426 			break;
2427 		}
2428 	}
2429 
2430 	if (spi_controller_is_target(ctlr)) {
2431 		if (!of_node_name_eq(nc, "slave")) {
2432 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2433 				nc);
2434 			return -EINVAL;
2435 		}
2436 		return 0;
2437 	}
2438 
2439 	if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2440 		dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2441 		return -EINVAL;
2442 	}
2443 
2444 	spi_set_all_cs_unused(spi);
2445 
2446 	/* Device address */
2447 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2448 						 SPI_CS_CNT_MAX);
2449 	if (rc < 0) {
2450 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2451 			nc, rc);
2452 		return rc;
2453 	}
2454 	if (rc > ctlr->num_chipselect) {
2455 		dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2456 			nc, rc);
2457 		return rc;
2458 	}
2459 	if ((of_property_present(nc, "parallel-memories")) &&
2460 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2461 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2462 		return -EINVAL;
2463 	}
2464 	for (idx = 0; idx < rc; idx++)
2465 		spi_set_chipselect(spi, idx, cs[idx]);
2466 
2467 	/*
2468 	 * By default spi->chip_select[0] will hold the physical CS number,
2469 	 * so set bit 0 in spi->cs_index_mask.
2470 	 */
2471 	spi->cs_index_mask = BIT(0);
2472 
2473 	/* Device speed */
2474 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2475 		spi->max_speed_hz = value;
2476 
2477 	/* Device CS delays */
2478 	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2479 	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2480 	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2481 
2482 	return 0;
2483 }
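
/*
 * Illustrative device tree fragment (not from this file) exercising the
 * properties parsed above; the node name, compatible and values are
 * examples only.
 *
 *	&spi0 {
 *		sensor@0 {
 *			compatible = "vendor,sensor";
 *			reg = <0>;			// chip select 0
 *			spi-max-frequency = <10000000>;
 *			spi-cpha;			// SPI mode 1
 *			spi-rx-bus-width = <2>;		// SPI_RX_DUAL
 *		};
 *	};
 */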
2484 
2485 static struct spi_device *
2486 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2487 {
2488 	struct spi_device *spi;
2489 	int rc;
2490 
2491 	/* Alloc an spi_device */
2492 	spi = spi_alloc_device(ctlr);
2493 	if (!spi) {
2494 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2495 		rc = -ENOMEM;
2496 		goto err_out;
2497 	}
2498 
2499 	/* Select device driver */
2500 	rc = of_alias_from_compatible(nc, spi->modalias,
2501 				      sizeof(spi->modalias));
2502 	if (rc < 0) {
2503 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2504 		goto err_out;
2505 	}
2506 
2507 	rc = of_spi_parse_dt(ctlr, spi, nc);
2508 	if (rc)
2509 		goto err_out;
2510 
2511 	/* Store a pointer to the node in the device structure */
2512 	of_node_get(nc);
2513 
2514 	device_set_node(&spi->dev, of_fwnode_handle(nc));
2515 
2516 	/* Register the new device */
2517 	rc = spi_add_device(spi);
2518 	if (rc) {
2519 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2520 		goto err_of_node_put;
2521 	}
2522 
2523 	return spi;
2524 
2525 err_of_node_put:
2526 	of_node_put(nc);
2527 err_out:
2528 	spi_dev_put(spi);
2529 	return ERR_PTR(rc);
2530 }
2531 
2532 /**
2533  * of_register_spi_devices() - Register child devices onto the SPI bus
2534  * @ctlr:	Pointer to spi_controller device
2535  *
2536  * Registers an spi_device for each child node of the controller node which
2537  * represents a valid SPI slave.
2538  */
2539 static void of_register_spi_devices(struct spi_controller *ctlr)
2540 {
2541 	struct spi_device *spi;
2542 	struct device_node *nc;
2543 
2544 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2545 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2546 			continue;
2547 		spi = of_register_spi_device(ctlr, nc);
2548 		if (IS_ERR(spi)) {
2549 			dev_warn(&ctlr->dev,
2550 				 "Failed to create SPI device for %pOF\n", nc);
2551 			of_node_clear_flag(nc, OF_POPULATED);
2552 		}
2553 	}
2554 }
2555 #else
2556 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2557 #endif
2558 
2559 /**
2560  * spi_new_ancillary_device() - Register ancillary SPI device
2561  * @spi:         Pointer to the main SPI device registering the ancillary device
2562  * @chip_select: Chip Select of the ancillary device
2563  *
2564  * Register an ancillary SPI device; for example some chips have a chip-select
2565  * for normal device usage and another one for setup/firmware upload.
2566  *
2567  * This may only be called from the main SPI device's probe routine.
2568  *
2569  * Return: 0 on success; negative errno on failure
2570  */
2571 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2572 					     u8 chip_select)
2573 {
2574 	struct spi_controller *ctlr = spi->controller;
2575 	struct spi_device *ancillary;
2576 	int rc;
2577 
2578 	/* Alloc an spi_device */
2579 	ancillary = spi_alloc_device(ctlr);
2580 	if (!ancillary) {
2581 		rc = -ENOMEM;
2582 		goto err_out;
2583 	}
2584 
2585 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2586 
2587 	/* Use provided chip-select for ancillary device */
2588 	spi_set_all_cs_unused(ancillary);
2589 	spi_set_chipselect(ancillary, 0, chip_select);
2590 
2591 	/* Take over SPI mode/speed from SPI main device */
2592 	ancillary->max_speed_hz = spi->max_speed_hz;
2593 	ancillary->mode = spi->mode;
2594 	/*
2595 	 * By default spi->chip_select[0] will hold the physical CS number,
2596 	 * so set bit 0 in spi->cs_index_mask.
2597 	 */
2598 	ancillary->cs_index_mask = BIT(0);
2599 
2600 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2601 
2602 	/* Register the new device */
2603 	rc = __spi_add_device(ancillary);
2604 	if (rc) {
2605 		dev_err(&spi->dev, "failed to register ancillary device\n");
2606 		goto err_out;
2607 	}
2608 
2609 	return ancillary;
2610 
2611 err_out:
2612 	spi_dev_put(ancillary);
2613 	return ERR_PTR(rc);
2614 }
2615 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
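
/*
 * Illustrative sketch: called from the main device's probe(), e.g. for a
 * chip whose firmware-upload interface sits behind a second chip select
 * (here assumed to be CS 1).
 *
 *	ancillary = spi_new_ancillary_device(spi, 1);
 *	if (IS_ERR(ancillary))
 *		return PTR_ERR(ancillary);
 */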
2616 
2617 #ifdef CONFIG_ACPI
2618 struct acpi_spi_lookup {
2619 	struct spi_controller 	*ctlr;
2620 	u32			max_speed_hz;
2621 	u32			mode;
2622 	int			irq;
2623 	u8			bits_per_word;
2624 	u8			chip_select;
2625 	int			n;
2626 	int			index;
2627 };
2628 
2629 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2630 {
2631 	struct acpi_resource_spi_serialbus *sb;
2632 	int *count = data;
2633 
2634 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2635 		return 1;
2636 
2637 	sb = &ares->data.spi_serial_bus;
2638 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2639 		return 1;
2640 
2641 	*count = *count + 1;
2642 
2643 	return 1;
2644 }
2645 
2646 /**
2647  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2648  * @adev:	ACPI device
2649  *
2650  * Return: the number of SpiSerialBus resources in the ACPI-device's
2651  * resource-list; or a negative error code.
2652  */
2653 int acpi_spi_count_resources(struct acpi_device *adev)
2654 {
2655 	LIST_HEAD(r);
2656 	int count = 0;
2657 	int ret;
2658 
2659 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2660 	if (ret < 0)
2661 		return ret;
2662 
2663 	acpi_dev_free_resource_list(&r);
2664 
2665 	return count;
2666 }
2667 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
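
/*
 * Illustrative sketch: a caller can size an enumeration loop with this
 * helper before allocating each device by index with
 * acpi_spi_device_alloc() below; "adev" is assumed to be valid.
 *
 *	int n = acpi_spi_count_resources(adev);
 *
 *	if (n < 0)
 *		return n;
 */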
2668 
2669 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2670 					    struct acpi_spi_lookup *lookup)
2671 {
2672 	const union acpi_object *obj;
2673 
2674 	if (!x86_apple_machine)
2675 		return;
2676 
2677 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2678 	    && obj->buffer.length >= 4)
2679 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2680 
2681 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2682 	    && obj->buffer.length == 8)
2683 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2684 
2685 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2686 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2687 		lookup->mode |= SPI_LSB_FIRST;
2688 
2689 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2690 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2691 		lookup->mode |= SPI_CPOL;
2692 
2693 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2694 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2695 		lookup->mode |= SPI_CPHA;
2696 }
2697 
2698 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2699 {
2700 	struct acpi_spi_lookup *lookup = data;
2701 	struct spi_controller *ctlr = lookup->ctlr;
2702 
2703 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2704 		struct acpi_resource_spi_serialbus *sb;
2705 		acpi_handle parent_handle;
2706 		acpi_status status;
2707 
2708 		sb = &ares->data.spi_serial_bus;
2709 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2710 
2711 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2712 				return 1;
2713 
2714 			status = acpi_get_handle(NULL,
2715 						 sb->resource_source.string_ptr,
2716 						 &parent_handle);
2717 
2718 			if (ACPI_FAILURE(status))
2719 				return -ENODEV;
2720 
2721 			if (ctlr) {
2722 				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2723 					return -ENODEV;
2724 			} else {
2725 				struct acpi_device *adev;
2726 
2727 				adev = acpi_fetch_acpi_dev(parent_handle);
2728 				if (!adev)
2729 					return -ENODEV;
2730 
2731 				ctlr = acpi_spi_find_controller_by_adev(adev);
2732 				if (!ctlr)
2733 					return -EPROBE_DEFER;
2734 
2735 				lookup->ctlr = ctlr;
2736 			}
2737 
2738 			/*
2739 			 * ACPI DeviceSelection numbering is handled by the
2740 			 * host controller driver in Windows and can vary
2741 			 * from driver to driver. In Linux we always expect
2742 			 * 0 .. max - 1 so we need to ask the driver to
2743 			 * translate between the two schemes.
2744 			 */
2745 			if (ctlr->fw_translate_cs) {
2746 				int cs = ctlr->fw_translate_cs(ctlr,
2747 						sb->device_selection);
2748 				if (cs < 0)
2749 					return cs;
2750 				lookup->chip_select = cs;
2751 			} else {
2752 				lookup->chip_select = sb->device_selection;
2753 			}
2754 
2755 			lookup->max_speed_hz = sb->connection_speed;
2756 			lookup->bits_per_word = sb->data_bit_length;
2757 
2758 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2759 				lookup->mode |= SPI_CPHA;
2760 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2761 				lookup->mode |= SPI_CPOL;
2762 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2763 				lookup->mode |= SPI_CS_HIGH;
2764 		}
2765 	} else if (lookup->irq < 0) {
2766 		struct resource r;
2767 
2768 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2769 			lookup->irq = r.start;
2770 	}
2771 
2772 	/* Always tell the ACPI core to skip this resource */
2773 	return 1;
2774 }
2775 
2776 /**
2777  * acpi_spi_device_alloc - Allocate an SPI device and fill it in with ACPI information
2778  * @ctlr: controller to which the spi device belongs
2779  * @adev: ACPI Device for the spi device
2780  * @index: Index of the spi resource inside the ACPI Node
2781  *
2782  * This should be used to allocate a new SPI device from an ACPI device node.
2783  * The caller is responsible for calling spi_add_device to register the SPI device.
2784  *
2785  * If ctlr is set to NULL, the controller for the SPI device will be looked up
2786  * using the resource.
2787  * If index is set to -1, index is not used.
2788  * Note: If index is -1, ctlr must be set.
2789  *
2790  * Return: a pointer to the new device, or ERR_PTR on error.
2791  */
2792 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2793 					 struct acpi_device *adev,
2794 					 int index)
2795 {
2796 	acpi_handle parent_handle = NULL;
2797 	struct list_head resource_list;
2798 	struct acpi_spi_lookup lookup = {};
2799 	struct spi_device *spi;
2800 	int ret;
2801 
2802 	if (!ctlr && index == -1)
2803 		return ERR_PTR(-EINVAL);
2804 
2805 	lookup.ctlr		= ctlr;
2806 	lookup.irq		= -1;
2807 	lookup.index		= index;
2808 	lookup.n		= 0;
2809 
2810 	INIT_LIST_HEAD(&resource_list);
2811 	ret = acpi_dev_get_resources(adev, &resource_list,
2812 				     acpi_spi_add_resource, &lookup);
2813 	acpi_dev_free_resource_list(&resource_list);
2814 
2815 	if (ret < 0)
2816 		/* Found SPI in _CRS but it points to another controller */
2817 		return ERR_PTR(ret);
2818 
2819 	if (!lookup.max_speed_hz &&
2820 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2821 	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2822 		/* Apple does not use _CRS but nested devices for SPI slaves */
2823 		acpi_spi_parse_apple_properties(adev, &lookup);
2824 	}
2825 
2826 	if (!lookup.max_speed_hz)
2827 		return ERR_PTR(-ENODEV);
2828 
2829 	spi = spi_alloc_device(lookup.ctlr);
2830 	if (!spi) {
2831 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2832 			dev_name(&adev->dev));
2833 		return ERR_PTR(-ENOMEM);
2834 	}
2835 
2836 	spi_set_all_cs_unused(spi);
2837 	spi_set_chipselect(spi, 0, lookup.chip_select);
2838 
2839 	ACPI_COMPANION_SET(&spi->dev, adev);
2840 	spi->max_speed_hz	= lookup.max_speed_hz;
2841 	spi->mode		|= lookup.mode;
2842 	spi->irq		= lookup.irq;
2843 	spi->bits_per_word	= lookup.bits_per_word;
2844 	/*
2845 	 * By default spi->chip_select[0] will hold the physical CS number,
2846 	 * so set bit 0 in spi->cs_index_mask.
2847 	 */
2848 	spi->cs_index_mask	= BIT(0);
2849 
2850 	return spi;
2851 }
2852 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
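
/*
 * Illustrative sketch: allocate the SPI device behind the first
 * SpiSerialBus resource of an ACPI node, letting the core look up the
 * controller, then register it; "adev" is assumed to be valid.
 *
 *	struct spi_device *spi = acpi_spi_device_alloc(NULL, adev, 0);
 *
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */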
2853 
2854 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2855 					    struct acpi_device *adev)
2856 {
2857 	struct spi_device *spi;
2858 
2859 	if (acpi_bus_get_status(adev) || !adev->status.present ||
2860 	    acpi_device_enumerated(adev))
2861 		return AE_OK;
2862 
2863 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
2864 	if (IS_ERR(spi)) {
2865 		if (PTR_ERR(spi) == -ENOMEM)
2866 			return AE_NO_MEMORY;
2867 		else
2868 			return AE_OK;
2869 	}
2870 
2871 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2872 			  sizeof(spi->modalias));
2873 
2874 	acpi_device_set_enumerated(adev);
2875 
2876 	adev->power.flags.ignore_parent = true;
2877 	if (spi_add_device(spi)) {
2878 		adev->power.flags.ignore_parent = false;
2879 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2880 			dev_name(&adev->dev));
2881 		spi_dev_put(spi);
2882 	}
2883 
2884 	return AE_OK;
2885 }
2886 
2887 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2888 				       void *data, void **return_value)
2889 {
2890 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2891 	struct spi_controller *ctlr = data;
2892 
2893 	if (!adev)
2894 		return AE_OK;
2895 
2896 	return acpi_register_spi_device(ctlr, adev);
2897 }
2898 
2899 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
2900 
2901 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2902 {
2903 	acpi_status status;
2904 	acpi_handle handle;
2905 
2906 	handle = ACPI_HANDLE(ctlr->dev.parent);
2907 	if (!handle)
2908 		return;
2909 
2910 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2911 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2912 				     acpi_spi_add_device, NULL, ctlr, NULL);
2913 	if (ACPI_FAILURE(status))
2914 		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2915 }
2916 #else
2917 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2918 #endif /* CONFIG_ACPI */
2919 
2920 static void spi_controller_release(struct device *dev)
2921 {
2922 	struct spi_controller *ctlr;
2923 
2924 	ctlr = container_of(dev, struct spi_controller, dev);
2925 	kfree(ctlr);
2926 }
2927 
2928 static const struct class spi_master_class = {
2929 	.name		= "spi_master",
2930 	.dev_release	= spi_controller_release,
2931 	.dev_groups	= spi_master_groups,
2932 };
2933 
2934 #ifdef CONFIG_SPI_SLAVE
2935 /**
2936  * spi_target_abort - abort the ongoing transfer request on an SPI target
2937  *		     controller
2938  * @spi: device used for the current transfer
2939  */
2940 int spi_target_abort(struct spi_device *spi)
2941 {
2942 	struct spi_controller *ctlr = spi->controller;
2943 
2944 	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2945 		return ctlr->target_abort(ctlr);
2946 
2947 	return -ENOTSUPP;
2948 }
2949 EXPORT_SYMBOL_GPL(spi_target_abort);
2950 
2951 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2952 			  char *buf)
2953 {
2954 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2955 						   dev);
2956 	struct device *child;
2957 	int ret;
2958 
2959 	child = device_find_any_child(&ctlr->dev);
2960 	ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2961 	put_device(child);
2962 
2963 	return ret;
2964 }
2965 
2966 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2967 			   const char *buf, size_t count)
2968 {
2969 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2970 						   dev);
2971 	struct spi_device *spi;
2972 	struct device *child;
2973 	char name[32];
2974 	int rc;
2975 
2976 	rc = sscanf(buf, "%31s", name);
2977 	if (rc != 1 || !name[0])
2978 		return -EINVAL;
2979 
2980 	child = device_find_any_child(&ctlr->dev);
2981 	if (child) {
2982 		/* Remove registered slave */
2983 		device_unregister(child);
2984 		put_device(child);
2985 	}
2986 
2987 	if (strcmp(name, "(null)")) {
2988 		/* Register new slave */
2989 		spi = spi_alloc_device(ctlr);
2990 		if (!spi)
2991 			return -ENOMEM;
2992 
2993 		strscpy(spi->modalias, name, sizeof(spi->modalias));
2994 
2995 		rc = spi_add_device(spi);
2996 		if (rc) {
2997 			spi_dev_put(spi);
2998 			return rc;
2999 		}
3000 	}
3001 
3002 	return count;
3003 }
3004 
3005 static DEVICE_ATTR_RW(slave);
3006 
3007 static struct attribute *spi_slave_attrs[] = {
3008 	&dev_attr_slave.attr,
3009 	NULL,
3010 };
3011 
3012 static const struct attribute_group spi_slave_group = {
3013 	.attrs = spi_slave_attrs,
3014 };
3015 
3016 static const struct attribute_group *spi_slave_groups[] = {
3017 	&spi_controller_statistics_group,
3018 	&spi_slave_group,
3019 	NULL,
3020 };
3021 
3022 static const struct class spi_slave_class = {
3023 	.name		= "spi_slave",
3024 	.dev_release	= spi_controller_release,
3025 	.dev_groups	= spi_slave_groups,
3026 };
3027 #else
3028 extern struct class spi_slave_class;	/* dummy */
3029 #endif
3030 
3031 /**
3032  * __spi_alloc_controller - allocate an SPI master or slave controller
3033  * @dev: the controller, possibly using the platform_bus
3034  * @size: how much zeroed driver-private data to allocate; the pointer to this
3035  *	memory is in the driver_data field of the returned device, accessible
3036  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
3037  *	drivers granting DMA access to portions of their private data need to
3038  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
3039  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3040  *	slave (true) controller
3041  * Context: can sleep
3042  *
3043  * This call is used only by SPI controller drivers, which are the
3044  * only ones directly touching chip registers.  It's how they allocate
3045  * an spi_controller structure, prior to calling spi_register_controller().
3046  *
3047  * This must be called from context that can sleep.
3048  *
3049  * The caller is responsible for assigning the bus number and initializing the
3050  * controller's methods before calling spi_register_controller(); and (after
3051  * errors adding the device) calling spi_controller_put() to prevent a memory
3052  * leak.
3053  *
3054  * Return: the SPI controller structure on success, else NULL.
3055  */
3056 struct spi_controller *__spi_alloc_controller(struct device *dev,
3057 					      unsigned int size, bool slave)
3058 {
3059 	struct spi_controller	*ctlr;
3060 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3061 
3062 	if (!dev)
3063 		return NULL;
3064 
3065 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3066 	if (!ctlr)
3067 		return NULL;
3068 
3069 	device_initialize(&ctlr->dev);
3070 	INIT_LIST_HEAD(&ctlr->queue);
3071 	spin_lock_init(&ctlr->queue_lock);
3072 	spin_lock_init(&ctlr->bus_lock_spinlock);
3073 	mutex_init(&ctlr->bus_lock_mutex);
3074 	mutex_init(&ctlr->io_mutex);
3075 	mutex_init(&ctlr->add_lock);
3076 	ctlr->bus_num = -1;
3077 	ctlr->num_chipselect = 1;
3078 	ctlr->slave = slave;
3079 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3080 		ctlr->dev.class = &spi_slave_class;
3081 	else
3082 		ctlr->dev.class = &spi_master_class;
3083 	ctlr->dev.parent = dev;
3084 	pm_suspend_ignore_children(&ctlr->dev, true);
3085 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3086 
3087 	return ctlr;
3088 }
3089 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
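
/*
 * Illustrative sketch (hypothetical "struct foo_priv"): drivers normally
 * reach this function through the spi_alloc_host()/spi_alloc_target()
 * wrappers rather than calling it directly.
 *
 *	ctlr = spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */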
3090 
3091 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3092 {
3093 	spi_controller_put(*(struct spi_controller **)ctlr);
3094 }
3095 
3096 /**
3097  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3098  * @dev: physical device of SPI controller
3099  * @size: how much zeroed driver-private data to allocate
3100  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3101  * Context: can sleep
3102  *
3103  * Allocate an SPI controller and automatically release a reference on it
3104  * when @dev is unbound from its driver.  Drivers are thus relieved from
3105  * having to call spi_controller_put().
3106  *
3107  * The arguments to this function are identical to __spi_alloc_controller().
3108  *
3109  * Return: the SPI controller structure on success, else NULL.
3110  */
3111 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3112 						   unsigned int size,
3113 						   bool slave)
3114 {
3115 	struct spi_controller **ptr, *ctlr;
3116 
3117 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3118 			   GFP_KERNEL);
3119 	if (!ptr)
3120 		return NULL;
3121 
3122 	ctlr = __spi_alloc_controller(dev, size, slave);
3123 	if (ctlr) {
3124 		ctlr->devm_allocated = true;
3125 		*ptr = ctlr;
3126 		devres_add(dev, ptr);
3127 	} else {
3128 		devres_free(ptr);
3129 	}
3130 
3131 	return ctlr;
3132 }
3133 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3134 
3135 /**
3136  * spi_get_gpio_descs() - grab chip select GPIOs for the master
3137  * @ctlr: The SPI master to grab GPIO descriptors for
3138  */
3139 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3140 {
3141 	int nb, i;
3142 	struct gpio_desc **cs;
3143 	struct device *dev = &ctlr->dev;
3144 	unsigned long native_cs_mask = 0;
3145 	unsigned int num_cs_gpios = 0;
3146 
3147 	nb = gpiod_count(dev, "cs");
3148 	if (nb < 0) {
3149 		/* No GPIOs at all is fine, else return the error */
3150 		if (nb == -ENOENT)
3151 			return 0;
3152 		return nb;
3153 	}
3154 
3155 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3156 
3157 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3158 			  GFP_KERNEL);
3159 	if (!cs)
3160 		return -ENOMEM;
3161 	ctlr->cs_gpiods = cs;
3162 
3163 	for (i = 0; i < nb; i++) {
3164 		/*
3165 		 * Most chipselects are active low, the inverted
3166 		 * semantics are handled by special quirks in gpiolib,
3167 		 * so initializing them GPIOD_OUT_LOW here means
3168 		 * "unasserted", in most cases this will drive the physical
3169 		 * line high.
3170 		 */
3171 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3172 						      GPIOD_OUT_LOW);
3173 		if (IS_ERR(cs[i]))
3174 			return PTR_ERR(cs[i]);
3175 
3176 		if (cs[i]) {
3177 			/*
3178 			 * If we find a CS GPIO, name it after the device and
3179 			 * chip select line.
3180 			 */
3181 			char *gpioname;
3182 
3183 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3184 						  dev_name(dev), i);
3185 			if (!gpioname)
3186 				return -ENOMEM;
3187 			gpiod_set_consumer_name(cs[i], gpioname);
3188 			num_cs_gpios++;
3189 			continue;
3190 		}
3191 
3192 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3193 			dev_err(dev, "Invalid native chip select %d\n", i);
3194 			return -EINVAL;
3195 		}
3196 		native_cs_mask |= BIT(i);
3197 	}
3198 
3199 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3200 
3201 	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3202 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3203 		dev_err(dev, "No unused native chip select available\n");
3204 		return -EINVAL;
3205 	}
3206 
3207 	return 0;
3208 }
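
/*
 * Illustrative device tree fragment (not from this file): GPIO and native
 * chip selects may be mixed, with a <0> entry keeping the native CS at
 * that index.
 *
 *	cs-gpios = <&gpioa 0 GPIO_ACTIVE_LOW>,	// CS0: GPIO
 *		   <0>,				// CS1: native
 *		   <&gpiob 3 GPIO_ACTIVE_LOW>;	// CS2: GPIO
 */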
3209 
3210 static int spi_controller_check_ops(struct spi_controller *ctlr)
3211 {
3212 	/*
3213 	 * The controller may implement only the high-level SPI-memory like
3214 	 * operations if it does not support regular SPI transfers, and this is
3215 	 * valid use case.
3216 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3217 	 * one of the ->transfer_xxx() method be implemented.
3218 	 */
3219 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3220 		if (!ctlr->transfer && !ctlr->transfer_one &&
3221 		   !ctlr->transfer_one_message) {
3222 			return -EINVAL;
3223 		}
3224 	}
3225 
3226 	return 0;
3227 }
3228 
3229 /* Allocate dynamic bus number using Linux idr */
3230 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3231 {
3232 	int id;
3233 
3234 	mutex_lock(&board_lock);
3235 	id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3236 	mutex_unlock(&board_lock);
3237 	if (WARN(id < 0, "couldn't get idr"))
3238 		return id == -ENOSPC ? -EBUSY : id;
3239 	ctlr->bus_num = id;
3240 	return 0;
3241 }
3242 
3243 /**
3244  * spi_register_controller - register SPI host or target controller
3245  * @ctlr: initialized controller, originally from spi_alloc_host() or
3246  *	spi_alloc_target()
3247  * Context: can sleep
3248  *
3249  * SPI controllers connect to their drivers using some non-SPI bus,
3250  * such as the platform bus.  The final stage of probe() in that code
3251  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3252  *
3253  * SPI controllers use board-specific (often SoC-specific) bus numbers,
3254  * and board-specific addressing for SPI devices combines those numbers
3255  * with chip select numbers.  Since SPI does not directly support dynamic
3256  * device identification, boards need configuration tables telling which
3257  * chip is at which address.
3258  *
3259  * This must be called from context that can sleep.  It returns zero on
3260  * success, else a negative error code (dropping the controller's refcount).
3261  * After a successful return, the caller is responsible for calling
3262  * spi_unregister_controller().
3263  *
3264  * Return: zero on success, else a negative error code.
3265  */
3266 int spi_register_controller(struct spi_controller *ctlr)
3267 {
3268 	struct device		*dev = ctlr->dev.parent;
3269 	struct boardinfo	*bi;
3270 	int			first_dynamic;
3271 	int			status;
3272 	int			idx;
3273 
3274 	if (!dev)
3275 		return -ENODEV;
3276 
3277 	/*
3278 	 * Make sure all necessary hooks are implemented before registering
3279 	 * the SPI controller.
3280 	 */
3281 	status = spi_controller_check_ops(ctlr);
3282 	if (status)
3283 		return status;
3284 
3285 	if (ctlr->bus_num < 0)
3286 		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3287 	if (ctlr->bus_num >= 0) {
3288 		/* Devices with a fixed bus number must check in with that number */
3289 		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3290 		if (status)
3291 			return status;
3292 	}
3293 	if (ctlr->bus_num < 0) {
3294 		first_dynamic = of_alias_get_highest_id("spi");
3295 		if (first_dynamic < 0)
3296 			first_dynamic = 0;
3297 		else
3298 			first_dynamic++;
3299 
3300 		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3301 		if (status)
3302 			return status;
3303 	}
3304 	ctlr->bus_lock_flag = 0;
3305 	init_completion(&ctlr->xfer_completion);
3306 	init_completion(&ctlr->cur_msg_completion);
3307 	if (!ctlr->max_dma_len)
3308 		ctlr->max_dma_len = INT_MAX;
3309 
3310 	/*
3311 	 * Register the device, then userspace will see it.
3312 	 * Registration fails if the bus ID is in use.
3313 	 */
3314 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3315 
3316 	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3317 		status = spi_get_gpio_descs(ctlr);
3318 		if (status)
3319 			goto free_bus_id;
3320 		/*
3321 		 * A controller using GPIO descriptors always
3322 		 * supports SPI_CS_HIGH if need be.
3323 		 */
3324 		ctlr->mode_bits |= SPI_CS_HIGH;
3325 	}
3326 
3327 	/*
3328 	 * Even if it's just one always-selected device, there must
3329 	 * be at least one chipselect.
3330 	 */
3331 	if (!ctlr->num_chipselect) {
3332 		status = -EINVAL;
3333 		goto free_bus_id;
3334 	}
3335 
3336 	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
3337 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3338 		ctlr->last_cs[idx] = SPI_INVALID_CS;
3339 
3340 	status = device_add(&ctlr->dev);
3341 	if (status < 0)
3342 		goto free_bus_id;
3343 	dev_dbg(dev, "registered %s %s\n",
3344 			spi_controller_is_target(ctlr) ? "target" : "host",
3345 			dev_name(&ctlr->dev));
3346 
3347 	/*
3348 	 * If we're using a queued driver, start the queue. Note that we don't
3349 	 * need the queueing logic if the driver is only supporting high-level
3350 	 * memory operations.
3351 	 */
3352 	if (ctlr->transfer) {
3353 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3354 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3355 		status = spi_controller_initialize_queue(ctlr);
3356 		if (status) {
3357 			device_del(&ctlr->dev);
3358 			goto free_bus_id;
3359 		}
3360 	}
3361 	/* Add statistics */
3362 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3363 	if (!ctlr->pcpu_statistics) {
3364 		dev_err(dev, "Error allocating per-cpu statistics\n");
3365 		status = -ENOMEM;
3366 		goto destroy_queue;
3367 	}
3368 
3369 	mutex_lock(&board_lock);
3370 	list_add_tail(&ctlr->list, &spi_controller_list);
3371 	list_for_each_entry(bi, &board_list, list)
3372 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3373 	mutex_unlock(&board_lock);
3374 
3375 	/* Register devices from the device tree and ACPI */
3376 	of_register_spi_devices(ctlr);
3377 	acpi_register_spi_devices(ctlr);
3378 	return status;
3379 
3380 destroy_queue:
3381 	spi_destroy_queue(ctlr);
3382 free_bus_id:
3383 	mutex_lock(&board_lock);
3384 	idr_remove(&spi_master_idr, ctlr->bus_num);
3385 	mutex_unlock(&board_lock);
3386 	return status;
3387 }
3388 EXPORT_SYMBOL_GPL(spi_register_controller);
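
/*
 * Example (editorial sketch, not part of the original source): a minimal
 * probe path for a host controller driver using spi_register_controller().
 * All foo_* names, struct foo_priv and the platform_device binding are
 * hypothetical. After a successful return the caller owns the
 * spi_unregister_controller() call; on failure we must drop the
 * allocation reference ourselves.
 */
#if 0
static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	ctlr = spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = -1;			/* request a dynamic bus number */
	ctlr->num_chipselect = 4;		/* must be non-zero, see above */
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	ctlr->transfer_one = foo_transfer_one;	/* selects the queued path */
	ctlr->set_cs = foo_set_cs;

	ret = spi_register_controller(ctlr);
	if (ret)
		spi_controller_put(ctlr);	/* drop the allocation ref */
	return ret;
}
#endif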
3389 
3390 static void devm_spi_unregister(struct device *dev, void *res)
3391 {
3392 	spi_unregister_controller(*(struct spi_controller **)res);
3393 }
3394 
3395 /**
3396  * devm_spi_register_controller - register managed SPI host or target
3397  *	controller
3398  * @dev:    device managing SPI controller
3399  * @ctlr: initialized controller, originally from spi_alloc_host() or
3400  *	spi_alloc_target()
3401  * Context: can sleep
3402  *
3403  * Register an SPI controller as with spi_register_controller(); the
3404  * controller is automatically unregistered and freed when @dev is unbound.
3405  *
3406  * Return: zero on success, else a negative error code.
3407  */
3408 int devm_spi_register_controller(struct device *dev,
3409 				 struct spi_controller *ctlr)
3410 {
3411 	struct spi_controller **ptr;
3412 	int ret;
3413 
3414 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3415 	if (!ptr)
3416 		return -ENOMEM;
3417 
3418 	ret = spi_register_controller(ctlr);
3419 	if (!ret) {
3420 		*ptr = ctlr;
3421 		devres_add(dev, ptr);
3422 	} else {
3423 		devres_free(ptr);
3424 	}
3425 
3426 	return ret;
3427 }
3428 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
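
/*
 * Example (editorial sketch): the device-managed variant removes the explicit
 * unwind from the previous sketch. Hypothetical foo_* names; assumes the
 * controller came from devm_spi_alloc_host() so allocation and registration
 * are both released automatically on unbind.
 */
#if 0
static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(struct foo_priv));
	if (!ctlr)
		return -ENOMEM;

	ctlr->num_chipselect = 1;
	ctlr->transfer_one = foo_transfer_one;
	ctlr->set_cs = foo_set_cs;

	/* Unregistration happens automatically when &pdev->dev is unbound */
	return devm_spi_register_controller(&pdev->dev, ctlr);
}
#endif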
3429 
3430 static int __unregister(struct device *dev, void *null)
3431 {
3432 	spi_unregister_device(to_spi_device(dev));
3433 	return 0;
3434 }
3435 
3436 /**
3437  * spi_unregister_controller - unregister SPI master or slave controller
3438  * @ctlr: the controller being unregistered
3439  * Context: can sleep
3440  *
3441  * This call is used only by SPI controller drivers, which are the
3442  * only ones directly touching chip registers.
3443  *
3444  * This must be called from context that can sleep.
3445  *
3446  * Note that this function also drops a reference to the controller.
3447  */
3448 void spi_unregister_controller(struct spi_controller *ctlr)
3449 {
3450 	struct spi_controller *found;
3451 	int id = ctlr->bus_num;
3452 
3453 	/* Prevent addition of new devices, unregister existing ones */
3454 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3455 		mutex_lock(&ctlr->add_lock);
3456 
3457 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3458 
3459 	/* First make sure that this controller was ever added */
3460 	mutex_lock(&board_lock);
3461 	found = idr_find(&spi_master_idr, id);
3462 	mutex_unlock(&board_lock);
3463 	if (ctlr->queued) {
3464 		if (spi_destroy_queue(ctlr))
3465 			dev_err(&ctlr->dev, "queue remove failed\n");
3466 	}
3467 	mutex_lock(&board_lock);
3468 	list_del(&ctlr->list);
3469 	mutex_unlock(&board_lock);
3470 
3471 	device_del(&ctlr->dev);
3472 
3473 	/* Free bus id */
3474 	mutex_lock(&board_lock);
3475 	if (found == ctlr)
3476 		idr_remove(&spi_master_idr, id);
3477 	mutex_unlock(&board_lock);
3478 
3479 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3480 		mutex_unlock(&ctlr->add_lock);
3481 
3482 	/*
3483 	 * Release the last reference on the controller if its driver
3484 	 * has not yet been converted to devm_spi_alloc_host/target().
3485 	 */
3486 	if (!ctlr->devm_allocated)
3487 		put_device(&ctlr->dev);
3488 }
3489 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3490 
3491 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3492 {
3493 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3494 }
3495 
3496 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3497 {
3498 	mutex_lock(&ctlr->bus_lock_mutex);
3499 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3500 	mutex_unlock(&ctlr->bus_lock_mutex);
3501 }
3502 
3503 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3504 {
3505 	mutex_lock(&ctlr->bus_lock_mutex);
3506 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3507 	mutex_unlock(&ctlr->bus_lock_mutex);
3508 }
3509 
3510 int spi_controller_suspend(struct spi_controller *ctlr)
3511 {
3512 	int ret = 0;
3513 
3514 	/* Basically no-ops for non-queued controllers */
3515 	if (ctlr->queued) {
3516 		ret = spi_stop_queue(ctlr);
3517 		if (ret)
3518 			dev_err(&ctlr->dev, "queue stop failed\n");
3519 	}
3520 
3521 	__spi_mark_suspended(ctlr);
3522 	return ret;
3523 }
3524 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3525 
3526 int spi_controller_resume(struct spi_controller *ctlr)
3527 {
3528 	int ret = 0;
3529 
3530 	__spi_mark_resumed(ctlr);
3531 
3532 	if (ctlr->queued) {
3533 		ret = spi_start_queue(ctlr);
3534 		if (ret)
3535 			dev_err(&ctlr->dev, "queue restart failed\n");
3536 	}
3537 	return ret;
3538 }
3539 EXPORT_SYMBOL_GPL(spi_controller_resume);
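
/*
 * Example (editorial sketch): typical system-sleep callbacks in a controller
 * driver, pairing spi_controller_suspend()/spi_controller_resume() with
 * clock management. The foo_* names and the clk member are hypothetical.
 */
#if 0
static int foo_spi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct foo_priv *priv = spi_controller_get_devdata(ctlr);
	int ret;

	/* Quiesce the message queue before cutting the clock */
	ret = spi_controller_suspend(ctlr);
	if (ret)
		return ret;

	clk_disable_unprepare(priv->clk);
	return 0;
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	struct foo_priv *priv = spi_controller_get_devdata(ctlr);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Restart queue processing only once the hardware is live again */
	return spi_controller_resume(ctlr);
}
#endif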
3540 
3541 /*-------------------------------------------------------------------------*/
3542 
3543 /* Core methods for spi_message alterations */
3544 
3545 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3546 					    struct spi_message *msg,
3547 					    void *res)
3548 {
3549 	struct spi_replaced_transfers *rxfer = res;
3550 	size_t i;
3551 
3552 	/* Call extra callback if requested */
3553 	if (rxfer->release)
3554 		rxfer->release(ctlr, msg, res);
3555 
3556 	/* Insert replaced transfers back into the message */
3557 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3558 
3559 	/* Remove the formerly inserted entries */
3560 	for (i = 0; i < rxfer->inserted; i++)
3561 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3562 }
3563 
3564 /**
3565  * spi_replace_transfers - replace transfers with several transfers
3566  *                         and register change with spi_message.resources
3567  * @msg:           the spi_message we work upon
3568  * @xfer_first:    the first spi_transfer we want to replace
3569  * @remove:        number of transfers to remove
3570  * @insert:        the number of transfers we want to insert instead
3571  * @release:       extra release code necessary in some circumstances
3572  * @extradatasize: extra data to allocate (with alignment guarantees
3573  *                 of struct @spi_transfer)
3574  * @gfp:           gfp flags
3575  *
3576  * Return: pointer to @spi_replaced_transfers,
3577  *          PTR_ERR(...) in case of errors.
3578  */
3579 static struct spi_replaced_transfers *spi_replace_transfers(
3580 	struct spi_message *msg,
3581 	struct spi_transfer *xfer_first,
3582 	size_t remove,
3583 	size_t insert,
3584 	spi_replaced_release_t release,
3585 	size_t extradatasize,
3586 	gfp_t gfp)
3587 {
3588 	struct spi_replaced_transfers *rxfer;
3589 	struct spi_transfer *xfer;
3590 	size_t i;
3591 
3592 	/* Allocate the structure using spi_res */
3593 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3594 			      struct_size(rxfer, inserted_transfers, insert)
3595 			      + extradatasize,
3596 			      gfp);
3597 	if (!rxfer)
3598 		return ERR_PTR(-ENOMEM);
3599 
3600 	/* The release code to invoke before running the generic release */
3601 	rxfer->release = release;
3602 
3603 	/* Assign extradata */
3604 	if (extradatasize)
3605 		rxfer->extradata =
3606 			&rxfer->inserted_transfers[insert];
3607 
3608 	/* Init the replaced_transfers list */
3609 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3610 
3611 	/*
3612 	 * Assign the list_entry after which we should reinsert
3613 	 * the @replaced_transfers - it may be &spi_message.transfers!
3614 	 */
3615 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3616 
3617 	/* Remove the requested number of transfers */
3618 	for (i = 0; i < remove; i++) {
3619 		/*
3620 		 * If the entry after replaced_after is msg->transfers,
3621 		 * then we have been requested to remove more transfers
3622 		 * than are in the list.
3623 		 */
3624 		if (rxfer->replaced_after->next == &msg->transfers) {
3625 			dev_err(&msg->spi->dev,
3626 				"requested to remove more spi_transfers than are available\n");
3627 			/* Insert replaced transfers back into the message */
3628 			list_splice(&rxfer->replaced_transfers,
3629 				    rxfer->replaced_after);
3630 
3631 			/* Free the spi_replace_transfer structure... */
3632 			spi_res_free(rxfer);
3633 
3634 			/* ...and return with an error */
3635 			return ERR_PTR(-EINVAL);
3636 		}
3637 
3638 		/*
3639 		 * Remove the entry after replaced_after from list of
3640 		 * transfers and add it to list of replaced_transfers.
3641 		 */
3642 		list_move_tail(rxfer->replaced_after->next,
3643 			       &rxfer->replaced_transfers);
3644 	}
3645 
3646 	/*
3647 	 * Create copies of the given xfer with identical settings,
3648 	 * based on the first transfer that is to be removed.
3649 	 */
3650 	for (i = 0; i < insert; i++) {
3651 		/* We need to run in reverse order */
3652 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3653 
3654 		/* Copy all spi_transfer data */
3655 		memcpy(xfer, xfer_first, sizeof(*xfer));
3656 
3657 		/* Add to list */
3658 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3659 
3660 		/* Clear cs_change and delay for all but the last */
3661 		if (i) {
3662 			xfer->cs_change = false;
3663 			xfer->delay.value = 0;
3664 		}
3665 	}
3666 
3667 	/* Set up inserted... */
3668 	rxfer->inserted = insert;
3669 
3670 	/* ...and register it with spi_res/spi_message */
3671 	spi_res_add(msg, rxfer);
3672 
3673 	return rxfer;
3674 }
3675 
3676 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3677 					struct spi_message *msg,
3678 					struct spi_transfer **xferp,
3679 					size_t maxsize)
3680 {
3681 	struct spi_transfer *xfer = *xferp, *xfers;
3682 	struct spi_replaced_transfers *srt;
3683 	size_t offset;
3684 	size_t count, i;
3685 
3686 	/* Calculate how many we have to replace */
3687 	count = DIV_ROUND_UP(xfer->len, maxsize);
3688 
3689 	/* Create replacement */
3690 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3691 	if (IS_ERR(srt))
3692 		return PTR_ERR(srt);
3693 	xfers = srt->inserted_transfers;
3694 
3695 	/*
3696 	 * Now handle each of those newly inserted spi_transfers.
3697 	 * Note that the replacement spi_transfers are all preset
3698 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3699 	 * are all identical (as well as most others)
3700 	 * so we just have to fix up len and the pointers.
3701 	 */
3702 
3703 	/*
3704 	 * The first transfer just needs the length modified, so we
3705 	 * run it outside the loop.
3706 	 */
3707 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3708 
3709 	/* All the others need rx_buf/tx_buf also set */
3710 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3711 		/* Update rx_buf, tx_buf and DMA */
3712 		if (xfers[i].rx_buf)
3713 			xfers[i].rx_buf += offset;
3714 		if (xfers[i].tx_buf)
3715 			xfers[i].tx_buf += offset;
3716 
3717 		/* Update length */
3718 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3719 	}
3720 
3721 	/*
3722 	 * We set up xferp to the last entry we have inserted,
3723 	 * so that we skip those already split transfers.
3724 	 */
3725 	*xferp = &xfers[count - 1];
3726 
3727 	/* Increment statistics counters */
3728 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3729 				       transfers_split_maxsize);
3730 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3731 				       transfers_split_maxsize);
3732 
3733 	return 0;
3734 }
3735 
3736 /**
3737  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3738  *                               when an individual transfer exceeds a
3739  *                               certain size
3740  * @ctlr:    the @spi_controller for this transfer
3741  * @msg:     the @spi_message to transform
3742  * @maxsize: the maximum length, in bytes, allowed per transfer
3743  *
3744  * This function allocates resources that are automatically freed during the
3745  * spi message unoptimize phase so this function should only be called from
3746  * optimize_message callbacks.
3747  *
3748  * Return: status of transformation
3749  */
3750 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3751 				struct spi_message *msg,
3752 				size_t maxsize)
3753 {
3754 	struct spi_transfer *xfer;
3755 	int ret;
3756 
3757 	/*
3758 	 * Iterate over the transfer_list,
3759 	 * but note that xfer is advanced to the last transfer inserted
3760 	 * to avoid checking sizes again unnecessarily (also, xfer may
3761 	 * belong to a different list by the time the
3762 	 * replacement has happened).
3763 	 */
3764 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3765 		if (xfer->len > maxsize) {
3766 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3767 							   maxsize);
3768 			if (ret)
3769 				return ret;
3770 		}
3771 	}
3772 
3773 	return 0;
3774 }
3775 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
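
/*
 * Example (editorial sketch): a controller whose DMA engine can only handle
 * 64-byte bursts could enforce that limit from its optimize_message()
 * callback, which is the only place this helper may be called from per the
 * note above. The foo_ name and the 64-byte figure are hypothetical.
 */
#if 0
static int foo_optimize_message(struct spi_message *msg)
{
	return spi_split_transfers_maxsize(msg->spi->controller, msg, 64);
}
#endif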
3776 
3777 
3778 /**
3779  * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3780  *                                when an individual transfer exceeds a
3781  *                                certain number of SPI words
3782  * @ctlr:     the @spi_controller for this transfer
3783  * @msg:      the @spi_message to transform
3784  * @maxwords: the number of words to limit each transfer to
3785  *
3786  * This function allocates resources that are automatically freed during the
3787  * spi message unoptimize phase so this function should only be called from
3788  * optimize_message callbacks.
3789  *
3790  * Return: status of transformation
3791  */
3792 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3793 				 struct spi_message *msg,
3794 				 size_t maxwords)
3795 {
3796 	struct spi_transfer *xfer;
3797 
3798 	/*
3799 	 * Iterate over the transfer_list,
3800 	 * but note that xfer is advanced to the last transfer inserted
3801 	 * to avoid checking sizes again unnecessarily (also, xfer may
3802 	 * belong to a different list by the time the
3803 	 * replacement has happened).
3804 	 */
3805 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3806 		size_t maxsize;
3807 		int ret;
3808 
3809 		maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3810 		if (xfer->len > maxsize) {
3811 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3812 							   maxsize);
3813 			if (ret)
3814 				return ret;
3815 		}
3816 	}
3817 
3818 	return 0;
3819 }
3820 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
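
/*
 * Example (editorial sketch): the word-based variant converts a word count
 * into a per-transfer byte limit. With bits_per_word = 12, BITS_TO_BYTES(12)
 * is 2 and roundup_pow_of_two(2) is 2, so a 16-word FIFO yields a 32-byte
 * maxsize. The foo_ name and the 16-word FIFO are hypothetical.
 */
#if 0
static int foo_optimize_message(struct spi_message *msg)
{
	/* Hardware FIFO holds at most 16 words per transfer */
	return spi_split_transfers_maxwords(msg->spi->controller, msg, 16);
}
#endif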
3821 
3822 /*-------------------------------------------------------------------------*/
3823 
3824 /*
3825  * Core methods for SPI controller protocol drivers. Some of the
3826  * other core methods are currently defined as inline functions.
3827  */
3828 
3829 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3830 					u8 bits_per_word)
3831 {
3832 	if (ctlr->bits_per_word_mask) {
3833 		/* Only 32 bits fit in the mask */
3834 		if (bits_per_word > 32)
3835 			return -EINVAL;
3836 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3837 			return -EINVAL;
3838 	}
3839 
3840 	return 0;
3841 }
3842 
3843 /**
3844  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3845  * @spi: the device that requires specific CS timing configuration
3846  *
3847  * Return: zero on success, else a negative error code.
3848  */
3849 static int spi_set_cs_timing(struct spi_device *spi)
3850 {
3851 	struct device *parent = spi->controller->dev.parent;
3852 	int status = 0;
3853 
3854 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3855 		if (spi->controller->auto_runtime_pm) {
3856 			status = pm_runtime_get_sync(parent);
3857 			if (status < 0) {
3858 				pm_runtime_put_noidle(parent);
3859 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3860 					status);
3861 				return status;
3862 			}
3863 
3864 			status = spi->controller->set_cs_timing(spi);
3865 			pm_runtime_mark_last_busy(parent);
3866 			pm_runtime_put_autosuspend(parent);
3867 		} else {
3868 			status = spi->controller->set_cs_timing(spi);
3869 		}
3870 	}
3871 	return status;
3872 }
3873 
3874 /**
3875  * spi_setup - setup SPI mode and clock rate
3876  * @spi: the device whose settings are being modified
3877  * Context: can sleep, and no requests are queued to the device
3878  *
3879  * SPI protocol drivers may need to update the transfer mode if the
3880  * device doesn't work with its default.  They may likewise need
3881  * to update clock rates or word sizes from initial values.  This function
3882  * changes those settings, and must be called from a context that can sleep.
3883  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3884  * effect the next time the device is selected and data is transferred to
3885  * or from it.  When this function returns, the SPI device is deselected.
3886  *
3887  * Note that this call will fail if the protocol driver specifies an option
3888  * that the underlying controller or its driver does not support.  For
3889  * example, not all hardware supports wire transfers using nine bit words,
3890  * LSB-first wire encoding, or active-high chipselects.
3891  *
3892  * Return: zero on success, else a negative error code.
3893  */
3894 int spi_setup(struct spi_device *spi)
3895 {
3896 	unsigned	bad_bits, ugly_bits;
3897 	int		status;
3898 
3899 	/*
3900 	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3901 	 * from being set at the same time.
3902 	 */
3903 	if ((hweight_long(spi->mode &
3904 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3905 	    (hweight_long(spi->mode &
3906 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3907 		dev_err(&spi->dev,
3908 		"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3909 		return -EINVAL;
3910 	}
3911 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3912 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3913 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3914 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3915 		return -EINVAL;
3916 	/* Check against conflicting MOSI idle configuration */
3917 	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3918 		dev_err(&spi->dev,
3919 			"setup: MOSI configured to idle low and high at the same time.\n");
3920 		return -EINVAL;
3921 	}
3922 	/*
3923 	 * Help drivers fail *cleanly* when they need options
3924 	 * that aren't supported with their current controller.
3925 	 * SPI_CS_WORD has a fallback software implementation,
3926 	 * so it is ignored here.
3927 	 */
3928 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3929 				 SPI_NO_TX | SPI_NO_RX);
3930 	ugly_bits = bad_bits &
3931 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3932 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3933 	if (ugly_bits) {
3934 		dev_warn(&spi->dev,
3935 			 "setup: ignoring unsupported mode bits %x\n",
3936 			 ugly_bits);
3937 		spi->mode &= ~ugly_bits;
3938 		bad_bits &= ~ugly_bits;
3939 	}
3940 	if (bad_bits) {
3941 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3942 			bad_bits);
3943 		return -EINVAL;
3944 	}
3945 
3946 	if (!spi->bits_per_word) {
3947 		spi->bits_per_word = 8;
3948 	} else {
3949 		/*
3950 		 * Some controllers may not support the default 8 bits-per-word
3951 		 * so only perform the check when this is explicitly provided.
3952 		 */
3953 		status = __spi_validate_bits_per_word(spi->controller,
3954 						      spi->bits_per_word);
3955 		if (status)
3956 			return status;
3957 	}
3958 
3959 	if (spi->controller->max_speed_hz &&
3960 	    (!spi->max_speed_hz ||
3961 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3962 		spi->max_speed_hz = spi->controller->max_speed_hz;
3963 
3964 	mutex_lock(&spi->controller->io_mutex);
3965 
3966 	if (spi->controller->setup) {
3967 		status = spi->controller->setup(spi);
3968 		if (status) {
3969 			mutex_unlock(&spi->controller->io_mutex);
3970 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3971 				status);
3972 			return status;
3973 		}
3974 	}
3975 
3976 	status = spi_set_cs_timing(spi);
3977 	if (status) {
3978 		mutex_unlock(&spi->controller->io_mutex);
3979 		return status;
3980 	}
3981 
3982 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3983 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3984 		if (status < 0) {
3985 			mutex_unlock(&spi->controller->io_mutex);
3986 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3987 				status);
3988 			return status;
3989 		}
3990 
3991 		/*
3992 		 * We do not want to return positive value from pm_runtime_get,
3993 		 * there are many instances of devices calling spi_setup() and
3994 		 * checking for a non-zero return value instead of a negative
3995 		 * return value.
3996 		 */
3997 		status = 0;
3998 
3999 		spi_set_cs(spi, false, true);
4000 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
4001 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
4002 	} else {
4003 		spi_set_cs(spi, false, true);
4004 	}
4005 
4006 	mutex_unlock(&spi->controller->io_mutex);
4007 
4008 	if (spi->rt && !spi->controller->rt) {
4009 		spi->controller->rt = true;
4010 		spi_set_thread_rt(spi->controller);
4011 	}
4012 
4013 	trace_spi_setup(spi, status);
4014 
4015 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4016 			spi->mode & SPI_MODE_X_MASK,
4017 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4018 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4019 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4020 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
4021 			spi->bits_per_word, spi->max_speed_hz,
4022 			status);
4023 
4024 	return status;
4025 }
4026 EXPORT_SYMBOL_GPL(spi_setup);
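
/*
 * Example (editorial sketch): a peripheral driver adjusting mode, word size
 * and clock rate at probe time, as described above. The bar_probe name is
 * hypothetical and the settings are arbitrary illustrations.
 */
#if 0
static int bar_probe(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* CPOL = 1, CPHA = 1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;	/* clamped to the controller's limit */

	/* Fails cleanly if the controller can't do e.g. 16-bit words */
	return spi_setup(spi);
}
#endif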
4027 
4028 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4029 				       struct spi_device *spi)
4030 {
4031 	int delay1, delay2;
4032 
4033 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4034 	if (delay1 < 0)
4035 		return delay1;
4036 
4037 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4038 	if (delay2 < 0)
4039 		return delay2;
4040 
4041 	if (delay1 < delay2)
4042 		memcpy(&xfer->word_delay, &spi->word_delay,
4043 		       sizeof(xfer->word_delay));
4044 
4045 	return 0;
4046 }
4047 
4048 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4049 {
4050 	struct spi_controller *ctlr = spi->controller;
4051 	struct spi_transfer *xfer;
4052 	int w_size;
4053 
4054 	if (list_empty(&message->transfers))
4055 		return -EINVAL;
4056 
4057 	message->spi = spi;
4058 
4059 	/*
4060 	 * Half-duplex links include original MicroWire, and ones with
4061 	 * only one data pin like SPI_3WIRE (switches direction) or where
4062 	 * either MOSI or MISO is missing.  They can also be caused by
4063 	 * software limitations.
4064 	 */
4065 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4066 	    (spi->mode & SPI_3WIRE)) {
4067 		unsigned flags = ctlr->flags;
4068 
4069 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4070 			if (xfer->rx_buf && xfer->tx_buf)
4071 				return -EINVAL;
4072 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4073 				return -EINVAL;
4074 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4075 				return -EINVAL;
4076 		}
4077 	}
4078 
4079 	/*
4080 	 * Set transfer bits_per_word and max speed as spi device default if
4081 	 * it is not set for this transfer.
4082 	 * Set transfer tx_nbits and rx_nbits as single transfer default
4083 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4084 	 * Ensure transfer word_delay is at least as long as that required by
4085 	 * the device itself.
4086 	 */
4087 	message->frame_length = 0;
4088 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4089 		xfer->effective_speed_hz = 0;
4090 		message->frame_length += xfer->len;
4091 		if (!xfer->bits_per_word)
4092 			xfer->bits_per_word = spi->bits_per_word;
4093 
4094 		if (!xfer->speed_hz)
4095 			xfer->speed_hz = spi->max_speed_hz;
4096 
4097 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4098 			xfer->speed_hz = ctlr->max_speed_hz;
4099 
4100 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4101 			return -EINVAL;
4102 
4103 		/*
4104 		 * The SPI transfer length must be a multiple of the word size,
4105 		 * where the word size is rounded up to a power of two (in bytes).
4106 		 */
4107 		if (xfer->bits_per_word <= 8)
4108 			w_size = 1;
4109 		else if (xfer->bits_per_word <= 16)
4110 			w_size = 2;
4111 		else
4112 			w_size = 4;
4113 
4114 		/* No partial transfers accepted */
4115 		if (xfer->len % w_size)
4116 			return -EINVAL;
4117 
4118 		if (xfer->speed_hz && ctlr->min_speed_hz &&
4119 		    xfer->speed_hz < ctlr->min_speed_hz)
4120 			return -EINVAL;
4121 
4122 		if (xfer->tx_buf && !xfer->tx_nbits)
4123 			xfer->tx_nbits = SPI_NBITS_SINGLE;
4124 		if (xfer->rx_buf && !xfer->rx_nbits)
4125 			xfer->rx_nbits = SPI_NBITS_SINGLE;
4126 		/*
4127 		 * Check transfer tx/rx_nbits:
4128 		 * 1. check the value matches one of single, dual and quad
4129 		 * 2. check tx/rx_nbits match the mode in spi_device
4130 		 */
4131 		if (xfer->tx_buf) {
4132 			if (spi->mode & SPI_NO_TX)
4133 				return -EINVAL;
4134 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4135 				xfer->tx_nbits != SPI_NBITS_DUAL &&
4136 				xfer->tx_nbits != SPI_NBITS_QUAD &&
4137 				xfer->tx_nbits != SPI_NBITS_OCTAL)
4138 				return -EINVAL;
4139 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4140 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4141 				return -EINVAL;
4142 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4143 				!(spi->mode & SPI_TX_QUAD))
4144 				return -EINVAL;
4145 		}
4146 		/* Check transfer rx_nbits */
4147 		if (xfer->rx_buf) {
4148 			if (spi->mode & SPI_NO_RX)
4149 				return -EINVAL;
4150 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4151 				xfer->rx_nbits != SPI_NBITS_DUAL &&
4152 				xfer->rx_nbits != SPI_NBITS_QUAD &&
4153 				xfer->rx_nbits != SPI_NBITS_OCTAL)
4154 				return -EINVAL;
4155 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4156 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4157 				return -EINVAL;
4158 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4159 				!(spi->mode & SPI_RX_QUAD))
4160 				return -EINVAL;
4161 		}
4162 
4163 		if (_spi_xfer_word_delay_update(xfer, spi))
4164 			return -EINVAL;
4165 	}
4166 
4167 	message->status = -EINPROGRESS;
4168 
4169 	return 0;
4170 }
4171 
4172 /*
4173  * spi_split_transfers - generic handling of transfer splitting
4174  * @msg: the message to split
4175  *
4176  * Under certain conditions, a SPI controller may not support arbitrary
4177  * transfer sizes or other features required by a peripheral. This function
4178  * will split the transfers in the message into smaller transfers that are
4179  * supported by the controller.
4180  *
4181  * Controllers with special requirements not covered here can also split
4182  * transfers in the optimize_message() callback.
4183  *
4184  * Context: can sleep
4185  * Return: zero on success, else a negative error code
4186  */
4187 static int spi_split_transfers(struct spi_message *msg)
4188 {
4189 	struct spi_controller *ctlr = msg->spi->controller;
4190 	struct spi_transfer *xfer;
4191 	int ret;
4192 
4193 	/*
4194 	 * If an SPI controller does not support toggling the CS line on each
4195 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4196 	 * for the CS line, we can emulate the CS-per-word hardware function by
4197 	 * splitting transfers into one-word transfers and ensuring that
4198 	 * cs_change is set for each transfer.
4199 	 */
4200 	if ((msg->spi->mode & SPI_CS_WORD) &&
4201 	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4202 		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4203 		if (ret)
4204 			return ret;
4205 
4206 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4207 			/* Don't change cs_change on the last entry in the list */
4208 			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4209 				break;
4210 
4211 			xfer->cs_change = 1;
4212 		}
4213 	} else {
4214 		ret = spi_split_transfers_maxsize(ctlr, msg,
4215 						  spi_max_transfer_size(msg->spi));
4216 		if (ret)
4217 			return ret;
4218 	}
4219 
4220 	return 0;
4221 }
4222 
4223 /*
4224  * __spi_optimize_message - shared implementation for spi_optimize_message()
4225  *                          and spi_maybe_optimize_message()
4226  * @spi: the device that will be used for the message
4227  * @msg: the message to optimize
4228  *
4229  * Peripheral drivers will call spi_optimize_message() and the spi core will
4230  * call spi_maybe_optimize_message() instead of calling this directly.
4231  *
4232  * It is not valid to call this on a message that has already been optimized.
4233  *
4234  * Return: zero on success, else a negative error code
4235  */
4236 static int __spi_optimize_message(struct spi_device *spi,
4237 				  struct spi_message *msg)
4238 {
4239 	struct spi_controller *ctlr = spi->controller;
4240 	int ret;
4241 
4242 	ret = __spi_validate(spi, msg);
4243 	if (ret)
4244 		return ret;
4245 
4246 	ret = spi_split_transfers(msg);
4247 	if (ret)
4248 		return ret;
4249 
4250 	if (ctlr->optimize_message) {
4251 		ret = ctlr->optimize_message(msg);
4252 		if (ret) {
4253 			spi_res_release(ctlr, msg);
4254 			return ret;
4255 		}
4256 	}
4257 
4258 	msg->optimized = true;
4259 
4260 	return 0;
4261 }
4262 
4263 /*
4264  * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4265  * @spi: the device that will be used for the message
4266  * @msg: the message to optimize
4267  * Return: zero on success, else a negative error code
4268  */
4269 static int spi_maybe_optimize_message(struct spi_device *spi,
4270 				      struct spi_message *msg)
4271 {
4272 	if (spi->controller->defer_optimize_message) {
4273 		msg->spi = spi;
4274 		return 0;
4275 	}
4276 
4277 	if (msg->pre_optimized)
4278 		return 0;
4279 
4280 	return __spi_optimize_message(spi, msg);
4281 }
4282 
4283 /**
4284  * spi_optimize_message - do any one-time validation and setup for a SPI message
4285  * @spi: the device that will be used for the message
4286  * @msg: the message to optimize
4287  *
4288  * Peripheral drivers that reuse the same message repeatedly may call this to
4289  * perform as much message prep as possible once, rather than repeating it each
4290  * time a message transfer is performed, to improve throughput and reduce CPU
4291  * usage.
4292  *
4293  * Once a message has been optimized, it cannot be modified with the exception
4294  * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4295  * only the data in the memory it points to).
4296  *
4297  * Calls to this function must be balanced with calls to spi_unoptimize_message()
4298  * to avoid leaking resources.
4299  *
4300  * Context: can sleep
4301  * Return: zero on success, else a negative error code
4302  */
4303 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4304 {
4305 	int ret;
4306 
4307 	/*
4308 	 * Pre-optimization is not supported and optimization is deferred e.g.
4309 	 * when using spi-mux.
4310 	 */
4311 	if (spi->controller->defer_optimize_message)
4312 		return 0;
4313 
4314 	ret = __spi_optimize_message(spi, msg);
4315 	if (ret)
4316 		return ret;
4317 
4318 	/*
4319 	 * This flag indicates that the peripheral driver called spi_optimize_message()
4320 	 * and therefore we shouldn't unoptimize message automatically when finalizing
4321 	 * the message but rather wait until spi_unoptimize_message() is called
4322 	 * by the peripheral driver.
4323 	 */
4324 	msg->pre_optimized = true;
4325 
4326 	return 0;
4327 }
4328 EXPORT_SYMBOL_GPL(spi_optimize_message);
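
/*
 * Example (editorial sketch): a peripheral driver that submits the same
 * message layout repeatedly can pay the validation and splitting cost once.
 * The bar_* names and struct bar_ctx are hypothetical; only the tx buffer
 * contents change between iterations, which is the one modification the
 * rules above permit on an optimized message.
 */
#if 0
static int bar_stream(struct spi_device *spi, struct bar_ctx *ctx)
{
	int i, ret;

	ret = spi_optimize_message(spi, &ctx->msg);
	if (ret)
		return ret;

	for (i = 0; i < BAR_NUM_FRAMES; i++) {
		bar_fill_tx_buffer(ctx, i);	/* rewrite data in place */
		ret = spi_sync(spi, &ctx->msg);
		if (ret)
			break;
	}

	spi_unoptimize_message(&ctx->msg);	/* balance the optimize call */
	return ret;
}
#endif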
4329 
4330 /**
4331  * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4332  * @msg: the message to unoptimize
4333  *
4334  * Calls to this function must be balanced with calls to spi_optimize_message().
4335  *
4336  * Context: can sleep
4337  */
4338 void spi_unoptimize_message(struct spi_message *msg)
4339 {
4340 	if (msg->spi->controller->defer_optimize_message)
4341 		return;
4342 
4343 	__spi_unoptimize_message(msg);
4344 	msg->pre_optimized = false;
4345 }
4346 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
4347 
4348 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4349 {
4350 	struct spi_controller *ctlr = spi->controller;
4351 	struct spi_transfer *xfer;
4352 
4353 	/*
4354 	 * Some controllers do not support doing regular SPI transfers. Return
4355 	 * ENOTSUPP when this is the case.
4356 	 */
4357 	if (!ctlr->transfer)
4358 		return -ENOTSUPP;
4359 
4360 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4361 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4362 
4363 	trace_spi_message_submit(message);
4364 
4365 	if (!ctlr->ptp_sts_supported) {
4366 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4367 			xfer->ptp_sts_word_pre = 0;
4368 			ptp_read_system_prets(xfer->ptp_sts);
4369 		}
4370 	}
4371 
4372 	return ctlr->transfer(spi, message);
4373 }
4374 
4375 static void devm_spi_unoptimize_message(void *msg)
4376 {
4377 	spi_unoptimize_message(msg);
4378 }
4379 
4380 /**
4381  * devm_spi_optimize_message - managed version of spi_optimize_message()
4382  * @dev: the device that manages @msg (usually @spi->dev)
4383  * @spi: the device that will be used for the message
4384  * @msg: the message to optimize
4385  * Return: zero on success, else a negative error code
4386  *
4387  * spi_unoptimize_message() will automatically be called when the device is
4388  * removed.
4389  */
4390 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4391 			      struct spi_message *msg)
4392 {
4393 	int ret;
4394 
4395 	ret = spi_optimize_message(spi, msg);
4396 	if (ret)
4397 		return ret;
4398 
4399 	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4400 }
4401 EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
4402 
4403 /**
4404  * spi_async - asynchronous SPI transfer
4405  * @spi: device with which data will be exchanged
4406  * @message: describes the data transfers, including completion callback
4407  * Context: any (IRQs may be blocked, etc)
4408  *
4409  * This call may be used in_irq and other contexts which can't sleep,
4410  * as well as from task contexts which can sleep.
4411  *
4412  * The completion callback is invoked in a context which can't sleep.
4413  * Before that invocation, the value of message->status is undefined.
4414  * When the callback is issued, message->status holds either zero (to
4415  * indicate complete success) or a negative error code.  After that
4416  * callback returns, the driver which issued the transfer request may
4417  * deallocate the associated memory; it's no longer in use by any SPI
4418  * core or controller driver code.
4419  *
4420  * Note that although all messages to a spi_device are handled in
4421  * FIFO order, messages may go to different devices in other orders.
4422  * Some devices might be higher priority, or have various "hard" access
4423  * time requirements, for example.
4424  *
4425  * On detection of any fault during the transfer, processing of
4426  * the entire message is aborted, and the device is deselected.
4427  * Until returning from the associated message completion callback,
4428  * no other spi_message queued to that device will be processed.
4429  * (This rule applies equally to all the synchronous transfer calls,
4430  * which are wrappers around this core asynchronous primitive.)
4431  *
4432  * Return: zero on success, else a negative error code.
4433  */
4434 int spi_async(struct spi_device *spi, struct spi_message *message)
4435 {
4436 	struct spi_controller *ctlr = spi->controller;
4437 	int ret;
4438 	unsigned long flags;
4439 
4440 	ret = spi_maybe_optimize_message(spi, message);
4441 	if (ret)
4442 		return ret;
4443 
4444 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4445 
4446 	if (ctlr->bus_lock_flag)
4447 		ret = -EBUSY;
4448 	else
4449 		ret = __spi_async(spi, message);
4450 
4451 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4452 
4453 	return ret;
4454 }
4455 EXPORT_SYMBOL_GPL(spi_async);
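
/*
 * Example (editorial sketch): submitting a message from a context that can't
 * sleep and finishing the work in the completion callback, which itself runs
 * in a context that can't sleep, as documented above. The bar_* names and
 * struct bar_ctx are hypothetical.
 */
#if 0
static void bar_msg_complete(void *context)
{
	struct bar_ctx *ctx = context;

	if (ctx->msg.status)
		bar_handle_error(ctx, ctx->msg.status);
	else
		bar_consume_rx(ctx);	/* the buffers are ours again here */
}

static int bar_kick_transfer(struct bar_ctx *ctx)
{
	ctx->msg.complete = bar_msg_complete;
	ctx->msg.context = ctx;

	return spi_async(ctx->spi, &ctx->msg);
}
#endif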
4456 
4457 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4458 {
4459 	bool was_busy;
4460 	int ret;
4461 
4462 	mutex_lock(&ctlr->io_mutex);
4463 
4464 	was_busy = ctlr->busy;
4465 
4466 	ctlr->cur_msg = msg;
4467 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4468 	if (ret)
4469 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4470 	ctlr->cur_msg = NULL;
4471 	ctlr->fallback = false;
4472 
4473 	if (!was_busy) {
4474 		kfree(ctlr->dummy_rx);
4475 		ctlr->dummy_rx = NULL;
4476 		kfree(ctlr->dummy_tx);
4477 		ctlr->dummy_tx = NULL;
4478 		if (ctlr->unprepare_transfer_hardware &&
4479 		    ctlr->unprepare_transfer_hardware(ctlr))
4480 			dev_err(&ctlr->dev,
4481 				"failed to unprepare transfer hardware\n");
4482 		spi_idle_runtime_pm(ctlr);
4483 	}
4484 
4485 	mutex_unlock(&ctlr->io_mutex);
4486 }
4487 
4488 /*-------------------------------------------------------------------------*/
4489 
4490 /*
4491  * Utility methods for SPI protocol drivers, layered on
4492  * top of the core.  Some other utility methods are defined as
4493  * inline functions.
4494  */
4495 
4496 static void spi_complete(void *arg)
4497 {
4498 	complete(arg);
4499 }
4500 
4501 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4502 {
4503 	DECLARE_COMPLETION_ONSTACK(done);
4504 	unsigned long flags;
4505 	int status;
4506 	struct spi_controller *ctlr = spi->controller;
4507 
4508 	if (__spi_check_suspended(ctlr)) {
4509 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4510 		return -ESHUTDOWN;
4511 	}
4512 
4513 	status = spi_maybe_optimize_message(spi, message);
4514 	if (status)
4515 		return status;
4516 
4517 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4518 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4519 
4520 	/*
4521 	 * Checking queue_empty here only guarantees async/sync message
4522 	 * ordering when coming from the same context. It does not need to
4523 	 * guard against reentrancy from a different context. The io_mutex
4524 	 * will catch those cases.
4525 	 */
4526 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4527 		message->actual_length = 0;
4528 		message->status = -EINPROGRESS;
4529 
4530 		trace_spi_message_submit(message);
4531 
4532 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4533 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4534 
4535 		__spi_transfer_message_noqueue(ctlr, message);
4536 
4537 		return message->status;
4538 	}
4539 
4540 	/*
4541 	 * There are messages in the async queue that could have originated
4542 	 * from the same context, so we need to preserve ordering.
4543 	 * Therefore we send the message to the async queue and wait until it
4544 	 * is completed.
4545 	 */
4546 	message->complete = spi_complete;
4547 	message->context = &done;
4548 
4549 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4550 	status = __spi_async(spi, message);
4551 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4552 
4553 	if (status == 0) {
4554 		wait_for_completion(&done);
4555 		status = message->status;
4556 	}
4557 	message->complete = NULL;
4558 	message->context = NULL;
4559 
4560 	return status;
4561 }
4562 
4563 /**
4564  * spi_sync - blocking/synchronous SPI data transfers
4565  * @spi: device with which data will be exchanged
4566  * @message: describes the data transfers
4567  * Context: can sleep
4568  *
4569  * This call may only be used from a context that may sleep.  The sleep
4570  * is non-interruptible, and has no timeout.  Low-overhead controller
4571  * drivers may DMA directly into and out of the message buffers.
4572  *
4573  * Note that the SPI device's chip select is active during the message,
4574  * and then is normally disabled between messages.  Drivers for some
4575  * frequently-used devices may want to minimize costs of selecting a chip,
4576  * by leaving it selected in anticipation that the next message will go
4577  * to the same chip.  (That may increase power usage.)
4578  *
4579  * Also, the caller is guaranteeing that the memory associated with the
4580  * message will not be freed before this call returns.
4581  *
4582  * Return: zero on success, else a negative error code.
4583  */
4584 int spi_sync(struct spi_device *spi, struct spi_message *message)
4585 {
4586 	int ret;
4587 
4588 	mutex_lock(&spi->controller->bus_lock_mutex);
4589 	ret = __spi_sync(spi, message);
4590 	mutex_unlock(&spi->controller->bus_lock_mutex);
4591 
4592 	return ret;
4593 }
4594 EXPORT_SYMBOL_GPL(spi_sync);
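
/*
 * Example (editorial sketch): a classic command-then-response exchange built
 * from two transfers and submitted with spi_sync(). The bar_read_id name and
 * the 0x9f opcode are hypothetical. The stack buffer is for illustration
 * only: since low-overhead drivers may DMA directly from the buffers, real
 * drivers should use DMA-safe memory.
 */
#if 0
static int bar_read_id(struct spi_device *spi, u8 *id, size_t len)
{
	u8 cmd = 0x9f;				/* hypothetical "read ID" opcode */
	struct spi_transfer xfers[2] = {
		{ .tx_buf = &cmd, .len = 1 },	/* command */
		{ .rx_buf = id, .len = len },	/* response */
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
	return spi_sync(spi, &msg);
}
#endif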
4595 
4596 /**
4597  * spi_sync_locked - version of spi_sync with exclusive bus usage
4598  * @spi: device with which data will be exchanged
4599  * @message: describes the data transfers
4600  * Context: can sleep
4601  *
4602  * This call may only be used from a context that may sleep.  The sleep
4603  * is non-interruptible, and has no timeout.  Low-overhead controller
4604  * drivers may DMA directly into and out of the message buffers.
4605  *
4606  * This call should be used by drivers that require exclusive access to the
4607  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4608  * be released by a spi_bus_unlock call when the exclusive access is over.
4609  *
4610  * Return: zero on success, else a negative error code.
4611  */
4612 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4613 {
4614 	return __spi_sync(spi, message);
4615 }
4616 EXPORT_SYMBOL_GPL(spi_sync_locked);
4617 
4618 /**
4619  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4620  * @ctlr: SPI bus master that should be locked for exclusive bus access
4621  * Context: can sleep
4622  *
4623  * This call may only be used from a context that may sleep.  The sleep
4624  * is non-interruptible, and has no timeout.
4625  *
4626  * This call should be used by drivers that require exclusive access to the
4627  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4628  * exclusive access is over. Data transfer must be done by spi_sync_locked
4629  * and spi_async_locked calls when the SPI bus lock is held.
4630  *
4631  * Return: always zero.
4632  */
4633 int spi_bus_lock(struct spi_controller *ctlr)
4634 {
4635 	unsigned long flags;
4636 
4637 	mutex_lock(&ctlr->bus_lock_mutex);
4638 
4639 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4640 	ctlr->bus_lock_flag = 1;
4641 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4642 
4643 	/* Mutex remains locked until spi_bus_unlock() is called */
4644 
4645 	return 0;
4646 }
4647 EXPORT_SYMBOL_GPL(spi_bus_lock);
4648 
4649 /**
4650  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4651  * @ctlr: SPI bus master that was locked for exclusive bus access
4652  * Context: can sleep
4653  *
4654  * This call may only be used from a context that may sleep.  The sleep
4655  * is non-interruptible, and has no timeout.
4656  *
4657  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4658  * call.
4659  *
4660  * Return: always zero.
4661  */
4662 int spi_bus_unlock(struct spi_controller *ctlr)
4663 {
4664 	ctlr->bus_lock_flag = 0;
4665 
4666 	mutex_unlock(&ctlr->bus_lock_mutex);
4667 
4668 	return 0;
4669 }
4670 EXPORT_SYMBOL_GPL(spi_bus_unlock);
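
/*
 * Example (editorial sketch): an atomic read-modify-write sequence that no
 * other bus client may interleave with, combining the lock API above with
 * spi_sync_locked(). The bar_* helpers are hypothetical and are assumed to
 * issue their I/O via spi_sync_locked().
 */
#if 0
static int bar_update_reg(struct spi_device *spi, u8 reg, u8 set_bits)
{
	struct spi_controller *ctlr = spi->controller;
	u8 val;
	int ret;

	spi_bus_lock(ctlr);

	ret = bar_read_reg_locked(spi, reg, &val);
	if (!ret)
		ret = bar_write_reg_locked(spi, reg, val | set_bits);

	spi_bus_unlock(ctlr);
	return ret;
}
#endif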
4671 
4672 /* Portable code must never pass more than 32 bytes */
4673 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4674 
4675 static u8	*buf;
4676 
4677 /**
4678  * spi_write_then_read - SPI synchronous write followed by read
4679  * @spi: device with which data will be exchanged
4680  * @txbuf: data to be written (need not be DMA-safe)
4681  * @n_tx: size of txbuf, in bytes
4682  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4683  * @n_rx: size of rxbuf, in bytes
4684  * Context: can sleep
4685  *
4686  * This performs a half duplex MicroWire style transaction with the
4687  * device, sending txbuf and then reading rxbuf.  The return value
4688  * is zero for success, else a negative errno status code.
4689  * This call may only be used from a context that may sleep.
4690  *
4691  * Parameters to this routine are always copied using a small buffer.
4692  * Performance-sensitive or bulk transfer code should instead use
4693  * spi_{async,sync}() calls with DMA-safe buffers.
4694  *
4695  * Return: zero on success, else a negative error code.
4696  */
4697 int spi_write_then_read(struct spi_device *spi,
4698 		const void *txbuf, unsigned n_tx,
4699 		void *rxbuf, unsigned n_rx)
4700 {
4701 	static DEFINE_MUTEX(lock);
4702 
4703 	int			status;
4704 	struct spi_message	message;
4705 	struct spi_transfer	x[2];
4706 	u8			*local_buf;
4707 
4708 	/*
4709 	 * Use preallocated DMA-safe buffer if we can. We can't avoid
4710 	 * copying here (purely as a convenience), but we can
4711 	 * keep heap costs out of the hot path unless someone else is
4712 	 * using the pre-allocated buffer or the transfer is too large.
4713 	 */
4714 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4715 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4716 				    GFP_KERNEL | GFP_DMA);
4717 		if (!local_buf)
4718 			return -ENOMEM;
4719 	} else {
4720 		local_buf = buf;
4721 	}
4722 
4723 	spi_message_init(&message);
4724 	memset(x, 0, sizeof(x));
4725 	if (n_tx) {
4726 		x[0].len = n_tx;
4727 		spi_message_add_tail(&x[0], &message);
4728 	}
4729 	if (n_rx) {
4730 		x[1].len = n_rx;
4731 		spi_message_add_tail(&x[1], &message);
4732 	}
4733 
4734 	memcpy(local_buf, txbuf, n_tx);
4735 	x[0].tx_buf = local_buf;
4736 	x[1].rx_buf = local_buf + n_tx;
4737 
4738 	/* Do the I/O */
4739 	status = spi_sync(spi, &message);
4740 	if (status == 0)
4741 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4742 
4743 	if (x[0].tx_buf == buf)
4744 		mutex_unlock(&lock);
4745 	else
4746 		kfree(local_buf);
4747 
4748 	return status;
4749 }
4750 EXPORT_SYMBOL_GPL(spi_write_then_read);
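
/*
 * Example (editorial sketch): reading a single status register with the
 * convenience helper above. The caller's buffers may live on the stack
 * because the helper copies through its own DMA-safe bounce buffer. The
 * bar_read_status name and the 0x05 opcode are hypothetical.
 */
#if 0
static int bar_read_status(struct spi_device *spi, u8 *status)
{
	const u8 cmd = 0x05;	/* hypothetical "read status" opcode */

	return spi_write_then_read(spi, &cmd, 1, status, 1);
}
#endif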
4751 
4752 /*-------------------------------------------------------------------------*/
4753 
4754 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4755 /* Must call put_device() when done with the returned spi_device */
4756 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4757 {
4758 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4759 
4760 	return dev ? to_spi_device(dev) : NULL;
4761 }
4762 
4763 /* SPI controllers are not on the spi_bus, so we have to find them another way */
4764 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4765 {
4766 	struct device *dev;
4767 
4768 	dev = class_find_device_by_of_node(&spi_master_class, node);
4769 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4770 		dev = class_find_device_by_of_node(&spi_slave_class, node);
4771 	if (!dev)
4772 		return NULL;
4773 
4774 	/* Reference was taken in class_find_device() */
4775 	return container_of(dev, struct spi_controller, dev);
4776 }
4777 
4778 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4779 			 void *arg)
4780 {
4781 	struct of_reconfig_data *rd = arg;
4782 	struct spi_controller *ctlr;
4783 	struct spi_device *spi;
4784 
4785 	switch (of_reconfig_get_state_change(action, arg)) {
4786 	case OF_RECONFIG_CHANGE_ADD:
4787 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4788 		if (ctlr == NULL)
4789 			return NOTIFY_OK;	/* Not for us */
4790 
4791 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4792 			put_device(&ctlr->dev);
4793 			return NOTIFY_OK;
4794 		}
4795 
4796 		/*
4797 		 * Clear the flag before adding the device so that fw_devlink
4798 		 * doesn't skip adding consumers to this device.
4799 		 */
4800 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4801 		spi = of_register_spi_device(ctlr, rd->dn);
4802 		put_device(&ctlr->dev);
4803 
4804 		if (IS_ERR(spi)) {
4805 			pr_err("%s: failed to create for '%pOF'\n",
4806 					__func__, rd->dn);
4807 			of_node_clear_flag(rd->dn, OF_POPULATED);
4808 			return notifier_from_errno(PTR_ERR(spi));
4809 		}
4810 		break;
4811 
4812 	case OF_RECONFIG_CHANGE_REMOVE:
4813 		/* Already depopulated? */
4814 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4815 			return NOTIFY_OK;
4816 
4817 		/* Find our device by node */
4818 		spi = of_find_spi_device_by_node(rd->dn);
4819 		if (spi == NULL)
4820 			return NOTIFY_OK;	/* Not meant for us */
4821 
4822 		/* Unregister takes one ref away */
4823 		spi_unregister_device(spi);
4824 
4825 		/* And drop the reference taken by the find above */
4826 		put_device(&spi->dev);
4827 		break;
4828 	}
4829 
4830 	return NOTIFY_OK;
4831 }
4832 
4833 static struct notifier_block spi_of_notifier = {
4834 	.notifier_call = of_spi_notify,
4835 };
4836 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4837 extern struct notifier_block spi_of_notifier;
4838 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4839 
4840 #if IS_ENABLED(CONFIG_ACPI)
4841 static int spi_acpi_controller_match(struct device *dev, const void *data)
4842 {
4843 	return device_match_acpi_dev(dev->parent, data);
4844 }
4845 
4846 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4847 {
4848 	struct device *dev;
4849 
4850 	dev = class_find_device(&spi_master_class, NULL, adev,
4851 				spi_acpi_controller_match);
4852 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4853 		dev = class_find_device(&spi_slave_class, NULL, adev,
4854 					spi_acpi_controller_match);
4855 	if (!dev)
4856 		return NULL;
4857 
4858 	return container_of(dev, struct spi_controller, dev);
4859 }
4860 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4861 
4862 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4863 {
4864 	struct device *dev;
4865 
4866 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4867 	return to_spi_device(dev);
4868 }
4869 
4870 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4871 			   void *arg)
4872 {
4873 	struct acpi_device *adev = arg;
4874 	struct spi_controller *ctlr;
4875 	struct spi_device *spi;
4876 
4877 	switch (value) {
4878 	case ACPI_RECONFIG_DEVICE_ADD:
4879 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4880 		if (!ctlr)
4881 			break;
4882 
4883 		acpi_register_spi_device(ctlr, adev);
4884 		put_device(&ctlr->dev);
4885 		break;
4886 	case ACPI_RECONFIG_DEVICE_REMOVE:
4887 		if (!acpi_device_enumerated(adev))
4888 			break;
4889 
4890 		spi = acpi_spi_find_device_by_adev(adev);
4891 		if (!spi)
4892 			break;
4893 
4894 		spi_unregister_device(spi);
4895 		put_device(&spi->dev);
4896 		break;
4897 	}
4898 
4899 	return NOTIFY_OK;
4900 }
4901 
4902 static struct notifier_block spi_acpi_notifier = {
4903 	.notifier_call = acpi_spi_notify,
4904 };
4905 #else
4906 extern struct notifier_block spi_acpi_notifier;
4907 #endif
4908 
4909 static int __init spi_init(void)
4910 {
4911 	int	status;
4912 
4913 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4914 	if (!buf) {
4915 		status = -ENOMEM;
4916 		goto err0;
4917 	}
4918 
4919 	status = bus_register(&spi_bus_type);
4920 	if (status < 0)
4921 		goto err1;
4922 
4923 	status = class_register(&spi_master_class);
4924 	if (status < 0)
4925 		goto err2;
4926 
4927 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4928 		status = class_register(&spi_slave_class);
4929 		if (status < 0)
4930 			goto err3;
4931 	}
4932 
4933 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4934 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4935 	if (IS_ENABLED(CONFIG_ACPI))
4936 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4937 
4938 	return 0;
4939 
4940 err3:
4941 	class_unregister(&spi_master_class);
4942 err2:
4943 	bus_unregister(&spi_bus_type);
4944 err1:
4945 	kfree(buf);
4946 	buf = NULL;
4947 err0:
4948 	return status;
4949 }
4950 
4951 /*
4952  * A board_info is normally registered in arch_initcall(),
4953  * but even essential drivers wait till later.
4954  *
4955  * REVISIT only boardinfo really needs static linking. The rest (device and
4956  * driver registration) _could_ be dynamically linked (modular) ... Costs
4957  * include needing to have boardinfo data structures be much more public.
4958  */
4959 postcore_initcall(spi_init);
4960