// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_master_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
			offsetof(struct spi_statistics, field));	\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

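/*
 * For reference, each SPI_STATISTICS_SHOW() invocation below expands
 * (roughly) to a show helper plus two read-only device attributes. For
 * example, SPI_STATISTICS_SHOW(messages) yields:
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics __percpu *stat,
 *						    char *buf)
 *	{
 *		return spi_emit_pcpu_stats(stat, buf,
 *				offsetof(struct spi_statistics, messages));
 *	}
 *
 * and, via SPI_STATISTICS_ATTRS(), dev_attr_spi_controller_messages and
 * dev_attr_spi_device_messages, which surface the per-CPU sum as sysfs
 * files named "messages".
 */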
SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_controller *ctlr)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != ctlr->dummy_tx))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != ctlr->dummy_rx))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);
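
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a driver
 * that carries per-variant data in both its firmware match tables and its
 * spi_device_id table can retrieve it with a single call.
 * device_get_match_data() (OF/ACPI) is tried first; the spi_device_id
 * driver_data is the fallback.
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo", .data = &foo_chip_info },
 *		{ }
 *	};
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo", (kernel_ulong_t)&foo_chip_info },
 *		{ }
 *	};
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_chip_info *info =
 *			spi_get_device_match_data(spi);
 *		...
 *	}
 */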

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}
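
/*
 * For example, a device whose modalias is "foo" and that has no ACPI
 * companion results in the uevent variable "MODALIAS=spi:foo"
 * (SPI_MODULE_PREFIX is "spi:"), which is what "modprobe $MODALIAS"
 * relies on for autoloading.
 */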

static int spi_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
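
/*
 * Illustrative sketch (hypothetical driver, not part of this file): keeping
 * the spi_device_id entries in sync with the compatible strings avoids the
 * warning emitted above and keeps module autoloading working for DT users.
 *
 *	static const struct spi_device_id foo_ids[] = {
 *		{ "foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * Here "vendor,foo" strips to "foo", which spi_match_id() finds in foo_ids,
 * so no warning is printed.
 */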

/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c with other read-only (flashable) information
 * about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process. Also used to protect
 * the struct idr object.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->master = spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);
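
/*
 * Typical use (illustrative sketch; field values hypothetical):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	spi_set_chipselect(spi, 0, 0);
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "foo", sizeof(spi->modalias));
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// Discard the unadded device
 *		...
 *	}
 *
 * spi_new_device() below wraps exactly this allocate/fill/add sequence for
 * board-info style callers.
 */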

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int idx, nw_idx;
	u8 cs, cs_nw;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
				cs_nw = spi_get_chipselect(new_spi, nw_idx);
				if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
					dev_err(dev, "chipselect %d already in use\n", cs_nw);
					return -EBUSY;
				}
			}
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx, nw_idx;
	u8 cs, nw_cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (cs != 0xFF && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS don't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		cs = spi_get_chipselect(spi, idx);
		for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
			nw_cs = spi_get_chipselect(spi, nw_idx);
			if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
				dev_err(dev, "chipselect %d already in use\n", nw_cs);
				return -EBUSY;
			}
		}
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		u8 cs;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (cs != 0xFF)
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device having been set up.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;
	u8                      idx;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/*
	 * Zero (0) is a valid physical CS value and can be located at any
	 * logical CS in spi->chip_select[]. If all the physical CS entries
	 * were initialized to 0, it would be difficult to differentiate
	 * between a valid physical CS 0 and an unused logical CS whose
	 * physical CS happens to be 0. To avoid this ambiguity, initialize
	 * all the CS entries to 0xFF; unused logical CS entries then hold
	 * the physical CS value 0xFF and can be ignored during physical CS
	 * validity checks.
	 */
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(proxy, idx, 0xFF);

	spi_set_chipselect(proxy, 0, chip->chip_select);
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	/*
	 * spi->chip_select[i] gives the corresponding physical CS for logical CS i.
	 * A logical CS is selected by setting the ith bit in spi->cs_index_mask.
	 * So, for example, if spi->cs_index_mask = 0x01, then logical CS number 0
	 * is selected and spi->chip_select[0] gives the physical CS.
	 * By default spi->chip_select[0] holds the physical CS number, so set
	 * spi->cs_index_mask to 0x01.
	 */
	proxy->cs_index_mask = 0x01;

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node) {
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
		of_node_put(spi->dev.of_node);
	}
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
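
/*
 * Example board declaration (illustrative; all values hypothetical):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foo",
 *			.max_speed_hz	= 1000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 *
 * Matching happens immediately against controllers that are already
 * registered, and again later when a controller with the given bus_num
 * registers.
 */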

/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	if (!res)
		return;

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}
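
/*
 * Internal users follow this pattern (sketch; the release callback and the
 * state type are examples, not part of this file):
 *
 *	void *data = spi_res_alloc(spi, my_release,
 *				   sizeof(struct my_state), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	...
 *	spi_res_add(msg, data);
 *
 * Once added, the resource is torn down automatically by spi_res_release()
 * when the message completes; my_release() is invoked with the controller,
 * the message, and the data pointer.
 */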

/*-------------------------------------------------------------------------*/
static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		if ((spi->cs_index_mask >> idx) & 0x01) {
			if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
				last = true;
		}
	}
	return last;
}


static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			spi_is_last_cs(spi)) ||
		       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
			!spi_is_last_cs(spi))) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1;
	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;

	if (spi->mode & SPI_CS_HIGH)
		enable = !enable;

	if (spi_is_csgpiod(spi)) {
		if (!spi->controller->set_cs_timing && !activate)
			spi_delay_exec(&spi->cs_hold, NULL);

		if (!(spi->mode & SPI_NO_CS)) {
			/*
			 * Historically ACPI has no means of expressing the
			 * GPIO polarity and thus the SPISerialBus() resource
			 * defines it on a per-chip basis. In order to avoid
			 * a chain of negations, the GPIO polarity is
			 * considered to be Active High. Even for the cases
			 * when _DSD() is involved (in the updated versions
			 * of ACPI) the GPIO CS polarity must be defined
			 * Active High to avoid ambiguity. That's why we use
			 * enable, which takes SPI_CS_HIGH into account.
			 */
			for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
				if (((spi->cs_index_mask >> idx) & 0x01) &&
				    spi_get_csgpiod(spi, idx)) {
					if (has_acpi_companion(&spi->dev))
						gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
									 !enable);
					else
						/* Polarity handled by GPIO library */
						gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
									 activate);

					if (activate)
						spi_delay_exec(&spi->cs_setup, NULL);
					else
						spi_delay_exec(&spi->cs_inactive, NULL);
				}
			}
		}
		/* Some SPI masters need both GPIO CS & slave_select */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);

		if (!spi->controller->set_cs_timing) {
			if (activate)
				spi_delay_exec(&spi->cs_setup, NULL);
			else
				spi_delay_exec(&spi->cs_inactive, NULL);
		}
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	if (sgt->orig_nents) {
		dma_unmap_sgtable(dev, sgt, dir, attrs);
		sg_free_table(sgt);
		sgt->orig_nents = 0;
		sgt->nents = 0;
	}
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						&xfer->tx_sg, DMA_TO_DEVICE,
						attrs);

				return ret;
			}
		}
	}

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;
	ctlr->cur_msg_mapped = true;

	return 0;
}
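
/*
 * Whether a given transfer is mapped at all is the controller driver's
 * decision via its can_dma() callback. A minimal sketch (threshold value
 * hypothetical):
 *
 *	static bool foo_can_dma(struct spi_controller *ctlr,
 *				struct spi_device *spi,
 *				struct spi_transfer *xfer)
 *	{
 *		return xfer->len > 16;	// PIO is cheaper for tiny transfers
 *	}
 *
 * Transfers rejected here keep using PIO and are simply skipped by the
 * mapping loop above and the unmapping loop below.
 */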

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
				    DMA_FROM_DEVICE, attrs);
		spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
				    DMA_TO_DEVICE, attrs);
	}

	ctlr->cur_msg_mapped = false;

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (!ctlr->cur_msg_mapped)
		return;

	if (xfer->rx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg.orig_nents)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to their original NULL values
		 * if they were temporarily pointed at the dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_slave(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * apply the multiplier before the division, otherwise we
		 * may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);

		/*
		 * Double the estimate and add 200 ms of tolerance; use
		 * the predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}
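
/*
 * Worked example for the timeout above (values hypothetical): a 100-byte
 * transfer at 1 MHz gives ms = 8 * 1000 * 100 / 1000000 = 0 after the
 * integer division, so the doubling plus the 200 ms tolerance still leaves
 * a 200 ms timeout. A 64 KiB transfer at 10 kHz gives
 * 8 * 1000 * 65536 / 10000 = 52428 ms, i.e. roughly 105 seconds after
 * doubling and tolerance.
 */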

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		if (us <= 10)
			udelay(us);
		else
			usleep_range(us, us + DIV_ROUND_UP(us, 10));
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it by
		 * underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);
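
/*
 * Worked example (values hypothetical): with
 *
 *	struct spi_delay d = { .value = 2, .unit = SPI_DELAY_UNIT_SCK };
 *
 * and a transfer whose effective_speed_hz is 1000000, spi_delay_to_ns()
 * returns 2 * DIV_ROUND_UP(1000000000, 1000000) = 2000 ns. If the
 * effective speed is unknown, half of the requested speed_hz is used
 * instead, which deliberately overestimates the delay.
 */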

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

static void _spi_transfer_cs_change_delay(struct spi_message *msg,
					  struct spi_transfer *xfer)
{
	u32 default_delay_ns = 10 * NSEC_PER_USEC;
	u32 delay = xfer->cs_change_delay.value;
	u32 unit = xfer->cs_change_delay.unit;
	int ret;

	/* Return early on "fast" mode - for everything but USECS */
	if (!delay) {
		if (unit == SPI_DELAY_UNIT_USECS)
			_spi_transfer_delay_ns(default_delay_ns);
		return;
	}

	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
	if (ret) {
		dev_err_once(&msg->spi->dev,
			     "Use of unsupported delay unit %i, using default of %luus\n",
			     unit, default_delay_ns / NSEC_PER_USEC);
		_spi_transfer_delay_ns(default_delay_ns);
	}
}

void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
						  struct spi_transfer *xfer)
{
	_spi_transfer_cs_change_delay(msg, xfer);
}
EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);

/*
 * spi_transfer_one_message - Default implementation of transfer_one_message()
 *
 * This is a standard implementation of transfer_one_message() for
 * drivers which implement a transfer_one() operation.  It provides
 * standard handling of delays and chip select management.
 */
static int spi_transfer_one_message(struct spi_controller *ctlr,
				    struct spi_message *msg)
{
	struct spi_transfer *xfer;
	bool keep_cs = false;
	int ret = 0;
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;

	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
	spi_set_cs(msg->spi, !xfer->cs_off, false);

	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		trace_spi_transfer_start(msg, xfer);

		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
		spi_statistics_add_transfer_stats(stats, xfer, ctlr);

		if (!ctlr->ptp_sts_supported) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}

		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
			reinit_completion(&ctlr->xfer_completion);

fallback_pio:
			spi_dma_sync_for_device(ctlr, xfer);
			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
			if (ret < 0) {
				spi_dma_sync_for_cpu(ctlr, xfer);

				if (ctlr->cur_msg_mapped &&
				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
					__spi_unmap_msg(ctlr, msg);
					ctlr->fallback = true;
					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
					goto fallback_pio;
				}

				SPI_STATISTICS_INCREMENT_FIELD(statm,
							       errors);
				SPI_STATISTICS_INCREMENT_FIELD(stats,
							       errors);
				dev_err(&msg->spi->dev,
					"SPI transfer failed: %d\n", ret);
				goto out;
			}

			if (ret > 0) {
				ret = spi_transfer_wait(ctlr, msg, xfer);
				if (ret < 0)
					msg->status = ret;
			}

			spi_dma_sync_for_cpu(ctlr, xfer);
		} else {
			if (xfer->len)
				dev_err(&msg->spi->dev,
					"Bufferless transfer has length %u\n",
					xfer->len);
		}

		if (!ctlr->ptp_sts_supported) {
			ptp_read_system_postts(xfer->ptp_sts);
			xfer->ptp_sts_word_post = xfer->len;
		}

		trace_spi_transfer_stop(msg, xfer);

		if (msg->status != -EINPROGRESS)
			goto out;

		spi_transfer_delay_exec(xfer);

		if (xfer->cs_change) {
			if (list_is_last(&xfer->transfer_list,
					 &msg->transfers)) {
				keep_cs = true;
			} else {
				if (!xfer->cs_off)
					spi_set_cs(msg->spi, false, false);
				_spi_transfer_cs_change_delay(msg, xfer);
				if (!list_next_entry(xfer, transfer_list)->cs_off)
					spi_set_cs(msg->spi, true, false);
			}
		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
			spi_set_cs(msg->spi, xfer->cs_off, false);
		}

		msg->actual_length += xfer->len;
	}

out:
	if (ret != 0 || !keep_cs)
		spi_set_cs(msg->spi, false, false);

	if (msg->status == -EINPROGRESS)
		msg->status = ret;

	if (msg->status && ctlr->handle_err)
		ctlr->handle_err(ctlr, msg);

	spi_finalize_current_message(ctlr);

	return ret;
}

/**
 * spi_finalize_current_transfer - report completion of a transfer
 * @ctlr: the controller reporting completion
 *
 * Called by SPI drivers using the core transfer_one_message()
 * implementation to notify it that the current interrupt driven
 * transfer has finished and the next one may be scheduled.
 */
void spi_finalize_current_transfer(struct spi_controller *ctlr)
{
	complete(&ctlr->xfer_completion);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
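
/*
 * Sketch of the interrupt-driven contract (hypothetical driver): a
 * transfer_one() implementation that starts the hardware and returns a
 * positive value makes the core wait in spi_transfer_wait(); the driver's
 * completion interrupt then ends that wait.
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_hw(ctlr, xfer);
 *		return 1;	// Transfer is in progress
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *		...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */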

static void spi_idle_runtime_pm(struct spi_controller *ctlr)
{
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
}

static int __spi_pump_transfer_message(struct spi_controller *ctlr,
		struct spi_message *msg, bool was_busy)
{
	struct spi_transfer *xfer;
	int ret;

	if (!was_busy && ctlr->auto_runtime_pm) {
		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	if (!was_busy)
		trace_spi_controller_busy(ctlr);

	if (!was_busy && ctlr->prepare_transfer_hardware) {
		ret = ctlr->prepare_transfer_hardware(ctlr);
		if (ret) {
			dev_err(&ctlr->dev,
				"failed to prepare transfer hardware: %d\n",
				ret);

			if (ctlr->auto_runtime_pm)
				pm_runtime_put(ctlr->dev.parent);

			msg->status = ret;
			spi_finalize_current_message(ctlr);

			return ret;
		}
	}

	trace_spi_message_start(msg);

	ret = spi_split_transfers_maxsize(ctlr, msg,
					  spi_max_transfer_size(msg->spi),
					  GFP_KERNEL | GFP_DMA);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (ctlr->prepare_message) {
		ret = ctlr->prepare_message(ctlr, msg);
		if (ret) {
			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
				ret);
			msg->status = ret;
			spi_finalize_current_message(ctlr);
			return ret;
		}
		msg->prepared = true;
	}

	ret = spi_map_msg(ctlr, msg);
	if (ret) {
		msg->status = ret;
		spi_finalize_current_message(ctlr);
		return ret;
	}

	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			xfer->ptp_sts_word_pre = 0;
			ptp_read_system_prets(xfer->ptp_sts);
		}
	}

	/*
	 * A driver's implementation of transfer_one_message() must arrange
	 * for spi_finalize_current_message() to get called. Most drivers do
	 * this in the calling context, but some don't. For those cases, a
	 * completion is used to guarantee that this function does not return
	 * until spi_finalize_current_message() is done accessing
	 * ctlr->cur_msg.
	 * The following two flags make it possible to opportunistically skip
	 * the completion, since using it involves expensive spin locks. In
	 * case of a race with the context that calls
	 * spi_finalize_current_message(), the completion will always be used,
	 * due to strict ordering of these flags using barriers.
	 */
1794 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1795 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1796 	reinit_completion(&ctlr->cur_msg_completion);
1797 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1798 
1799 	ret = ctlr->transfer_one_message(ctlr, msg);
1800 	if (ret) {
1801 		dev_err(&ctlr->dev,
1802 			"failed to transfer one message from queue\n");
1803 		return ret;
1804 	}
1805 
1806 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1807 	smp_mb(); /* See spi_finalize_current_message()... */
1808 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1809 		wait_for_completion(&ctlr->cur_msg_completion);
1810 
1811 	return 0;
1812 }
1813 
1814 /**
1815  * __spi_pump_messages - function which processes SPI message queue
1816  * @ctlr: controller to process queue for
1817  * @in_kthread: true if we are in the context of the message pump thread
1818  *
1819  * This function checks if there is any SPI message in the queue that
1820  * needs processing and if so calls out to the driver to initialize hardware
1821  * and transfer each message.
1822  *
1823  * Note that it is called both from the kthread itself and also from
1824  * inside spi_sync(); the queue extraction handling at the top of the
1825  * function should deal with this safely.
1826  */
1827 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1828 {
1829 	struct spi_message *msg;
1830 	bool was_busy = false;
1831 	unsigned long flags;
1832 	int ret;
1833 
1834 	/* Take the I/O mutex */
1835 	mutex_lock(&ctlr->io_mutex);
1836 
1837 	/* Lock queue */
1838 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1839 
1840 	/* Make sure we are not already running a message */
1841 	if (ctlr->cur_msg)
1842 		goto out_unlock;
1843 
1844 	/* Check if the queue is idle */
1845 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1846 		if (!ctlr->busy)
1847 			goto out_unlock;
1848 
1849 		/* Defer any non-atomic teardown to the thread */
1850 		if (!in_kthread) {
1851 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1852 			    !ctlr->unprepare_transfer_hardware) {
1853 				spi_idle_runtime_pm(ctlr);
1854 				ctlr->busy = false;
1855 				ctlr->queue_empty = true;
1856 				trace_spi_controller_idle(ctlr);
1857 			} else {
1858 				kthread_queue_work(ctlr->kworker,
1859 						   &ctlr->pump_messages);
1860 			}
1861 			goto out_unlock;
1862 		}
1863 
1864 		ctlr->busy = false;
1865 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1866 
1867 		kfree(ctlr->dummy_rx);
1868 		ctlr->dummy_rx = NULL;
1869 		kfree(ctlr->dummy_tx);
1870 		ctlr->dummy_tx = NULL;
1871 		if (ctlr->unprepare_transfer_hardware &&
1872 		    ctlr->unprepare_transfer_hardware(ctlr))
1873 			dev_err(&ctlr->dev,
1874 				"failed to unprepare transfer hardware\n");
1875 		spi_idle_runtime_pm(ctlr);
1876 		trace_spi_controller_idle(ctlr);
1877 
1878 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1879 		ctlr->queue_empty = true;
1880 		goto out_unlock;
1881 	}
1882 
1883 	/* Extract head of queue */
1884 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1885 	ctlr->cur_msg = msg;
1886 
1887 	list_del_init(&msg->queue);
1888 	if (ctlr->busy)
1889 		was_busy = true;
1890 	else
1891 		ctlr->busy = true;
1892 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1893 
1894 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1895 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1896 
1897 	ctlr->cur_msg = NULL;
1898 	ctlr->fallback = false;
1899 
1900 	mutex_unlock(&ctlr->io_mutex);
1901 
1902 	/* Prod the scheduler in case transfer_one() was busy waiting */
1903 	if (!ret)
1904 		cond_resched();
1905 	return;
1906 
1907 out_unlock:
1908 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1909 	mutex_unlock(&ctlr->io_mutex);
1910 }
1911 
1912 /**
1913  * spi_pump_messages - kthread work function which processes spi message queue
1914  * @work: pointer to kthread work struct contained in the controller struct
1915  */
1916 static void spi_pump_messages(struct kthread_work *work)
1917 {
1918 	struct spi_controller *ctlr =
1919 		container_of(work, struct spi_controller, pump_messages);
1920 
1921 	__spi_pump_messages(ctlr, true);
1922 }
1923 
1924 /**
1925  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1926  * @ctlr: Pointer to the spi_controller structure of the driver
1927  * @xfer: Pointer to the transfer being timestamped
1928  * @progress: How many words (not bytes) have been transferred so far
1929  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1930  *	      transfer, for less jitter in time measurement. Only compatible
1931  *	      with PIO drivers. If true, must follow up with
1932  *	      spi_take_timestamp_post or otherwise system will crash.
1933  *	      spi_take_timestamp_post() or otherwise the system will crash.
1934  *	      also be under control (governor).
1935  *
1936  * This is a helper for drivers to collect the beginning of the TX timestamp
1937  * for the requested byte from the SPI transfer. The frequency with which this
1938  * function must be called (once per word, once for the whole transfer, once
1939  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1940  * greater than or equal to the requested byte at the time of the call. The
1941  * timestamp is only taken once, at the first such call. It is assumed that
1942  * the driver advances its @tx buffer pointer monotonically.
1943  */
1944 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1945 			    struct spi_transfer *xfer,
1946 			    size_t progress, bool irqs_off)
1947 {
1948 	if (!xfer->ptp_sts)
1949 		return;
1950 
1951 	if (xfer->timestamped)
1952 		return;
1953 
1954 	if (progress > xfer->ptp_sts_word_pre)
1955 		return;
1956 
1957 	/* Capture the resolution of the timestamp */
1958 	xfer->ptp_sts_word_pre = progress;
1959 
1960 	if (irqs_off) {
1961 		local_irq_save(ctlr->irq_flags);
1962 		preempt_disable();
1963 	}
1964 
1965 	ptp_read_system_prets(xfer->ptp_sts);
1966 }
1967 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1968 
1969 /**
1970  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1971  * @ctlr: Pointer to the spi_controller structure of the driver
1972  * @xfer: Pointer to the transfer being timestamped
1973  * @progress: How many words (not bytes) have been transferred so far
1974  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1975  *
1976  * This is a helper for drivers to collect the end of the TX timestamp for
1977  * the requested byte from the SPI transfer. Can be called with an arbitrary
1978  * frequency: only the first call where @tx exceeds or is equal to the
1979  * requested word will be timestamped.
1980  */
1981 void spi_take_timestamp_post(struct spi_controller *ctlr,
1982 			     struct spi_transfer *xfer,
1983 			     size_t progress, bool irqs_off)
1984 {
1985 	if (!xfer->ptp_sts)
1986 		return;
1987 
1988 	if (xfer->timestamped)
1989 		return;
1990 
1991 	if (progress < xfer->ptp_sts_word_post)
1992 		return;
1993 
1994 	ptp_read_system_postts(xfer->ptp_sts);
1995 
1996 	if (irqs_off) {
1997 		local_irq_restore(ctlr->irq_flags);
1998 		preempt_enable();
1999 	}
2000 
2001 	/* Capture the resolution of the timestamp */
2002 	xfer->ptp_sts_word_post = progress;
2003 
2004 	xfer->timestamped = 1;
2005 }
2006 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
2007 
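/*
 * Illustrative sketch (not part of this file): how a PIO controller driver
 * might bracket its FIFO writes with the two timestamp helpers above. The
 * foo_priv type and foo_fifo_write() helper are hypothetical; only the
 * spi_take_timestamp_pre()/_post() calls and their word-progress argument
 * reflect the real API.
 *
 *	static void foo_pio_tx(struct foo_priv *priv, struct spi_transfer *xfer)
 *	{
 *		const u8 *tx = xfer->tx_buf;
 *		size_t i;
 *
 *		for (i = 0; i < xfer->len; i++) {
 *			spi_take_timestamp_pre(priv->ctlr, xfer, i, false);
 *			foo_fifo_write(priv, tx[i]);
 *			spi_take_timestamp_post(priv->ctlr, xfer, i, false);
 *		}
 *	}
 */
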
2008 /**
2009  * spi_set_thread_rt - set the controller to pump at realtime priority
2010  * @ctlr: controller to boost priority of
2011  *
2012  * This can be called because the controller requested realtime priority
2013  * (by setting the ->rt value before calling spi_register_controller()) or
2014  * because a device on the bus said that its transfers needed realtime
2015  * priority.
2016  *
2017  * NOTE: at the moment if any device on a bus says it needs realtime then
2018  * the thread will be at realtime priority for all transfers on that
2019  * controller.  If this eventually becomes a problem we may see if we can
2020  * find a way to boost the priority only temporarily during relevant
2021  * transfers.
2022  */
2023 static void spi_set_thread_rt(struct spi_controller *ctlr)
2024 {
2025 	dev_info(&ctlr->dev,
2026 		"will run message pump with realtime priority\n");
2027 	sched_set_fifo(ctlr->kworker->task);
2028 }
2029 
2030 static int spi_init_queue(struct spi_controller *ctlr)
2031 {
2032 	ctlr->running = false;
2033 	ctlr->busy = false;
2034 	ctlr->queue_empty = true;
2035 
2036 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
2037 	if (IS_ERR(ctlr->kworker)) {
2038 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2039 		return PTR_ERR(ctlr->kworker);
2040 	}
2041 
2042 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2043 
2044 	/*
2045 	 * Controller config will indicate if this controller should run the
2046 	 * message pump with high (realtime) priority to reduce the transfer
2047 	 * latency on the bus by minimising the delay between a transfer
2048 	 * request and the scheduling of the message pump thread. Without this
2049 	 * setting the message pump thread will remain at default priority.
2050 	 */
2051 	if (ctlr->rt)
2052 		spi_set_thread_rt(ctlr);
2053 
2054 	return 0;
2055 }
2056 
2057 /**
2058  * spi_get_next_queued_message() - called by driver to check for queued
2059  * messages
2060  * @ctlr: the controller to check for queued messages
2061  *
2062  * If there are more messages in the queue, the next message is returned from
2063  * this call.
2064  *
2065  * Return: the next message in the queue, else NULL if the queue is empty.
2066  */
2067 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2068 {
2069 	struct spi_message *next;
2070 	unsigned long flags;
2071 
2072 	/* Get a pointer to the next message, if any */
2073 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2074 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2075 					queue);
2076 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2077 
2078 	return next;
2079 }
2080 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2081 
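/*
 * Illustrative sketch (not part of this file): a driver peeking at the
 * queue to decide whether to keep the chip select asserted between
 * consecutive messages to the same device. The bar_priv type and
 * bar_release_cs() helper are hypothetical.
 *
 *	static void bar_msg_done(struct bar_priv *priv, struct spi_message *msg)
 *	{
 *		struct spi_message *next;
 *
 *		next = spi_get_next_queued_message(priv->ctlr);
 *		if (!next || next->spi != msg->spi)
 *			bar_release_cs(priv);
 *
 *		spi_finalize_current_message(priv->ctlr);
 *	}
 */
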
2082 /**
2083  * spi_finalize_current_message() - the current message is complete
2084  * @ctlr: the controller to return the message to
2085  *
2086  * Called by the driver to notify the core that the message at the front of the
2087  * queue is complete and can be removed from the queue.
2088  */
2089 void spi_finalize_current_message(struct spi_controller *ctlr)
2090 {
2091 	struct spi_transfer *xfer;
2092 	struct spi_message *mesg;
2093 	int ret;
2094 
2095 	mesg = ctlr->cur_msg;
2096 
2097 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2098 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2099 			ptp_read_system_postts(xfer->ptp_sts);
2100 			xfer->ptp_sts_word_post = xfer->len;
2101 		}
2102 	}
2103 
2104 	if (unlikely(ctlr->ptp_sts_supported))
2105 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2106 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2107 
2108 	spi_unmap_msg(ctlr, mesg);
2109 
2110 	/*
2111 	 * In the prepare_message callback the SPI bus has the opportunity
2112 	 * to split a transfer into smaller chunks.
2113 	 *
2114 	 * Release the split transfers here since spi_map_msg() is done on
2115 	 * the split transfers.
2116 	 */
2117 	spi_res_release(ctlr, mesg);
2118 
2119 	if (mesg->prepared && ctlr->unprepare_message) {
2120 		ret = ctlr->unprepare_message(ctlr, mesg);
2121 		if (ret) {
2122 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2123 				ret);
2124 		}
2125 	}
2126 
2127 	mesg->prepared = false;
2128 
2129 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2130 	smp_mb(); /* See __spi_pump_transfer_message()... */
2131 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2132 		complete(&ctlr->cur_msg_completion);
2133 
2134 	trace_spi_message_done(mesg);
2135 
2136 	mesg->state = NULL;
2137 	if (mesg->complete)
2138 		mesg->complete(mesg->context);
2139 }
2140 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2141 
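/*
 * Illustrative sketch (not part of this file): the minimal shape of a
 * driver-provided ->transfer_one_message(), ending with the required call
 * to spi_finalize_current_message(). baz_do_transfer() is a hypothetical
 * helper that performs a single spi_transfer.
 *
 *	static int baz_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = baz_do_transfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */
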
2142 static int spi_start_queue(struct spi_controller *ctlr)
2143 {
2144 	unsigned long flags;
2145 
2146 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2147 
2148 	if (ctlr->running || ctlr->busy) {
2149 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2150 		return -EBUSY;
2151 	}
2152 
2153 	ctlr->running = true;
2154 	ctlr->cur_msg = NULL;
2155 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2156 
2157 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2158 
2159 	return 0;
2160 }
2161 
2162 static int spi_stop_queue(struct spi_controller *ctlr)
2163 {
2164 	unsigned long flags;
2165 	unsigned limit = 500;
2166 	int ret = 0;
2167 
2168 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2169 
2170 	/*
2171 	 * This is a bit lame, but is optimized for the common execution path.
2172 	 * A wait_queue on the ctlr->busy could be used, but then the common
2173 	 * execution path (pump_messages) would be required to call wake_up or
2174 	 * friends on every SPI message. Do this instead.
2175 	 */
2176 	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
2177 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2178 		usleep_range(10000, 11000);
2179 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2180 	}
2181 
2182 	if (!list_empty(&ctlr->queue) || ctlr->busy)
2183 		ret = -EBUSY;
2184 	else
2185 		ctlr->running = false;
2186 
2187 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2188 
2189 	return ret;
2190 }
2191 
2192 static int spi_destroy_queue(struct spi_controller *ctlr)
2193 {
2194 	int ret;
2195 
2196 	ret = spi_stop_queue(ctlr);
2197 
2198 	/*
2199 	 * kthread_flush_worker will block until all work is done.
2200 	 * If the reason that stop_queue timed out is that the work will never
2201 	 * finish, then it does no good to flush or stop the thread, so
2202 	 * just return anyway.
2203 	 */
2204 	if (ret) {
2205 		dev_err(&ctlr->dev, "problem destroying queue\n");
2206 		return ret;
2207 	}
2208 
2209 	kthread_destroy_worker(ctlr->kworker);
2210 
2211 	return 0;
2212 }
2213 
2214 static int __spi_queued_transfer(struct spi_device *spi,
2215 				 struct spi_message *msg,
2216 				 bool need_pump)
2217 {
2218 	struct spi_controller *ctlr = spi->controller;
2219 	unsigned long flags;
2220 
2221 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2222 
2223 	if (!ctlr->running) {
2224 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2225 		return -ESHUTDOWN;
2226 	}
2227 	msg->actual_length = 0;
2228 	msg->status = -EINPROGRESS;
2229 
2230 	list_add_tail(&msg->queue, &ctlr->queue);
2231 	ctlr->queue_empty = false;
2232 	if (!ctlr->busy && need_pump)
2233 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2234 
2235 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2236 	return 0;
2237 }
2238 
2239 /**
2240  * spi_queued_transfer - transfer function for queued transfers
2241  * @spi: SPI device which is requesting transfer
2242  * @msg: SPI message which is to be queued to the driver's queue
2243  *
2244  * Return: zero on success, else a negative error code.
2245  */
2246 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2247 {
2248 	return __spi_queued_transfer(spi, msg, true);
2249 }
2250 
2251 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2252 {
2253 	int ret;
2254 
2255 	ctlr->transfer = spi_queued_transfer;
2256 	if (!ctlr->transfer_one_message)
2257 		ctlr->transfer_one_message = spi_transfer_one_message;
2258 
2259 	/* Initialize and start queue */
2260 	ret = spi_init_queue(ctlr);
2261 	if (ret) {
2262 		dev_err(&ctlr->dev, "problem initializing queue\n");
2263 		goto err_init_queue;
2264 	}
2265 	ctlr->queued = true;
2266 	ret = spi_start_queue(ctlr);
2267 	if (ret) {
2268 		dev_err(&ctlr->dev, "problem starting queue\n");
2269 		goto err_start_queue;
2270 	}
2271 
2272 	return 0;
2273 
2274 err_start_queue:
2275 	spi_destroy_queue(ctlr);
2276 err_init_queue:
2277 	return ret;
2278 }
2279 
2280 /**
2281  * spi_flush_queue - Send all pending messages in the queue from the caller's
2282  *		     context
2283  * @ctlr: controller to process queue for
2284  *
2285  * This should be used when one wants to ensure all pending messages have been
2286  * sent before doing something. It is used by the spi-mem code to make sure SPI
2287  * memory operations do not preempt regular SPI transfers that have been queued
2288  * before the spi-mem operation.
2289  */
2290 void spi_flush_queue(struct spi_controller *ctlr)
2291 {
2292 	if (ctlr->transfer == spi_queued_transfer)
2293 		__spi_pump_messages(ctlr, false);
2294 }
2295 
2296 /*-------------------------------------------------------------------------*/
2297 
2298 #if defined(CONFIG_OF)
2299 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2300 				     struct spi_delay *delay, const char *prop)
2301 {
2302 	u32 value;
2303 
2304 	if (!of_property_read_u32(nc, prop, &value)) {
2305 		if (value > U16_MAX) {
2306 			delay->value = DIV_ROUND_UP(value, 1000);
2307 			delay->unit = SPI_DELAY_UNIT_USECS;
2308 		} else {
2309 			delay->value = value;
2310 			delay->unit = SPI_DELAY_UNIT_NSECS;
2311 		}
2312 	}
2313 }
2314 
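/*
 * Worked example for the conversion above: a devicetree property such as
 * "spi-cs-setup-delay-ns = <250000>" exceeds U16_MAX (65535), so it is
 * stored as DIV_ROUND_UP(250000, 1000) = 250 with SPI_DELAY_UNIT_USECS,
 * while "spi-cs-setup-delay-ns = <50>" fits and is kept as 50 ns.
 */
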
2315 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2316 			   struct device_node *nc)
2317 {
2318 	u32 value, cs[SPI_CS_CNT_MAX];
2319 	int rc, idx;
2320 
2321 	/* Mode (clock phase/polarity/etc.) */
2322 	if (of_property_read_bool(nc, "spi-cpha"))
2323 		spi->mode |= SPI_CPHA;
2324 	if (of_property_read_bool(nc, "spi-cpol"))
2325 		spi->mode |= SPI_CPOL;
2326 	if (of_property_read_bool(nc, "spi-3wire"))
2327 		spi->mode |= SPI_3WIRE;
2328 	if (of_property_read_bool(nc, "spi-lsb-first"))
2329 		spi->mode |= SPI_LSB_FIRST;
2330 	if (of_property_read_bool(nc, "spi-cs-high"))
2331 		spi->mode |= SPI_CS_HIGH;
2332 
2333 	/* Device DUAL/QUAD mode */
2334 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2335 		switch (value) {
2336 		case 0:
2337 			spi->mode |= SPI_NO_TX;
2338 			break;
2339 		case 1:
2340 			break;
2341 		case 2:
2342 			spi->mode |= SPI_TX_DUAL;
2343 			break;
2344 		case 4:
2345 			spi->mode |= SPI_TX_QUAD;
2346 			break;
2347 		case 8:
2348 			spi->mode |= SPI_TX_OCTAL;
2349 			break;
2350 		default:
2351 			dev_warn(&ctlr->dev,
2352 				"spi-tx-bus-width %d not supported\n",
2353 				value);
2354 			break;
2355 		}
2356 	}
2357 
2358 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2359 		switch (value) {
2360 		case 0:
2361 			spi->mode |= SPI_NO_RX;
2362 			break;
2363 		case 1:
2364 			break;
2365 		case 2:
2366 			spi->mode |= SPI_RX_DUAL;
2367 			break;
2368 		case 4:
2369 			spi->mode |= SPI_RX_QUAD;
2370 			break;
2371 		case 8:
2372 			spi->mode |= SPI_RX_OCTAL;
2373 			break;
2374 		default:
2375 			dev_warn(&ctlr->dev,
2376 				"spi-rx-bus-width %d not supported\n",
2377 				value);
2378 			break;
2379 		}
2380 	}
2381 
2382 	if (spi_controller_is_slave(ctlr)) {
2383 		if (!of_node_name_eq(nc, "slave")) {
2384 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2385 				nc);
2386 			return -EINVAL;
2387 		}
2388 		return 0;
2389 	}
2390 
2391 	if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2392 		dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2393 		return -EINVAL;
2394 	}
2395 
2396 	/*
2397 	 * Zero (0) is a valid physical CS value and can be located at any
2398 	 * logical CS in spi->chip_select[]. If all the physical CS were
2399 	 * initialized to 0, it would be impossible to differentiate a valid
2400 	 * physical CS 0 from an unused logical CS whose physical CS happens
2401 	 * to be 0. To solve this, initialize all the CS to 0xFF: unused
2402 	 * logical CS then carry the physical CS value 0xFF and can be
2403 	 * ignored when performing physical CS validity checks.
2404 	 */
2405 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
2406 		spi_set_chipselect(spi, idx, 0xFF);
2407 
2408 	/* Device address */
2409 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2410 						 SPI_CS_CNT_MAX);
2411 	if (rc < 0) {
2412 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2413 			nc, rc);
2414 		return rc;
2415 	}
2416 	if (rc > ctlr->num_chipselect) {
2417 		dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2418 			nc, rc);
2419 		return rc;
2420 	}
2421 	if ((of_property_read_bool(nc, "parallel-memories")) &&
2422 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2423 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2424 		return -EINVAL;
2425 	}
2426 	for (idx = 0; idx < rc; idx++)
2427 		spi_set_chipselect(spi, idx, cs[idx]);
2428 
2429 	/*
2430 	 * spi->chip_select[i] gives the physical CS for logical CS i, and a
2431 	 * logical CS is selected by setting the i-th bit in spi->cs_index_mask.
2432 	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS 0 is
2433 	 * selected and spi->chip_select[0] gives its physical CS.
2434 	 * By default spi->chip_select[0] holds the physical CS number, so set
2435 	 * spi->cs_index_mask to 0x01.
2436 	 */
2437 	spi->cs_index_mask = 0x01;
2438 
2439 	/* Device speed */
2440 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2441 		spi->max_speed_hz = value;
2442 
2443 	/* Device CS delays */
2444 	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2445 	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2446 	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2447 
2448 	return 0;
2449 }
2450 
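/*
 * Illustrative devicetree fragment (hypothetical node; the compatible is
 * the generic SPI NOR one) exercising the properties parsed by
 * of_spi_parse_dt() above:
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <50000000>;
 *			spi-tx-bus-width = <4>;
 *			spi-rx-bus-width = <4>;
 *			spi-cs-setup-delay-ns = <100>;
 *		};
 *	};
 */
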
2451 static struct spi_device *
2452 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2453 {
2454 	struct spi_device *spi;
2455 	int rc;
2456 
2457 	/* Alloc an spi_device */
2458 	spi = spi_alloc_device(ctlr);
2459 	if (!spi) {
2460 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2461 		rc = -ENOMEM;
2462 		goto err_out;
2463 	}
2464 
2465 	/* Select device driver */
2466 	rc = of_alias_from_compatible(nc, spi->modalias,
2467 				      sizeof(spi->modalias));
2468 	if (rc < 0) {
2469 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2470 		goto err_out;
2471 	}
2472 
2473 	rc = of_spi_parse_dt(ctlr, spi, nc);
2474 	if (rc)
2475 		goto err_out;
2476 
2477 	/* Store a pointer to the node in the device structure */
2478 	of_node_get(nc);
2479 
2480 	device_set_node(&spi->dev, of_fwnode_handle(nc));
2481 
2482 	/* Register the new device */
2483 	rc = spi_add_device(spi);
2484 	if (rc) {
2485 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2486 		goto err_of_node_put;
2487 	}
2488 
2489 	return spi;
2490 
2491 err_of_node_put:
2492 	of_node_put(nc);
2493 err_out:
2494 	spi_dev_put(spi);
2495 	return ERR_PTR(rc);
2496 }
2497 
2498 /**
2499  * of_register_spi_devices() - Register child devices onto the SPI bus
2500  * @ctlr:	Pointer to spi_controller device
2501  *
2502  * Registers an spi_device for each child node of the controller node which
2503  * represents a valid SPI slave.
2504  */
2505 static void of_register_spi_devices(struct spi_controller *ctlr)
2506 {
2507 	struct spi_device *spi;
2508 	struct device_node *nc;
2509 
2510 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2511 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2512 			continue;
2513 		spi = of_register_spi_device(ctlr, nc);
2514 		if (IS_ERR(spi)) {
2515 			dev_warn(&ctlr->dev,
2516 				 "Failed to create SPI device for %pOF\n", nc);
2517 			of_node_clear_flag(nc, OF_POPULATED);
2518 		}
2519 	}
2520 }
2521 #else
2522 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2523 #endif
2524 
2525 /**
2526  * spi_new_ancillary_device() - Register ancillary SPI device
2527  * @spi:         Pointer to the main SPI device registering the ancillary device
2528  * @chip_select: Chip Select of the ancillary device
2529  *
2530  * Register an ancillary SPI device; for example some chips have a chip-select
2531  * for normal device usage and another one for setup/firmware upload.
2532  *
2533  * This may only be called from the main SPI device's probe routine.
2534  *
2535  * Return: the new ancillary SPI device on success; ERR_PTR() on failure
2536  */
2537 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2538 					     u8 chip_select)
2539 {
2540 	struct spi_controller *ctlr = spi->controller;
2541 	struct spi_device *ancillary;
2542 	int rc = 0;
2543 	u8 idx;
2544 
2545 	/* Alloc an spi_device */
2546 	ancillary = spi_alloc_device(ctlr);
2547 	if (!ancillary) {
2548 		rc = -ENOMEM;
2549 		goto err_out;
2550 	}
2551 
2552 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2553 
2554 	/*
2555 	 * Zero (0) is a valid physical CS value and can be located at any
2556 	 * logical CS in spi->chip_select[]. If all the physical CS were
2557 	 * initialized to 0, it would be impossible to differentiate a valid
2558 	 * physical CS 0 from an unused logical CS whose physical CS happens
2559 	 * to be 0. To solve this, initialize all the CS to 0xFF: unused
2560 	 * logical CS then carry the physical CS value 0xFF and can be
2561 	 * ignored when performing physical CS validity checks.
2562 	 */
2563 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
2564 		spi_set_chipselect(ancillary, idx, 0xFF);
2565 
2566 	/* Use provided chip-select for ancillary device */
2567 	spi_set_chipselect(ancillary, 0, chip_select);
2568 
2569 	/* Take over SPI mode/speed from SPI main device */
2570 	ancillary->max_speed_hz = spi->max_speed_hz;
2571 	ancillary->mode = spi->mode;
2572 	/*
2573 	 * spi->chip_select[i] gives the physical CS for logical CS i, and a
2574 	 * logical CS is selected by setting the i-th bit in spi->cs_index_mask.
2575 	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS 0 is
2576 	 * selected and spi->chip_select[0] gives its physical CS.
2577 	 * By default spi->chip_select[0] holds the physical CS number, so set
2578 	 * spi->cs_index_mask to 0x01.
2579 	 */
2580 	ancillary->cs_index_mask = 0x01;
2581 
2582 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2583 
2584 	/* Register the new device */
2585 	rc = __spi_add_device(ancillary);
2586 	if (rc) {
2587 		dev_err(&spi->dev, "failed to register ancillary device\n");
2588 		goto err_out;
2589 	}
2590 
2591 	return ancillary;
2592 
2593 err_out:
2594 	spi_dev_put(ancillary);
2595 	return ERR_PTR(rc);
2596 }
2597 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
2598 
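/*
 * Illustrative sketch (not part of this file): a device with a second
 * chip select for setup/firmware upload registering its ancillary device
 * from probe(). Chip select 1, the qux_probe() function and the
 * qux_setup() helper are assumptions of the example.
 *
 *	static int qux_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_dev;
 *
 *		fw_dev = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_dev))
 *			return PTR_ERR(fw_dev);
 *
 *		return qux_setup(spi, fw_dev);	// hypothetical helper
 *	}
 */
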
2599 #ifdef CONFIG_ACPI
2600 struct acpi_spi_lookup {
2601 	struct spi_controller	*ctlr;
2602 	u32			max_speed_hz;
2603 	u32			mode;
2604 	int			irq;
2605 	u8			bits_per_word;
2606 	u8			chip_select;
2607 	int			n;
2608 	int			index;
2609 };
2610 
2611 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2612 {
2613 	struct acpi_resource_spi_serialbus *sb;
2614 	int *count = data;
2615 
2616 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2617 		return 1;
2618 
2619 	sb = &ares->data.spi_serial_bus;
2620 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2621 		return 1;
2622 
2623 	*count = *count + 1;
2624 
2625 	return 1;
2626 }
2627 
2628 /**
2629  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2630  * @adev:	ACPI device
2631  *
2632  * Return: the number of SpiSerialBus resources in the ACPI-device's
2633  * resource-list; or a negative error code.
2634  */
2635 int acpi_spi_count_resources(struct acpi_device *adev)
2636 {
2637 	LIST_HEAD(r);
2638 	int count = 0;
2639 	int ret;
2640 
2641 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2642 	if (ret < 0)
2643 		return ret;
2644 
2645 	acpi_dev_free_resource_list(&r);
2646 
2647 	return count;
2648 }
2649 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2650 
2651 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2652 					    struct acpi_spi_lookup *lookup)
2653 {
2654 	const union acpi_object *obj;
2655 
2656 	if (!x86_apple_machine)
2657 		return;
2658 
2659 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2660 	    && obj->buffer.length >= 4)
2661 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2662 
2663 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2664 	    && obj->buffer.length == 8)
2665 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2666 
2667 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2668 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2669 		lookup->mode |= SPI_LSB_FIRST;
2670 
2671 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2672 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2673 		lookup->mode |= SPI_CPOL;
2674 
2675 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2676 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2677 		lookup->mode |= SPI_CPHA;
2678 }
2679 
2680 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2681 {
2682 	struct acpi_spi_lookup *lookup = data;
2683 	struct spi_controller *ctlr = lookup->ctlr;
2684 
2685 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2686 		struct acpi_resource_spi_serialbus *sb;
2687 		acpi_handle parent_handle;
2688 		acpi_status status;
2689 
2690 		sb = &ares->data.spi_serial_bus;
2691 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2692 
2693 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2694 				return 1;
2695 
2696 			status = acpi_get_handle(NULL,
2697 						 sb->resource_source.string_ptr,
2698 						 &parent_handle);
2699 
2700 			if (ACPI_FAILURE(status))
2701 				return -ENODEV;
2702 
2703 			if (ctlr) {
2704 				if (ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
2705 					return -ENODEV;
2706 			} else {
2707 				struct acpi_device *adev;
2708 
2709 				adev = acpi_fetch_acpi_dev(parent_handle);
2710 				if (!adev)
2711 					return -ENODEV;
2712 
2713 				ctlr = acpi_spi_find_controller_by_adev(adev);
2714 				if (!ctlr)
2715 					return -EPROBE_DEFER;
2716 
2717 				lookup->ctlr = ctlr;
2718 			}
2719 
2720 			/*
2721 			 * ACPI DeviceSelection numbering is handled by the
2722 			 * host controller driver in Windows and can vary
2723 			 * from driver to driver. In Linux we always expect
2724 			 * 0 .. max - 1 so we need to ask the driver to
2725 			 * translate between the two schemes.
2726 			 */
2727 			if (ctlr->fw_translate_cs) {
2728 				int cs = ctlr->fw_translate_cs(ctlr,
2729 						sb->device_selection);
2730 				if (cs < 0)
2731 					return cs;
2732 				lookup->chip_select = cs;
2733 			} else {
2734 				lookup->chip_select = sb->device_selection;
2735 			}
2736 
2737 			lookup->max_speed_hz = sb->connection_speed;
2738 			lookup->bits_per_word = sb->data_bit_length;
2739 
2740 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2741 				lookup->mode |= SPI_CPHA;
2742 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2743 				lookup->mode |= SPI_CPOL;
2744 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2745 				lookup->mode |= SPI_CS_HIGH;
2746 		}
2747 	} else if (lookup->irq < 0) {
2748 		struct resource r;
2749 
2750 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2751 			lookup->irq = r.start;
2752 	}
2753 
2754 	/* Always tell the ACPI core to skip this resource */
2755 	return 1;
2756 }
2757 
2758 /**
2759  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2760  * @ctlr: controller to which the spi device belongs
2761  * @adev: ACPI Device for the spi device
2762  * @index: Index of the spi resource inside the ACPI Node
2763  *
2764  * This should be used to allocate a new SPI device from an ACPI Device node.
2765  * The caller is responsible for calling spi_add_device to register the SPI device.
2766  *
2767  * If ctlr is set to NULL, the controller for the SPI device will be looked up
2768  * using the resource.
2769  * If index is set to -1, index is not used.
2770  * Note: If index is -1, ctlr must be set.
2771  *
2772  * Return: a pointer to the new device, or ERR_PTR on error.
2773  */
2774 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2775 					 struct acpi_device *adev,
2776 					 int index)
2777 {
2778 	acpi_handle parent_handle = NULL;
2779 	struct list_head resource_list;
2780 	struct acpi_spi_lookup lookup = {};
2781 	struct spi_device *spi;
2782 	int ret;
2783 	u8 idx;
2784 
2785 	if (!ctlr && index == -1)
2786 		return ERR_PTR(-EINVAL);
2787 
2788 	lookup.ctlr		= ctlr;
2789 	lookup.irq		= -1;
2790 	lookup.index		= index;
2791 	lookup.n		= 0;
2792 
2793 	INIT_LIST_HEAD(&resource_list);
2794 	ret = acpi_dev_get_resources(adev, &resource_list,
2795 				     acpi_spi_add_resource, &lookup);
2796 	acpi_dev_free_resource_list(&resource_list);
2797 
2798 	if (ret < 0)
2799 		/* Found SPI in _CRS but it points to another controller */
2800 		return ERR_PTR(ret);
2801 
2802 	if (!lookup.max_speed_hz &&
2803 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2804 	    ACPI_HANDLE(lookup.ctlr->dev.parent) == parent_handle) {
2805 		/* Apple does not use _CRS but nested devices for SPI slaves */
2806 		acpi_spi_parse_apple_properties(adev, &lookup);
2807 	}
2808 
2809 	if (!lookup.max_speed_hz)
2810 		return ERR_PTR(-ENODEV);
2811 
2812 	spi = spi_alloc_device(lookup.ctlr);
2813 	if (!spi) {
2814 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2815 			dev_name(&adev->dev));
2816 		return ERR_PTR(-ENOMEM);
2817 	}
2818 
2819 	/*
2820 	 * Zero (0) is a valid physical CS value and can be located at any
2821 	 * logical CS in spi->chip_select[]. If all the physical CS were
2822 	 * initialized to 0, it would be impossible to differentiate a valid
2823 	 * physical CS 0 from an unused logical CS whose physical CS happens
2824 	 * to be 0. To solve this, initialize all the CS to 0xFF: unused
2825 	 * logical CS then carry the physical CS value 0xFF and can be
2826 	 * ignored when performing physical CS validity checks.
2827 	 */
2828 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
2829 		spi_set_chipselect(spi, idx, 0xFF);
2830 
2831 	ACPI_COMPANION_SET(&spi->dev, adev);
2832 	spi->max_speed_hz	= lookup.max_speed_hz;
2833 	spi->mode		|= lookup.mode;
2834 	spi->irq		= lookup.irq;
2835 	spi->bits_per_word	= lookup.bits_per_word;
2836 	spi_set_chipselect(spi, 0, lookup.chip_select);
2837 	/*
2838 	 * spi->chip_select[i] gives the physical CS for logical CS i, and a
2839 	 * logical CS is selected by setting the i-th bit in spi->cs_index_mask.
2840 	 * So, for example, if spi->cs_index_mask = 0x01 then logical CS 0 is
2841 	 * selected and spi->chip_select[0] gives its physical CS.
2842 	 * By default spi->chip_select[0] holds the physical CS number, so set
2843 	 * spi->cs_index_mask to 0x01.
2844 	 */
2845 	spi->cs_index_mask	= 0x01;
2846 
2847 	return spi;
2848 }
2849 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2850 
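/*
 * Illustrative sketch (not part of this file): allocating and adding an
 * SPI device for the first SpiSerialBus resource of an ACPI node, letting
 * the core look the controller up from the resource (ctlr == NULL). The
 * adev and ret variables are assumed to be in scope.
 *
 *	struct spi_device *spi = acpi_spi_device_alloc(NULL, adev, 0);
 *
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 *	if (ret)
 *		spi_dev_put(spi);
 */
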
2851 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2852 					    struct acpi_device *adev)
2853 {
2854 	struct spi_device *spi;
2855 
2856 	if (acpi_bus_get_status(adev) || !adev->status.present ||
2857 	    acpi_device_enumerated(adev))
2858 		return AE_OK;
2859 
2860 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
2861 	if (IS_ERR(spi)) {
2862 		if (PTR_ERR(spi) == -ENOMEM)
2863 			return AE_NO_MEMORY;
2864 		else
2865 			return AE_OK;
2866 	}
2867 
2868 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2869 			  sizeof(spi->modalias));
2870 
2871 	if (spi->irq < 0)
2872 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2873 
2874 	acpi_device_set_enumerated(adev);
2875 
2876 	adev->power.flags.ignore_parent = true;
2877 	if (spi_add_device(spi)) {
2878 		adev->power.flags.ignore_parent = false;
2879 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2880 			dev_name(&adev->dev));
2881 		spi_dev_put(spi);
2882 	}
2883 
2884 	return AE_OK;
2885 }
2886 
2887 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2888 				       void *data, void **return_value)
2889 {
2890 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2891 	struct spi_controller *ctlr = data;
2892 
2893 	if (!adev)
2894 		return AE_OK;
2895 
2896 	return acpi_register_spi_device(ctlr, adev);
2897 }
2898 
2899 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
2900 
2901 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2902 {
2903 	acpi_status status;
2904 	acpi_handle handle;
2905 
2906 	handle = ACPI_HANDLE(ctlr->dev.parent);
2907 	if (!handle)
2908 		return;
2909 
2910 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2911 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2912 				     acpi_spi_add_device, NULL, ctlr, NULL);
2913 	if (ACPI_FAILURE(status))
2914 		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2915 }
2916 #else
2917 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2918 #endif /* CONFIG_ACPI */
2919 
2920 static void spi_controller_release(struct device *dev)
2921 {
2922 	struct spi_controller *ctlr;
2923 
2924 	ctlr = container_of(dev, struct spi_controller, dev);
2925 	kfree(ctlr);
2926 }
2927 
2928 static struct class spi_master_class = {
2929 	.name		= "spi_master",
2930 	.dev_release	= spi_controller_release,
2931 	.dev_groups	= spi_master_groups,
2932 };
2933 
2934 #ifdef CONFIG_SPI_SLAVE
2935 /**
2936  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2937  *		     controller
2938  * @spi: device used for the current transfer
2939  */
2940 int spi_slave_abort(struct spi_device *spi)
2941 {
2942 	struct spi_controller *ctlr = spi->controller;
2943 
2944 	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2945 		return ctlr->slave_abort(ctlr);
2946 
2947 	return -ENOTSUPP;
2948 }
2949 EXPORT_SYMBOL_GPL(spi_slave_abort);
2950 
2951 int spi_target_abort(struct spi_device *spi)
2952 {
2953 	struct spi_controller *ctlr = spi->controller;
2954 
2955 	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2956 		return ctlr->target_abort(ctlr);
2957 
2958 	return -ENOTSUPP;
2959 }
2960 EXPORT_SYMBOL_GPL(spi_target_abort);
2961 
2962 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2963 			  char *buf)
2964 {
2965 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2966 						   dev);
2967 	struct device *child;
2968 
2969 	child = device_find_any_child(&ctlr->dev);
2970 	return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2971 }
2972 
2973 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2974 			   const char *buf, size_t count)
2975 {
2976 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2977 						   dev);
2978 	struct spi_device *spi;
2979 	struct device *child;
2980 	char name[32];
2981 	int rc;
2982 
2983 	rc = sscanf(buf, "%31s", name);
2984 	if (rc != 1 || !name[0])
2985 		return -EINVAL;
2986 
2987 	child = device_find_any_child(&ctlr->dev);
2988 	if (child) {
2989 		/* Remove registered slave */
2990 		device_unregister(child);
2991 		put_device(child);
2992 	}
2993 
2994 	if (strcmp(name, "(null)")) {
2995 		/* Register new slave */
2996 		spi = spi_alloc_device(ctlr);
2997 		if (!spi)
2998 			return -ENOMEM;
2999 
3000 		strscpy(spi->modalias, name, sizeof(spi->modalias));
3001 
3002 		rc = spi_add_device(spi);
3003 		if (rc) {
3004 			spi_dev_put(spi);
3005 			return rc;
3006 		}
3007 	}
3008 
3009 	return count;
3010 }
3011 
3012 static DEVICE_ATTR_RW(slave);
3013 
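/*
 * Usage from userspace (illustrative): a slave protocol driver is bound
 * by writing its name to this attribute, and unbound by writing "(null)".
 * The exact sysfs path depends on the bus number:
 *
 *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	echo "(null)"       > /sys/class/spi_slave/spi0/slave
 */
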
3014 static struct attribute *spi_slave_attrs[] = {
3015 	&dev_attr_slave.attr,
3016 	NULL,
3017 };
3018 
3019 static const struct attribute_group spi_slave_group = {
3020 	.attrs = spi_slave_attrs,
3021 };
3022 
3023 static const struct attribute_group *spi_slave_groups[] = {
3024 	&spi_controller_statistics_group,
3025 	&spi_slave_group,
3026 	NULL,
3027 };
3028 
3029 static struct class spi_slave_class = {
3030 	.name		= "spi_slave",
3031 	.dev_release	= spi_controller_release,
3032 	.dev_groups	= spi_slave_groups,
3033 };
3034 #else
3035 extern struct class spi_slave_class;	/* dummy */
3036 #endif
3037 
3038 /**
3039  * __spi_alloc_controller - allocate an SPI master or slave controller
3040  * @dev: the controller, possibly using the platform_bus
3041  * @size: how much zeroed driver-private data to allocate; the pointer to this
3042  *	memory is in the driver_data field of the returned device, accessible
3043  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
3044  *	drivers granting DMA access to portions of their private data need to
3045  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
3046  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3047  *	slave (true) controller
3048  * Context: can sleep
3049  *
3050  * This call is used only by SPI controller drivers, which are the
3051  * only ones directly touching chip registers.  It's how they allocate
3052  * an spi_controller structure, prior to calling spi_register_controller().
3053  *
3054  * This must be called from context that can sleep.
3055  *
3056  * The caller is responsible for assigning the bus number and initializing the
3057  * controller's methods before calling spi_register_controller(); and (after
3058  * errors adding the device) calling spi_controller_put() to prevent a memory
3059  * leak.
3060  *
3061  * Return: the SPI controller structure on success, else NULL.
3062  */
3063 struct spi_controller *__spi_alloc_controller(struct device *dev,
3064 					      unsigned int size, bool slave)
3065 {
3066 	struct spi_controller	*ctlr;
3067 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3068 
3069 	if (!dev)
3070 		return NULL;
3071 
3072 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3073 	if (!ctlr)
3074 		return NULL;
3075 
3076 	device_initialize(&ctlr->dev);
3077 	INIT_LIST_HEAD(&ctlr->queue);
3078 	spin_lock_init(&ctlr->queue_lock);
3079 	spin_lock_init(&ctlr->bus_lock_spinlock);
3080 	mutex_init(&ctlr->bus_lock_mutex);
3081 	mutex_init(&ctlr->io_mutex);
3082 	mutex_init(&ctlr->add_lock);
3083 	ctlr->bus_num = -1;
3084 	ctlr->num_chipselect = 1;
3085 	ctlr->slave = slave;
3086 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3087 		ctlr->dev.class = &spi_slave_class;
3088 	else
3089 		ctlr->dev.class = &spi_master_class;
3090 	ctlr->dev.parent = dev;
3091 	pm_suspend_ignore_children(&ctlr->dev, true);
3092 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3093 
3094 	return ctlr;
3095 }
3096 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
3097 
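/*
 * Illustrative sketch (not part of this file): controller drivers do not
 * normally call __spi_alloc_controller() directly but use the
 * spi_alloc_master()/spi_alloc_slave() wrappers, e.g. from probe():
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;			// hypothetical driver data
 *
 *	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */
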
3098 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3099 {
3100 	spi_controller_put(*(struct spi_controller **)ctlr);
3101 }
3102 
3103 /**
3104  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3105  * @dev: physical device of SPI controller
3106  * @size: how much zeroed driver-private data to allocate
3107  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3108  * Context: can sleep
3109  *
3110  * Allocate an SPI controller and automatically release a reference on it
3111  * when @dev is unbound from its driver.  Drivers are thus relieved from
3112  * having to call spi_controller_put().
3113  *
3114  * The arguments to this function are identical to __spi_alloc_controller().
3115  *
3116  * Return: the SPI controller structure on success, else NULL.
3117  */
3118 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3119 						   unsigned int size,
3120 						   bool slave)
3121 {
3122 	struct spi_controller **ptr, *ctlr;
3123 
3124 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3125 			   GFP_KERNEL);
3126 	if (!ptr)
3127 		return NULL;
3128 
3129 	ctlr = __spi_alloc_controller(dev, size, slave);
3130 	if (ctlr) {
3131 		ctlr->devm_allocated = true;
3132 		*ptr = ctlr;
3133 		devres_add(dev, ptr);
3134 	} else {
3135 		devres_free(ptr);
3136 	}
3137 
3138 	return ctlr;
3139 }
3140 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3141 
3142 /**
3143  * spi_get_gpio_descs() - grab chip select GPIOs for the master
3144  * @ctlr: The SPI master to grab GPIO descriptors for
3145  */
3146 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3147 {
3148 	int nb, i;
3149 	struct gpio_desc **cs;
3150 	struct device *dev = &ctlr->dev;
3151 	unsigned long native_cs_mask = 0;
3152 	unsigned int num_cs_gpios = 0;
3153 
3154 	nb = gpiod_count(dev, "cs");
3155 	if (nb < 0) {
3156 		/* No GPIOs at all is fine, else return the error */
3157 		if (nb == -ENOENT)
3158 			return 0;
3159 		return nb;
3160 	}
3161 
3162 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3163 
3164 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3165 			  GFP_KERNEL);
3166 	if (!cs)
3167 		return -ENOMEM;
3168 	ctlr->cs_gpiods = cs;
3169 
3170 	for (i = 0; i < nb; i++) {
3171 		/*
3172 		 * Most chipselects are active low, the inverted
3173 		 * semantics are handled by special quirks in gpiolib,
3174 		 * so initializing them to GPIOD_OUT_LOW here means
3175 		 * "unasserted"; in most cases this will drive the physical
3176 		 * line high.
3177 		 */
3178 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3179 						      GPIOD_OUT_LOW);
3180 		if (IS_ERR(cs[i]))
3181 			return PTR_ERR(cs[i]);
3182 
3183 		if (cs[i]) {
3184 			/*
3185 			 * If we find a CS GPIO, name it after the device and
3186 			 * chip select line.
3187 			 */
3188 			char *gpioname;
3189 
3190 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3191 						  dev_name(dev), i);
3192 			if (!gpioname)
3193 				return -ENOMEM;
3194 			gpiod_set_consumer_name(cs[i], gpioname);
3195 			num_cs_gpios++;
3196 			continue;
3197 		}
3198 
3199 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3200 			dev_err(dev, "Invalid native chip select %d\n", i);
3201 			return -EINVAL;
3202 		}
3203 		native_cs_mask |= BIT(i);
3204 	}
3205 
3206 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3207 
3208 	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3209 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3210 		dev_err(dev, "No unused native chip select available\n");
3211 		return -EINVAL;
3212 	}
3213 
3214 	return 0;
3215 }
3216 
3217 static int spi_controller_check_ops(struct spi_controller *ctlr)
3218 {
3219 	/*
3220 	 * The controller may implement only the high-level SPI-memory-like
3221 	 * operations if it does not support regular SPI transfers, and this
3222 	 * is a valid use case.
3223 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at
3224 	 * least one of the ->transfer_xxx() methods be implemented.
3225 	 */
3226 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3227 		if (!ctlr->transfer && !ctlr->transfer_one &&
3228 		   !ctlr->transfer_one_message) {
3229 			return -EINVAL;
3230 		}
3231 	}
3232 
3233 	return 0;
3234 }
3235 
3236 /* Allocate dynamic bus number using Linux idr */
3237 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3238 {
3239 	int id;
3240 
3241 	mutex_lock(&board_lock);
3242 	id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3243 	mutex_unlock(&board_lock);
3244 	if (WARN(id < 0, "couldn't get idr"))
3245 		return id == -ENOSPC ? -EBUSY : id;
3246 	ctlr->bus_num = id;
3247 	return 0;
3248 }
3249 
3250 /**
3251  * spi_register_controller - register SPI master or slave controller
3252  * @ctlr: initialized master, originally from spi_alloc_master() or
3253  *	spi_alloc_slave()
3254  * Context: can sleep
3255  *
3256  * SPI controllers connect to their drivers using some non-SPI bus,
3257  * such as the platform bus.  The final stage of probe() in that code
3258  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3259  *
3260  * SPI controllers use board specific (often SOC specific) bus numbers,
3261  * and board-specific addressing for SPI devices combines those numbers
3262  * with chip select numbers.  Since SPI does not directly support dynamic
3263  * device identification, boards need configuration tables telling which
3264  * chip is at which address.
3265  *
3266  * This must be called from context that can sleep.  It returns zero on
3267  * success, else a negative error code (dropping the controller's refcount).
3268  * After a successful return, the caller is responsible for calling
3269  * spi_unregister_controller().
3270  *
3271  * Return: zero on success, else a negative error code.
3272  */
3273 int spi_register_controller(struct spi_controller *ctlr)
3274 {
3275 	struct device		*dev = ctlr->dev.parent;
3276 	struct boardinfo	*bi;
3277 	int			first_dynamic;
3278 	int			status;
3279 	int			idx;
3280 
3281 	if (!dev)
3282 		return -ENODEV;
3283 
3284 	/*
3285 	 * Make sure all necessary hooks are implemented before registering
3286 	 * the SPI controller.
3287 	 */
3288 	status = spi_controller_check_ops(ctlr);
3289 	if (status)
3290 		return status;
3291 
3292 	if (ctlr->bus_num < 0)
3293 		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3294 	if (ctlr->bus_num >= 0) {
3295 		/* Devices with a fixed bus num must check in with that num */
3296 		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3297 		if (status)
3298 			return status;
3299 	}
3300 	if (ctlr->bus_num < 0) {
3301 		first_dynamic = of_alias_get_highest_id("spi");
3302 		if (first_dynamic < 0)
3303 			first_dynamic = 0;
3304 		else
3305 			first_dynamic++;
3306 
3307 		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3308 		if (status)
3309 			return status;
3310 	}
3311 	ctlr->bus_lock_flag = 0;
3312 	init_completion(&ctlr->xfer_completion);
3313 	init_completion(&ctlr->cur_msg_completion);
3314 	if (!ctlr->max_dma_len)
3315 		ctlr->max_dma_len = INT_MAX;
3316 
3317 	/*
3318 	 * Register the device, then userspace will see it.
3319 	 * Registration fails if the bus ID is in use.
3320 	 */
3321 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3322 
3323 	if (!spi_controller_is_slave(ctlr) && ctlr->use_gpio_descriptors) {
3324 		status = spi_get_gpio_descs(ctlr);
3325 		if (status)
3326 			goto free_bus_id;
3327 		/*
3328 		 * A controller using GPIO descriptors always
3329 		 * supports SPI_CS_HIGH if need be.
3330 		 */
3331 		ctlr->mode_bits |= SPI_CS_HIGH;
3332 	}
3333 
3334 	/*
3335 	 * Even if it's just one always-selected device, there must
3336 	 * be at least one chipselect.
3337 	 */
3338 	if (!ctlr->num_chipselect) {
3339 		status = -EINVAL;
3340 		goto free_bus_id;
3341 	}
3342 
3343 	/* Setting last_cs to -1 means no chip selected */
3344 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3345 		ctlr->last_cs[idx] = -1;
3346 
3347 	status = device_add(&ctlr->dev);
3348 	if (status < 0)
3349 		goto free_bus_id;
3350 	dev_dbg(dev, "registered %s %s\n",
3351 			spi_controller_is_slave(ctlr) ? "slave" : "master",
3352 			dev_name(&ctlr->dev));
3353 
3354 	/*
3355 	 * If we're using a queued driver, start the queue. Note that we don't
3356 	 * need the queueing logic if the driver is only supporting high-level
3357 	 * memory operations.
3358 	 */
3359 	if (ctlr->transfer) {
3360 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3361 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3362 		status = spi_controller_initialize_queue(ctlr);
3363 		if (status) {
3364 			device_del(&ctlr->dev);
3365 			goto free_bus_id;
3366 		}
3367 	}
3368 	/* Add statistics */
3369 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3370 	if (!ctlr->pcpu_statistics) {
3371 		dev_err(dev, "Error allocating per-cpu statistics\n");
3372 		status = -ENOMEM;
3373 		goto destroy_queue;
3374 	}
3375 
3376 	mutex_lock(&board_lock);
3377 	list_add_tail(&ctlr->list, &spi_controller_list);
3378 	list_for_each_entry(bi, &board_list, list)
3379 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3380 	mutex_unlock(&board_lock);
3381 
3382 	/* Register devices from the device tree and ACPI */
3383 	of_register_spi_devices(ctlr);
3384 	acpi_register_spi_devices(ctlr);
3385 	return status;
3386 
3387 destroy_queue:
3388 	spi_destroy_queue(ctlr);
3389 free_bus_id:
3390 	mutex_lock(&board_lock);
3391 	idr_remove(&spi_master_idr, ctlr->bus_num);
3392 	mutex_unlock(&board_lock);
3393 	return status;
3394 }
3395 EXPORT_SYMBOL_GPL(spi_register_controller);
3396 
3397 static void devm_spi_unregister(struct device *dev, void *res)
3398 {
3399 	spi_unregister_controller(*(struct spi_controller **)res);
3400 }
3401 
3402 /**
3403  * devm_spi_register_controller - register managed SPI master or slave
3404  *	controller
3405  * @dev:    device managing SPI controller
3406  * @ctlr: initialized controller, originally from spi_alloc_master() or
3407  *	spi_alloc_slave()
3408  * Context: can sleep
3409  *
3410  * Register an SPI controller as with spi_register_controller(), except that
3411  * it is automatically unregistered and freed when @dev is unbound.
3412  *
3413  * Return: zero on success, else a negative error code.
3414  */
3415 int devm_spi_register_controller(struct device *dev,
3416 				 struct spi_controller *ctlr)
3417 {
3418 	struct spi_controller **ptr;
3419 	int ret;
3420 
3421 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3422 	if (!ptr)
3423 		return -ENOMEM;
3424 
3425 	ret = spi_register_controller(ctlr);
3426 	if (!ret) {
3427 		*ptr = ctlr;
3428 		devres_add(dev, ptr);
3429 	} else {
3430 		devres_free(ptr);
3431 	}
3432 
3433 	return ret;
3434 }
3435 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3436 
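/*
 * Illustrative sketch (not part of this file): the usual registration
 * sequence in a platform driver's probe(), using the devm_ variants so
 * that unregistration and the final reference drop happen automatically
 * on unbind. The foo_priv type and foo_transfer_one() callback are
 * hypothetical.
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *
 *	return devm_spi_register_controller(&pdev->dev, ctlr);
 */
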
3437 static int __unregister(struct device *dev, void *null)
3438 {
3439 	spi_unregister_device(to_spi_device(dev));
3440 	return 0;
3441 }
3442 
3443 /**
3444  * spi_unregister_controller - unregister SPI master or slave controller
3445  * @ctlr: the controller being unregistered
3446  * Context: can sleep
3447  *
3448  * This call is used only by SPI controller drivers, which are the
3449  * only ones directly touching chip registers.
3450  *
3451  * This must be called from context that can sleep.
3452  *
3453  * Note that this function also drops a reference to the controller.
3454  */
3455 void spi_unregister_controller(struct spi_controller *ctlr)
3456 {
3457 	struct spi_controller *found;
3458 	int id = ctlr->bus_num;
3459 
3460 	/* Prevent addition of new devices, unregister existing ones */
3461 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3462 		mutex_lock(&ctlr->add_lock);
3463 
3464 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3465 
3466 	/* First make sure that this controller was ever added */
3467 	mutex_lock(&board_lock);
3468 	found = idr_find(&spi_master_idr, id);
3469 	mutex_unlock(&board_lock);
3470 	if (ctlr->queued) {
3471 		if (spi_destroy_queue(ctlr))
3472 			dev_err(&ctlr->dev, "queue remove failed\n");
3473 	}
3474 	mutex_lock(&board_lock);
3475 	list_del(&ctlr->list);
3476 	mutex_unlock(&board_lock);
3477 
3478 	device_del(&ctlr->dev);
3479 
3480 	/* Free bus id */
3481 	mutex_lock(&board_lock);
3482 	if (found == ctlr)
3483 		idr_remove(&spi_master_idr, id);
3484 	mutex_unlock(&board_lock);
3485 
3486 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3487 		mutex_unlock(&ctlr->add_lock);
3488 
3489 	/*
3490 	 * Release the last reference on the controller if its driver
3491 	 * has not yet been converted to devm_spi_alloc_master/slave().
3492 	 */
3493 	if (!ctlr->devm_allocated)
3494 		put_device(&ctlr->dev);
3495 }
3496 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3497 
3498 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3499 {
3500 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3501 }
3502 
3503 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3504 {
3505 	mutex_lock(&ctlr->bus_lock_mutex);
3506 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3507 	mutex_unlock(&ctlr->bus_lock_mutex);
3508 }
3509 
3510 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3511 {
3512 	mutex_lock(&ctlr->bus_lock_mutex);
3513 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3514 	mutex_unlock(&ctlr->bus_lock_mutex);
3515 }
3516 
3517 int spi_controller_suspend(struct spi_controller *ctlr)
3518 {
3519 	int ret = 0;
3520 
3521 	/* Basically no-ops for non-queued controllers */
3522 	if (ctlr->queued) {
3523 		ret = spi_stop_queue(ctlr);
3524 		if (ret)
3525 			dev_err(&ctlr->dev, "queue stop failed\n");
3526 	}
3527 
3528 	__spi_mark_suspended(ctlr);
3529 	return ret;
3530 }
3531 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3532 
3533 int spi_controller_resume(struct spi_controller *ctlr)
3534 {
3535 	int ret = 0;
3536 
3537 	__spi_mark_resumed(ctlr);
3538 
3539 	if (ctlr->queued) {
3540 		ret = spi_start_queue(ctlr);
3541 		if (ret)
3542 			dev_err(&ctlr->dev, "queue restart failed\n");
3543 	}
3544 	return ret;
3545 }
3546 EXPORT_SYMBOL_GPL(spi_controller_resume);
3547 
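/*
 * Illustrative sketch (not part of this file): system sleep callbacks of
 * a controller driver built around the two helpers above, assuming its
 * probe() stored the controller with dev_set_drvdata(). The foo_priv
 * type and foo_hw_save()/foo_hw_restore() helpers are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		int ret;
 *
 *		ret = spi_controller_suspend(ctlr);
 *		if (ret)
 *			return ret;
 *		foo_hw_save(spi_controller_get_devdata(ctlr));
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *
 *		foo_hw_restore(spi_controller_get_devdata(ctlr));
 *		return spi_controller_resume(ctlr);
 *	}
 */
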
3548 /*-------------------------------------------------------------------------*/
3549 
3550 /* Core methods for spi_message alterations */
3551 
3552 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3553 					    struct spi_message *msg,
3554 					    void *res)
3555 {
3556 	struct spi_replaced_transfers *rxfer = res;
3557 	size_t i;
3558 
3559 	/* Call extra callback if requested */
3560 	if (rxfer->release)
3561 		rxfer->release(ctlr, msg, res);
3562 
3563 	/* Insert replaced transfers back into the message */
3564 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3565 
3566 	/* Remove the formerly inserted entries */
3567 	for (i = 0; i < rxfer->inserted; i++)
3568 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3569 }
3570 
3571 /**
3572  * spi_replace_transfers - replace transfers with several transfers
3573  *                         and register change with spi_message.resources
3574  * @msg:           the spi_message we work upon
3575  * @xfer_first:    the first spi_transfer we want to replace
3576  * @remove:        number of transfers to remove
3577  * @insert:        the number of transfers we want to insert instead
3578  * @release:       extra release code necessary in some circumstances
3579  * @extradatasize: extra data to allocate (with the alignment guarantees
3580  *                 of &struct spi_transfer)
3581  * @gfp:           gfp flags
3582  *
3583  * Return: pointer to the new &struct spi_replaced_transfers,
3584  *         or PTR_ERR(...) in case of errors.
3585  */
3586 static struct spi_replaced_transfers *spi_replace_transfers(
3587 	struct spi_message *msg,
3588 	struct spi_transfer *xfer_first,
3589 	size_t remove,
3590 	size_t insert,
3591 	spi_replaced_release_t release,
3592 	size_t extradatasize,
3593 	gfp_t gfp)
3594 {
3595 	struct spi_replaced_transfers *rxfer;
3596 	struct spi_transfer *xfer;
3597 	size_t i;
3598 
3599 	/* Allocate the structure using spi_res */
3600 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3601 			      struct_size(rxfer, inserted_transfers, insert)
3602 			      + extradatasize,
3603 			      gfp);
3604 	if (!rxfer)
3605 		return ERR_PTR(-ENOMEM);
3606 
3607 	/* The release code to invoke before running the generic release */
3608 	rxfer->release = release;
3609 
3610 	/* Assign extradata */
3611 	if (extradatasize)
3612 		rxfer->extradata =
3613 			&rxfer->inserted_transfers[insert];
3614 
3615 	/* Init the replaced_transfers list */
3616 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3617 
3618 	/*
3619 	 * Assign the list_entry after which we should reinsert
3620 	 * the @replaced_transfers - it may be &msg->transfers itself!
3621 	 */
3622 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3623 
3624 	/* Remove the requested number of transfers */
3625 	for (i = 0; i < remove; i++) {
3626 		/*
3627 		 * If the entry after replaced_after is &msg->transfers,
3628 		 * then we have been asked to remove more transfers
3629 		 * than there are in the list.
3630 		 */
3631 		if (rxfer->replaced_after->next == &msg->transfers) {
3632 			dev_err(&msg->spi->dev,
3633 				"requested to remove more spi_transfers than are available\n");
3634 			/* Insert replaced transfers back into the message */
3635 			list_splice(&rxfer->replaced_transfers,
3636 				    rxfer->replaced_after);
3637 
3638 			/* Free the spi_replaced_transfers structure... */
3639 			spi_res_free(rxfer);
3640 
3641 			/* ...and return with an error */
3642 			return ERR_PTR(-EINVAL);
3643 		}
3644 
3645 		/*
3646 		 * Remove the entry after replaced_after from list of
3647 		 * transfers and add it to list of replaced_transfers.
3648 		 */
3649 		list_move_tail(rxfer->replaced_after->next,
3650 			       &rxfer->replaced_transfers);
3651 	}
3652 
3653 	/*
3654 	 * Create the inserted copies, each with settings identical
3655 	 * to the first transfer that is being removed.
3656 	 */
3657 	for (i = 0; i < insert; i++) {
3658 		/* We need to run in reverse order */
3659 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3660 
3661 		/* Copy all spi_transfer data */
3662 		memcpy(xfer, xfer_first, sizeof(*xfer));
3663 
3664 		/* Add to list */
3665 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3666 
3667 		/* Clear cs_change and delay for all but the last */
3668 		if (i) {
3669 			xfer->cs_change = false;
3670 			xfer->delay.value = 0;
3671 		}
3672 	}
3673 
3674 	/* Set up inserted... */
3675 	rxfer->inserted = insert;
3676 
3677 	/* ...and register it with spi_res/spi_message */
3678 	spi_res_add(msg, rxfer);
3679 
3680 	return rxfer;
3681 }
3682 
3683 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3684 					struct spi_message *msg,
3685 					struct spi_transfer **xferp,
3686 					size_t maxsize,
3687 					gfp_t gfp)
3688 {
3689 	struct spi_transfer *xfer = *xferp, *xfers;
3690 	struct spi_replaced_transfers *srt;
3691 	size_t offset;
3692 	size_t count, i;
3693 
3694 	/* Calculate how many we have to replace */
3695 	count = DIV_ROUND_UP(xfer->len, maxsize);
3696 
3697 	/* Create replacement */
3698 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3699 	if (IS_ERR(srt))
3700 		return PTR_ERR(srt);
3701 	xfers = srt->inserted_transfers;
3702 
3703 	/*
3704 	 * Now handle each of those newly inserted spi_transfers.
3705 	 * Note that the replacement spi_transfers are all preset
3706 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3707 	 * are all identical (as are most other fields), so we
3708 	 * just have to fix up len and the pointers.
3709 	 *
3710 	 * This also includes support for the deprecated
3711 	 * spi_message.is_dma_mapped interface.
3712 	 */
3713 
3714 	/*
3715 	 * The first transfer just needs the length modified, so we
3716 	 * handle it outside the loop.
3717 	 */
3718 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3719 
3720 	/* All the others need rx_buf/tx_buf also set */
3721 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3722 		/* Update rx_buf, tx_buf and DMA */
3723 		if (xfers[i].rx_buf)
3724 			xfers[i].rx_buf += offset;
3725 		if (xfers[i].rx_dma)
3726 			xfers[i].rx_dma += offset;
3727 		if (xfers[i].tx_buf)
3728 			xfers[i].tx_buf += offset;
3729 		if (xfers[i].tx_dma)
3730 			xfers[i].tx_dma += offset;
3731 
3732 		/* Update length */
3733 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3734 	}
3735 
3736 	/*
3737 	 * Set *xferp to the last entry we have inserted, so that
3738 	 * the caller skips over the already-split transfers.
3739 	 */
3740 	*xferp = &xfers[count - 1];
3741 
3742 	/* Increment statistics counters */
3743 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3744 				       transfers_split_maxsize);
3745 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3746 				       transfers_split_maxsize);
3747 
3748 	return 0;
3749 }
3750 
3751 /**
3752  * spi_split_transfers_maxsize - split SPI transfers into multiple transfers
3753  *                               when an individual transfer exceeds a
3754  *                               certain size
3755  * @ctlr:    the @spi_controller for this transfer
3756  * @msg:     the @spi_message to transform
3757  * @maxsize: the maximum length, in bytes, to allow per transfer
3758  * @gfp:     GFP allocation flags
3759  *
3760  * Return: zero on success, else a negative error code
3761  */
3762 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3763 				struct spi_message *msg,
3764 				size_t maxsize,
3765 				gfp_t gfp)
3766 {
3767 	struct spi_transfer *xfer;
3768 	int ret;
3769 
3770 	/*
3771 	 * Iterate over the transfer_list,
3772 	 * but note that xfer is advanced to the last transfer inserted,
3773 	 * to avoid checking sizes again unnecessarily (note that xfer
3774 	 * may also belong to a different list by the time the
3775 	 * replacement has happened).
3776 	 */
3777 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3778 		if (xfer->len > maxsize) {
3779 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3780 							   maxsize, gfp);
3781 			if (ret)
3782 				return ret;
3783 		}
3784 	}
3785 
3786 	return 0;
3787 }
3788 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
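
/*
 * Example (editorial sketch, not part of the original file): a driver with a
 * hardware FIFO limit might split a prepared message before queueing it. The
 * 64-byte limit and the foo_* name are illustrative only.
 *
 *	static int foo_limit_message(struct spi_controller *ctlr,
 *				     struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 64, GFP_KERNEL);
 *	}
 */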
3789 
3790 
3791 /**
3792  * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3793  *                                when an individual transfer exceeds a
3794  *                                certain number of SPI words
3795  * @ctlr:     the @spi_controller for this transfer
3796  * @msg:      the @spi_message to transform
3797  * @maxwords: the number of words to limit each transfer to
3798  * @gfp:      GFP allocation flags
3799  *
3800  * Return: zero on success, else a negative error code
3801  */
3802 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3803 				 struct spi_message *msg,
3804 				 size_t maxwords,
3805 				 gfp_t gfp)
3806 {
3807 	struct spi_transfer *xfer;
3808 
3809 	/*
3810 	 * Iterate over the transfer_list,
3811 	 * but note that xfer is advanced to the last transfer inserted,
3812 	 * to avoid checking sizes again unnecessarily (note that xfer
3813 	 * may also belong to a different list by the time the
3814 	 * replacement has happened).
3815 	 */
3816 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3817 		size_t maxsize;
3818 		int ret;
3819 
3820 		maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3821 		if (xfer->len > maxsize) {
3822 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3823 							   maxsize, gfp);
3824 			if (ret)
3825 				return ret;
3826 		}
3827 	}
3828 
3829 	return 0;
3830 }
3831 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
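
/*
 * Example (editorial sketch, not part of the original file): the word-based
 * variant is convenient when the hardware limit is expressed in FIFO entries
 * rather than bytes. A hypothetical 16-entry FIFO would be handled as:
 *
 *	int ret = spi_split_transfers_maxwords(ctlr, msg, 16, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */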
3832 
3833 /*-------------------------------------------------------------------------*/
3834 
3835 /*
3836  * Core methods for SPI controller protocol drivers. Some of the
3837  * other core methods are currently defined as inline functions.
3838  */
3839 
3840 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3841 					u8 bits_per_word)
3842 {
3843 	if (ctlr->bits_per_word_mask) {
3844 		/* Only 32 bits fit in the mask */
3845 		if (bits_per_word > 32)
3846 			return -EINVAL;
3847 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3848 			return -EINVAL;
3849 	}
3850 
3851 	return 0;
3852 }
3853 
3854 /**
3855  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3856  * @spi: the device that requires specific CS timing configuration
3857  *
3858  * Return: zero on success, else a negative error code.
3859  */
3860 static int spi_set_cs_timing(struct spi_device *spi)
3861 {
3862 	struct device *parent = spi->controller->dev.parent;
3863 	int status = 0;
3864 
3865 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3866 		if (spi->controller->auto_runtime_pm) {
3867 			status = pm_runtime_get_sync(parent);
3868 			if (status < 0) {
3869 				pm_runtime_put_noidle(parent);
3870 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3871 					status);
3872 				return status;
3873 			}
3874 
3875 			status = spi->controller->set_cs_timing(spi);
3876 			pm_runtime_mark_last_busy(parent);
3877 			pm_runtime_put_autosuspend(parent);
3878 		} else {
3879 			status = spi->controller->set_cs_timing(spi);
3880 		}
3881 	}
3882 	return status;
3883 }
3884 
3885 /**
3886  * spi_setup - setup SPI mode and clock rate
3887  * @spi: the device whose settings are being modified
3888  * Context: can sleep, and no requests are queued to the device
3889  *
3890  * SPI protocol drivers may need to update the transfer mode if the
3891  * device doesn't work with its default.  They may likewise need
3892  * to update clock rates or word sizes from initial values.  This function
3893  * changes those settings, and must be called from a context that can sleep.
3894  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3895  * effect the next time the device is selected and data is transferred to
3896  * or from it.  When this function returns, the SPI device is deselected.
3897  *
3898  * Note that this call will fail if the protocol driver specifies an option
3899  * that the underlying controller or its driver does not support.  For
3900  * example, not all hardware supports wire transfers using nine bit words,
3901  * LSB-first wire encoding, or active-high chipselects.
3902  *
3903  * Return: zero on success, else a negative error code.
3904  */
3905 int spi_setup(struct spi_device *spi)
3906 {
3907 	unsigned	bad_bits, ugly_bits;
3908 	int		status = 0;
3909 
3910 	/*
3911 	 * Check the mode to ensure that no two of DUAL, QUAD, and
3912 	 * NO_MOSI/MISO are set at the same time.
3913 	 */
3914 	if ((hweight_long(spi->mode &
3915 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3916 	    (hweight_long(spi->mode &
3917 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3918 		dev_err(&spi->dev,
3919 		"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3920 		return -EINVAL;
3921 	}
3922 	/* In SPI_3WIRE mode, DUAL, QUAD, and OCTAL are forbidden */
3923 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3924 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3925 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3926 		return -EINVAL;
3927 	/*
3928 	 * Help drivers fail *cleanly* when they need options
3929 	 * that aren't supported with their current controller.
3930 	 * SPI_CS_WORD has a fallback software implementation,
3931 	 * so it is ignored here.
3932 	 */
3933 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3934 				 SPI_NO_TX | SPI_NO_RX);
3935 	ugly_bits = bad_bits &
3936 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3937 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3938 	if (ugly_bits) {
3939 		dev_warn(&spi->dev,
3940 			 "setup: ignoring unsupported mode bits %x\n",
3941 			 ugly_bits);
3942 		spi->mode &= ~ugly_bits;
3943 		bad_bits &= ~ugly_bits;
3944 	}
3945 	if (bad_bits) {
3946 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3947 			bad_bits);
3948 		return -EINVAL;
3949 	}
3950 
3951 	if (!spi->bits_per_word) {
3952 		spi->bits_per_word = 8;
3953 	} else {
3954 		/*
3955 		 * Some controllers may not support the default 8 bits-per-word,
3956 		 * so only perform the check when a value was explicitly provided.
3957 		 */
3958 		status = __spi_validate_bits_per_word(spi->controller,
3959 						      spi->bits_per_word);
3960 		if (status)
3961 			return status;
3962 	}
3963 
3964 	if (spi->controller->max_speed_hz &&
3965 	    (!spi->max_speed_hz ||
3966 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3967 		spi->max_speed_hz = spi->controller->max_speed_hz;
3968 
3969 	mutex_lock(&spi->controller->io_mutex);
3970 
3971 	if (spi->controller->setup) {
3972 		status = spi->controller->setup(spi);
3973 		if (status) {
3974 			mutex_unlock(&spi->controller->io_mutex);
3975 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3976 				status);
3977 			return status;
3978 		}
3979 	}
3980 
3981 	status = spi_set_cs_timing(spi);
3982 	if (status) {
3983 		mutex_unlock(&spi->controller->io_mutex);
3984 		return status;
3985 	}
3986 
3987 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3988 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3989 		if (status < 0) {
3990 			mutex_unlock(&spi->controller->io_mutex);
3991 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3992 				status);
3993 			return status;
3994 		}
3995 
3996 		/*
3997 		 * We do not want to return a positive value from pm_runtime_get;
3998 		 * there are many callers of spi_setup() that check for a
3999 		 * non-zero return value instead of a negative
4000 		 * return value.
4001 		 */
4002 		status = 0;
4003 
4004 		spi_set_cs(spi, false, true);
4005 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
4006 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
4007 	} else {
4008 		spi_set_cs(spi, false, true);
4009 	}
4010 
4011 	mutex_unlock(&spi->controller->io_mutex);
4012 
4013 	if (spi->rt && !spi->controller->rt) {
4014 		spi->controller->rt = true;
4015 		spi_set_thread_rt(spi->controller);
4016 	}
4017 
4018 	trace_spi_setup(spi, status);
4019 
4020 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4021 			spi->mode & SPI_MODE_X_MASK,
4022 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4023 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4024 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4025 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
4026 			spi->bits_per_word, spi->max_speed_hz,
4027 			status);
4028 
4029 	return status;
4030 }
4031 EXPORT_SYMBOL_GPL(spi_setup);
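
/*
 * Example (editorial sketch, not part of the original file): a protocol
 * driver overriding the defaults in its probe() before issuing any
 * transfers. The chosen mode, word size and clock rate are illustrative,
 * not requirements; foo_probe is a hypothetical name.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		int ret;
 *
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		ret = spi_setup(spi);
 *		if (ret)
 *			return dev_err_probe(&spi->dev, ret, "spi_setup failed\n");
 *
 *		return 0;
 *	}
 */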
4032 
4033 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4034 				       struct spi_device *spi)
4035 {
4036 	int delay1, delay2;
4037 
4038 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4039 	if (delay1 < 0)
4040 		return delay1;
4041 
4042 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4043 	if (delay2 < 0)
4044 		return delay2;
4045 
4046 	if (delay1 < delay2)
4047 		memcpy(&xfer->word_delay, &spi->word_delay,
4048 		       sizeof(xfer->word_delay));
4049 
4050 	return 0;
4051 }
4052 
4053 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4054 {
4055 	struct spi_controller *ctlr = spi->controller;
4056 	struct spi_transfer *xfer;
4057 	int w_size;
4058 
4059 	if (list_empty(&message->transfers))
4060 		return -EINVAL;
4061 
4062 	/*
4063 	 * If an SPI controller does not support toggling the CS line on each
4064 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4065 	 * for the CS line, we can emulate the CS-per-word hardware function by
4066 	 * splitting transfers into one-word transfers and ensuring that
4067 	 * cs_change is set for each transfer.
4068 	 */
4069 	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
4070 					  spi_is_csgpiod(spi))) {
4071 		size_t maxsize = BITS_TO_BYTES(spi->bits_per_word);
4072 		int ret;
4073 
4074 		/* spi_split_transfers_maxsize() requires message->spi */
4075 		message->spi = spi;
4076 
4077 		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
4078 						  GFP_KERNEL);
4079 		if (ret)
4080 			return ret;
4081 
4082 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4083 			/* Don't change cs_change on the last entry in the list */
4084 			if (list_is_last(&xfer->transfer_list, &message->transfers))
4085 				break;
4086 			xfer->cs_change = 1;
4087 		}
4088 	}
4089 
4090 	/*
4091 	 * Half-duplex links include the original MicroWire, links with
4092 	 * only one data pin like SPI_3WIRE (which switches direction),
4093 	 * and links where either MOSI or MISO is missing.  They can
4094 	 * also be caused by software limitations.
4095 	 */
4096 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4097 	    (spi->mode & SPI_3WIRE)) {
4098 		unsigned flags = ctlr->flags;
4099 
4100 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4101 			if (xfer->rx_buf && xfer->tx_buf)
4102 				return -EINVAL;
4103 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4104 				return -EINVAL;
4105 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4106 				return -EINVAL;
4107 		}
4108 	}
4109 
4110 	/*
4111 	 * Set the transfer bits_per_word and max speed to the SPI device
4112 	 * defaults if they are not set for this transfer.
4113 	 * Set the transfer tx_nbits and rx_nbits to the single-transfer
4114 	 * default (SPI_NBITS_SINGLE) if they are not set for this transfer.
4115 	 * Ensure the transfer word_delay is at least as long as that required
4116 	 * by the device itself.
4117 	 */
4118 	message->frame_length = 0;
4119 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4120 		xfer->effective_speed_hz = 0;
4121 		message->frame_length += xfer->len;
4122 		if (!xfer->bits_per_word)
4123 			xfer->bits_per_word = spi->bits_per_word;
4124 
4125 		if (!xfer->speed_hz)
4126 			xfer->speed_hz = spi->max_speed_hz;
4127 
4128 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4129 			xfer->speed_hz = ctlr->max_speed_hz;
4130 
4131 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4132 			return -EINVAL;
4133 
4134 		/*
4135 		 * The SPI transfer length must be a multiple of the SPI word size,
4136 		 * where the word size is rounded up to a power-of-two number of bytes.
4137 		 */
4138 		if (xfer->bits_per_word <= 8)
4139 			w_size = 1;
4140 		else if (xfer->bits_per_word <= 16)
4141 			w_size = 2;
4142 		else
4143 			w_size = 4;
4144 
4145 		/* No partial transfers accepted */
4146 		if (xfer->len % w_size)
4147 			return -EINVAL;
4148 
4149 		if (xfer->speed_hz && ctlr->min_speed_hz &&
4150 		    xfer->speed_hz < ctlr->min_speed_hz)
4151 			return -EINVAL;
4152 
4153 		if (xfer->tx_buf && !xfer->tx_nbits)
4154 			xfer->tx_nbits = SPI_NBITS_SINGLE;
4155 		if (xfer->rx_buf && !xfer->rx_nbits)
4156 			xfer->rx_nbits = SPI_NBITS_SINGLE;
4157 		/*
4158 		 * Check transfer tx/rx_nbits:
4159 		 * 1. the value matches one of single, dual, or quad
4160 		 * 2. tx/rx_nbits match the mode set in the spi_device
4161 		 */
4162 		if (xfer->tx_buf) {
4163 			if (spi->mode & SPI_NO_TX)
4164 				return -EINVAL;
4165 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4166 				xfer->tx_nbits != SPI_NBITS_DUAL &&
4167 				xfer->tx_nbits != SPI_NBITS_QUAD)
4168 				return -EINVAL;
4169 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4170 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4171 				return -EINVAL;
4172 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4173 				!(spi->mode & SPI_TX_QUAD))
4174 				return -EINVAL;
4175 		}
4176 		/* Check transfer rx_nbits */
4177 		if (xfer->rx_buf) {
4178 			if (spi->mode & SPI_NO_RX)
4179 				return -EINVAL;
4180 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4181 				xfer->rx_nbits != SPI_NBITS_DUAL &&
4182 				xfer->rx_nbits != SPI_NBITS_QUAD)
4183 				return -EINVAL;
4184 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4185 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4186 				return -EINVAL;
4187 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4188 				!(spi->mode & SPI_RX_QUAD))
4189 				return -EINVAL;
4190 		}
4191 
4192 		if (_spi_xfer_word_delay_update(xfer, spi))
4193 			return -EINVAL;
4194 	}
4195 
4196 	message->status = -EINPROGRESS;
4197 
4198 	return 0;
4199 }
4200 
4201 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4202 {
4203 	struct spi_controller *ctlr = spi->controller;
4204 	struct spi_transfer *xfer;
4205 
4206 	/*
4207 	 * Some controllers do not support doing regular SPI transfers. Return
4208 	 * ENOTSUPP when this is the case.
4209 	 */
4210 	if (!ctlr->transfer)
4211 		return -ENOTSUPP;
4212 
4213 	message->spi = spi;
4214 
4215 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4216 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4217 
4218 	trace_spi_message_submit(message);
4219 
4220 	if (!ctlr->ptp_sts_supported) {
4221 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4222 			xfer->ptp_sts_word_pre = 0;
4223 			ptp_read_system_prets(xfer->ptp_sts);
4224 		}
4225 	}
4226 
4227 	return ctlr->transfer(spi, message);
4228 }
4229 
4230 /**
4231  * spi_async - asynchronous SPI transfer
4232  * @spi: device with which data will be exchanged
4233  * @message: describes the data transfers, including completion callback
4234  * Context: any (IRQs may be blocked, etc)
4235  *
4236  * This call may be used in_irq and other contexts which can't sleep,
4237  * as well as from task contexts which can sleep.
4238  *
4239  * The completion callback is invoked in a context which can't sleep.
4240  * Before that invocation, the value of message->status is undefined.
4241  * When the callback is issued, message->status holds either zero (to
4242  * indicate complete success) or a negative error code.  After that
4243  * callback returns, the driver which issued the transfer request may
4244  * deallocate the associated memory; it's no longer in use by any SPI
4245  * core or controller driver code.
4246  *
4247  * Note that although all messages to a spi_device are handled in
4248  * FIFO order, messages may go to different devices in other orders.
4249  * Some device might be higher priority, or have various "hard" access
4250  * time requirements, for example.
4251  *
4252  * On detection of any fault during the transfer, processing of
4253  * the entire message is aborted, and the device is deselected.
4254  * Until returning from the associated message completion callback,
4255  * no other spi_message queued to that device will be processed.
4256  * (This rule applies equally to all the synchronous transfer calls,
4257  * which are wrappers around this core asynchronous primitive.)
4258  *
4259  * Return: zero on success, else a negative error code.
4260  */
4261 int spi_async(struct spi_device *spi, struct spi_message *message)
4262 {
4263 	struct spi_controller *ctlr = spi->controller;
4264 	int ret;
4265 	unsigned long flags;
4266 
4267 	ret = __spi_validate(spi, message);
4268 	if (ret != 0)
4269 		return ret;
4270 
4271 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4272 
4273 	if (ctlr->bus_lock_flag)
4274 		ret = -EBUSY;
4275 	else
4276 		ret = __spi_async(spi, message);
4277 
4278 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4279 
4280 	return ret;
4281 }
4282 EXPORT_SYMBOL_GPL(spi_async);
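
/*
 * Example (editorial sketch, not part of the original file): submitting a
 * message from a context that cannot sleep and finishing the work in the
 * completion callback. The foo_* names and struct foo_priv are hypothetical;
 * the message and transfer must stay allocated until the callback runs.
 *
 *	struct foo_priv {
 *		struct spi_device *spi;
 *		struct spi_transfer xfer;
 *		struct spi_message msg;
 *	};
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		if (priv->msg.status)
 *			dev_err(&priv->spi->dev, "transfer failed: %d\n",
 *				priv->msg.status);
 *	}
 *
 *	static int foo_kick(struct foo_priv *priv)
 *	{
 *		spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
 *		priv->msg.complete = foo_complete;
 *		priv->msg.context = priv;
 *		return spi_async(priv->spi, &priv->msg);
 *	}
 */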
4283 
4284 /**
4285  * spi_async_locked - version of spi_async with exclusive bus usage
4286  * @spi: device with which data will be exchanged
4287  * @message: describes the data transfers, including completion callback
4288  * Context: any (IRQs may be blocked, etc)
4289  *
4290  * This call may be used in_irq and other contexts which can't sleep,
4291  * as well as from task contexts which can sleep.
4292  *
4293  * The completion callback is invoked in a context which can't sleep.
4294  * Before that invocation, the value of message->status is undefined.
4295  * When the callback is issued, message->status holds either zero (to
4296  * indicate complete success) or a negative error code.  After that
4297  * callback returns, the driver which issued the transfer request may
4298  * deallocate the associated memory; it's no longer in use by any SPI
4299  * core or controller driver code.
4300  *
4301  * Note that although all messages to a spi_device are handled in
4302  * FIFO order, messages may go to different devices in other orders.
4303  * Some device might be higher priority, or have various "hard" access
4304  * time requirements, for example.
4305  *
4306  * On detection of any fault during the transfer, processing of
4307  * the entire message is aborted, and the device is deselected.
4308  * Until returning from the associated message completion callback,
4309  * no other spi_message queued to that device will be processed.
4310  * (This rule applies equally to all the synchronous transfer calls,
4311  * which are wrappers around this core asynchronous primitive.)
4312  *
4313  * Return: zero on success, else a negative error code.
4314  */
4315 static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
4316 {
4317 	struct spi_controller *ctlr = spi->controller;
4318 	int ret;
4319 	unsigned long flags;
4320 
4321 	ret = __spi_validate(spi, message);
4322 	if (ret != 0)
4323 		return ret;
4324 
4325 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4326 
4327 	ret = __spi_async(spi, message);
4328 
4329 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4330 
4331 	return ret;
4332 
4333 }
4334 
4335 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4336 {
4337 	bool was_busy;
4338 	int ret;
4339 
4340 	mutex_lock(&ctlr->io_mutex);
4341 
4342 	was_busy = ctlr->busy;
4343 
4344 	ctlr->cur_msg = msg;
4345 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4346 	if (ret)
4347 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4348 	ctlr->cur_msg = NULL;
4349 	ctlr->fallback = false;
4350 
4351 	if (!was_busy) {
4352 		kfree(ctlr->dummy_rx);
4353 		ctlr->dummy_rx = NULL;
4354 		kfree(ctlr->dummy_tx);
4355 		ctlr->dummy_tx = NULL;
4356 		if (ctlr->unprepare_transfer_hardware &&
4357 		    ctlr->unprepare_transfer_hardware(ctlr))
4358 			dev_err(&ctlr->dev,
4359 				"failed to unprepare transfer hardware\n");
4360 		spi_idle_runtime_pm(ctlr);
4361 	}
4362 
4363 	mutex_unlock(&ctlr->io_mutex);
4364 }
4365 
4366 /*-------------------------------------------------------------------------*/
4367 
4368 /*
4369  * Utility methods for SPI protocol drivers, layered on
4370  * top of the core.  Some other utility methods are defined as
4371  * inline functions.
4372  */
4373 
4374 static void spi_complete(void *arg)
4375 {
4376 	complete(arg);
4377 }
4378 
4379 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4380 {
4381 	DECLARE_COMPLETION_ONSTACK(done);
4382 	int status;
4383 	struct spi_controller *ctlr = spi->controller;
4384 
4385 	if (__spi_check_suspended(ctlr)) {
4386 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4387 		return -ESHUTDOWN;
4388 	}
4389 
4390 	status = __spi_validate(spi, message);
4391 	if (status != 0)
4392 		return status;
4393 
4394 	message->spi = spi;
4395 
4396 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4397 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4398 
4399 	/*
4400 	 * Checking queue_empty here only guarantees async/sync message
4401 	 * ordering when coming from the same context. It does not need to
4402 	 * guard against reentrancy from a different context. The io_mutex
4403 	 * will catch those cases.
4404 	 */
4405 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4406 		message->actual_length = 0;
4407 		message->status = -EINPROGRESS;
4408 
4409 		trace_spi_message_submit(message);
4410 
4411 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4412 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4413 
4414 		__spi_transfer_message_noqueue(ctlr, message);
4415 
4416 		return message->status;
4417 	}
4418 
4419 	/*
4420 	 * There are messages in the async queue that could have originated
4421 	 * from the same context, so we need to preserve ordering.
4422 	 * Therefore we send the message to the async queue and wait until it
4423 	 * has completed.
4424 	 */
4425 	message->complete = spi_complete;
4426 	message->context = &done;
4427 	status = spi_async_locked(spi, message);
4428 	if (status == 0) {
4429 		wait_for_completion(&done);
4430 		status = message->status;
4431 	}
4432 	message->context = NULL;
4433 
4434 	return status;
4435 }
4436 
4437 /**
4438  * spi_sync - blocking/synchronous SPI data transfers
4439  * @spi: device with which data will be exchanged
4440  * @message: describes the data transfers
4441  * Context: can sleep
4442  *
4443  * This call may only be used from a context that may sleep.  The sleep
4444  * is non-interruptible, and has no timeout.  Low-overhead controller
4445  * drivers may DMA directly into and out of the message buffers.
4446  *
4447  * Note that the SPI device's chip select is active during the message,
4448  * and then is normally disabled between messages.  Drivers for some
4449  * frequently-used devices may want to minimize costs of selecting a chip,
4450  * by leaving it selected in anticipation that the next message will go
4451  * to the same chip.  (That may increase power usage.)
4452  *
4453  * Also, the caller is guaranteeing that the memory associated with the
4454  * message will not be freed before this call returns.
4455  *
4456  * Return: zero on success, else a negative error code.
4457  */
4458 int spi_sync(struct spi_device *spi, struct spi_message *message)
4459 {
4460 	int ret;
4461 
4462 	mutex_lock(&spi->controller->bus_lock_mutex);
4463 	ret = __spi_sync(spi, message);
4464 	mutex_unlock(&spi->controller->bus_lock_mutex);
4465 
4466 	return ret;
4467 }
4468 EXPORT_SYMBOL_GPL(spi_sync);
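
/*
 * Example (editorial sketch, not part of the original file): a two-transfer
 * synchronous message, command followed by payload. The foo_* name is
 * hypothetical, and the buffers are assumed to be DMA-safe (i.e. not on the
 * caller's stack) in a real driver.
 *
 *	static int foo_read_block(struct spi_device *spi, const u8 *cmd,
 *				  size_t cmd_len, void *data, size_t data_len)
 *	{
 *		struct spi_transfer xfers[2] = {
 *			{ .tx_buf = cmd,  .len = cmd_len  },
 *			{ .rx_buf = data, .len = data_len },
 *		};
 *		struct spi_message msg;
 *
 *		spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *		return spi_sync(spi, &msg);
 *	}
 */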
4469 
4470 /**
4471  * spi_sync_locked - version of spi_sync with exclusive bus usage
4472  * @spi: device with which data will be exchanged
4473  * @message: describes the data transfers
4474  * Context: can sleep
4475  *
4476  * This call may only be used from a context that may sleep.  The sleep
4477  * is non-interruptible, and has no timeout.  Low-overhead controller
4478  * drivers may DMA directly into and out of the message buffers.
4479  *
4480  * This call should be used by drivers that require exclusive access to the
4481  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4482  * be released by a spi_bus_unlock call when the exclusive access is over.
4483  *
4484  * Return: zero on success, else a negative error code.
4485  */
4486 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4487 {
4488 	return __spi_sync(spi, message);
4489 }
4490 EXPORT_SYMBOL_GPL(spi_sync_locked);
4491 
4492 /**
4493  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4494  * @ctlr: SPI bus master that should be locked for exclusive bus access
4495  * Context: can sleep
4496  *
4497  * This call may only be used from a context that may sleep.  The sleep
4498  * is non-interruptible, and has no timeout.
4499  *
4500  * This call should be used by drivers that require exclusive access to the
4501  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4502  * exclusive access is over. Data transfer must be done by spi_sync_locked
4503  * and spi_async_locked calls when the SPI bus lock is held.
4504  *
4505  * Return: always zero.
4506  */
4507 int spi_bus_lock(struct spi_controller *ctlr)
4508 {
4509 	unsigned long flags;
4510 
4511 	mutex_lock(&ctlr->bus_lock_mutex);
4512 
4513 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4514 	ctlr->bus_lock_flag = 1;
4515 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4516 
4517 	/* Mutex remains locked until spi_bus_unlock() is called */
4518 
4519 	return 0;
4520 }
4521 EXPORT_SYMBOL_GPL(spi_bus_lock);
4522 
4523 /**
4524  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4525  * @ctlr: SPI bus master that was locked for exclusive bus access
4526  * Context: can sleep
4527  *
4528  * This call may only be used from a context that may sleep.  The sleep
4529  * is non-interruptible, and has no timeout.
4530  *
4531  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4532  * call.
4533  *
4534  * Return: always zero.
4535  */
4536 int spi_bus_unlock(struct spi_controller *ctlr)
4537 {
4538 	ctlr->bus_lock_flag = 0;
4539 
4540 	mutex_unlock(&ctlr->bus_lock_mutex);
4541 
4542 	return 0;
4543 }
4544 EXPORT_SYMBOL_GPL(spi_bus_unlock);
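
/*
 * Example (editorial sketch, not part of the original file): a driver that
 * must issue several messages back-to-back, with no other client interleaved,
 * takes the bus lock and uses the _locked transfer variants in between. The
 * foo_* name is hypothetical.
 *
 *	static int foo_atomic_sequence(struct spi_device *spi,
 *				       struct spi_message *first,
 *				       struct spi_message *second)
 *	{
 *		int ret;
 *
 *		spi_bus_lock(spi->controller);
 *		ret = spi_sync_locked(spi, first);
 *		if (!ret)
 *			ret = spi_sync_locked(spi, second);
 *		spi_bus_unlock(spi->controller);
 *
 *		return ret;
 *	}
 */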
4545 
4546 /* Portable code must never pass more than 32 bytes */
4547 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4548 
4549 static u8	*buf;
4550 
4551 /**
4552  * spi_write_then_read - SPI synchronous write followed by read
4553  * @spi: device with which data will be exchanged
4554  * @txbuf: data to be written (need not be DMA-safe)
4555  * @n_tx: size of txbuf, in bytes
4556  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4557  * @n_rx: size of rxbuf, in bytes
4558  * Context: can sleep
4559  *
4560  * This performs a half duplex MicroWire style transaction with the
4561  * device, sending txbuf and then reading rxbuf.  The return value
4562  * is zero for success, else a negative errno status code.
4563  * This call may only be used from a context that may sleep.
4564  *
4565  * Parameters to this routine are always copied using a small buffer.
4566  * Performance-sensitive or bulk transfer code should instead use
4567  * spi_{async,sync}() calls with DMA-safe buffers.
4568  *
4569  * Return: zero on success, else a negative error code.
4570  */
4571 int spi_write_then_read(struct spi_device *spi,
4572 		const void *txbuf, unsigned n_tx,
4573 		void *rxbuf, unsigned n_rx)
4574 {
4575 	static DEFINE_MUTEX(lock);
4576 
4577 	int			status;
4578 	struct spi_message	message;
4579 	struct spi_transfer	x[2];
4580 	u8			*local_buf;
4581 
4582 	/*
4583 	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
4584 	 * copying here (it is a pure convenience), but we can
4585 	 * keep heap costs out of the hot path unless someone else is
4586 	 * using the preallocated buffer or the transfer is too large.
4587 	 */
4588 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4589 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4590 				    GFP_KERNEL | GFP_DMA);
4591 		if (!local_buf)
4592 			return -ENOMEM;
4593 	} else {
4594 		local_buf = buf;
4595 	}
4596 
4597 	spi_message_init(&message);
4598 	memset(x, 0, sizeof(x));
4599 	if (n_tx) {
4600 		x[0].len = n_tx;
4601 		spi_message_add_tail(&x[0], &message);
4602 	}
4603 	if (n_rx) {
4604 		x[1].len = n_rx;
4605 		spi_message_add_tail(&x[1], &message);
4606 	}
4607 
4608 	memcpy(local_buf, txbuf, n_tx);
4609 	x[0].tx_buf = local_buf;
4610 	x[1].rx_buf = local_buf + n_tx;
4611 
4612 	/* Do the I/O */
4613 	status = spi_sync(spi, &message);
4614 	if (status == 0)
4615 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4616 
4617 	if (x[0].tx_buf == buf)
4618 		mutex_unlock(&lock);
4619 	else
4620 		kfree(local_buf);
4621 
4622 	return status;
4623 }
4624 EXPORT_SYMBOL_GPL(spi_write_then_read);
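
/*
 * Example (editorial sketch, not part of the original file): a typical
 * register-read helper, writing a one-byte command and reading back a
 * one-byte value. The register layout and foo_* name are hypothetical;
 * both buffers are copied internally, so they need not be DMA-safe.
 *
 *	static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
 *	{
 *		return spi_write_then_read(spi, &reg, 1, val, 1);
 *	}
 */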
4625 
4626 /*-------------------------------------------------------------------------*/
4627 
4628 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4629 /* Must call put_device() when done with the returned spi_device */
4630 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4631 {
4632 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4633 
4634 	return dev ? to_spi_device(dev) : NULL;
4635 }
4636 
4637 /* The SPI controllers are not on the spi_bus, so we must find them another way */
4638 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4639 {
4640 	struct device *dev;
4641 
4642 	dev = class_find_device_by_of_node(&spi_master_class, node);
4643 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4644 		dev = class_find_device_by_of_node(&spi_slave_class, node);
4645 	if (!dev)
4646 		return NULL;
4647 
4648 	/* Reference obtained in class_find_device() */
4649 	return container_of(dev, struct spi_controller, dev);
4650 }
4651 
4652 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4653 			 void *arg)
4654 {
4655 	struct of_reconfig_data *rd = arg;
4656 	struct spi_controller *ctlr;
4657 	struct spi_device *spi;
4658 
4659 	switch (of_reconfig_get_state_change(action, arg)) {
4660 	case OF_RECONFIG_CHANGE_ADD:
4661 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4662 		if (ctlr == NULL)
4663 			return NOTIFY_OK;	/* Not for us */
4664 
4665 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4666 			put_device(&ctlr->dev);
4667 			return NOTIFY_OK;
4668 		}
4669 
4670 		/*
4671 		 * Clear the flag before adding the device so that fw_devlink
4672 		 * doesn't skip adding consumers to this device.
4673 		 */
4674 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4675 		spi = of_register_spi_device(ctlr, rd->dn);
4676 		put_device(&ctlr->dev);
4677 
4678 		if (IS_ERR(spi)) {
4679 			pr_err("%s: failed to create SPI device for '%pOF'\n",
4680 					__func__, rd->dn);
4681 			of_node_clear_flag(rd->dn, OF_POPULATED);
4682 			return notifier_from_errno(PTR_ERR(spi));
4683 		}
4684 		break;
4685 
4686 	case OF_RECONFIG_CHANGE_REMOVE:
4687 		/* Already depopulated? */
4688 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4689 			return NOTIFY_OK;
4690 
4691 		/* Find our device by node */
4692 		spi = of_find_spi_device_by_node(rd->dn);
4693 		if (spi == NULL)
4694 			return NOTIFY_OK;	/* No, not meant for us */
4695 
4696 		/* Unregister takes one ref away */
4697 		spi_unregister_device(spi);
4698 
4699 		/* And drop the reference taken by the find */
4700 		put_device(&spi->dev);
4701 		break;
4702 	}
4703 
4704 	return NOTIFY_OK;
4705 }
4706 
4707 static struct notifier_block spi_of_notifier = {
4708 	.notifier_call = of_spi_notify,
4709 };
4710 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4711 extern struct notifier_block spi_of_notifier;
4712 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4713 
4714 #if IS_ENABLED(CONFIG_ACPI)
4715 static int spi_acpi_controller_match(struct device *dev, const void *data)
4716 {
4717 	return ACPI_COMPANION(dev->parent) == data;
4718 }
4719 
4720 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4721 {
4722 	struct device *dev;
4723 
4724 	dev = class_find_device(&spi_master_class, NULL, adev,
4725 				spi_acpi_controller_match);
4726 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4727 		dev = class_find_device(&spi_slave_class, NULL, adev,
4728 					spi_acpi_controller_match);
4729 	if (!dev)
4730 		return NULL;
4731 
4732 	return container_of(dev, struct spi_controller, dev);
4733 }
4734 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4735 
4736 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4737 {
4738 	struct device *dev;
4739 
4740 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4741 	return to_spi_device(dev);
4742 }
4743 
4744 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4745 			   void *arg)
4746 {
4747 	struct acpi_device *adev = arg;
4748 	struct spi_controller *ctlr;
4749 	struct spi_device *spi;
4750 
4751 	switch (value) {
4752 	case ACPI_RECONFIG_DEVICE_ADD:
4753 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4754 		if (!ctlr)
4755 			break;
4756 
4757 		acpi_register_spi_device(ctlr, adev);
4758 		put_device(&ctlr->dev);
4759 		break;
4760 	case ACPI_RECONFIG_DEVICE_REMOVE:
4761 		if (!acpi_device_enumerated(adev))
4762 			break;
4763 
4764 		spi = acpi_spi_find_device_by_adev(adev);
4765 		if (!spi)
4766 			break;
4767 
4768 		spi_unregister_device(spi);
4769 		put_device(&spi->dev);
4770 		break;
4771 	}
4772 
4773 	return NOTIFY_OK;
4774 }
4775 
4776 static struct notifier_block spi_acpi_notifier = {
4777 	.notifier_call = acpi_spi_notify,
4778 };
4779 #else
4780 extern struct notifier_block spi_acpi_notifier;
4781 #endif
4782 
4783 static int __init spi_init(void)
4784 {
4785 	int	status;
4786 
4787 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4788 	if (!buf) {
4789 		status = -ENOMEM;
4790 		goto err0;
4791 	}
4792 
4793 	status = bus_register(&spi_bus_type);
4794 	if (status < 0)
4795 		goto err1;
4796 
4797 	status = class_register(&spi_master_class);
4798 	if (status < 0)
4799 		goto err2;
4800 
4801 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4802 		status = class_register(&spi_slave_class);
4803 		if (status < 0)
4804 			goto err3;
4805 	}
4806 
4807 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4808 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4809 	if (IS_ENABLED(CONFIG_ACPI))
4810 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4811 
4812 	return 0;
4813 
4814 err3:
4815 	class_unregister(&spi_master_class);
4816 err2:
4817 	bus_unregister(&spi_bus_type);
4818 err1:
4819 	kfree(buf);
4820 	buf = NULL;
4821 err0:
4822 	return status;
4823 }
4824 
4825 /*
4826  * A board_info is normally registered in arch_initcall(),
4827  * but even essential drivers wait until later.
4828  *
4829  * REVISIT only boardinfo really needs static linking. The rest (device and
4830  * driver registration) _could_ be dynamically linked (modular) ... Costs
4831  * include needing to have boardinfo data structures be much more public.
4832  */
4833 postcore_initcall(spi_init);
4834