xref: /linux/drivers/spi/spi.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6 
7 #include <linux/acpi.h>
8 #include <linux/cache.h>
9 #include <linux/clk/clk-conf.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/export.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/highmem.h>
17 #include <linux/idr.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/kernel.h>
21 #include <linux/kthread.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/mutex.h>
24 #include <linux/of_device.h>
25 #include <linux/of_irq.h>
26 #include <linux/percpu.h>
27 #include <linux/platform_data/x86/apple.h>
28 #include <linux/pm_domain.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/property.h>
31 #include <linux/ptp_clock_kernel.h>
32 #include <linux/sched/rt.h>
33 #include <linux/slab.h>
34 #include <linux/spi/offload/types.h>
35 #include <linux/spi/spi.h>
36 #include <linux/spi/spi-mem.h>
37 #include <uapi/linux/sched/types.h>
38 
39 #define CREATE_TRACE_POINTS
40 #include <trace/events/spi.h>
41 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
42 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
43 
44 #include "internals.h"
45 
46 static DEFINE_IDR(spi_controller_idr);
47 
48 static void spidev_release(struct device *dev)
49 {
50 	struct spi_device	*spi = to_spi_device(dev);
51 
52 	spi_controller_put(spi->controller);
53 	kfree(spi->driver_override);
54 	free_percpu(spi->pcpu_statistics);
55 	kfree(spi);
56 }
57 
58 static ssize_t
59 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
60 {
61 	const struct spi_device	*spi = to_spi_device(dev);
62 	int len;
63 
64 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
65 	if (len != -ENODEV)
66 		return len;
67 
68 	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
69 }
70 static DEVICE_ATTR_RO(modalias);
71 
72 static ssize_t driver_override_store(struct device *dev,
73 				     struct device_attribute *a,
74 				     const char *buf, size_t count)
75 {
76 	struct spi_device *spi = to_spi_device(dev);
77 	int ret;
78 
79 	ret = driver_set_override(dev, &spi->driver_override, buf, count);
80 	if (ret)
81 		return ret;
82 
83 	return count;
84 }
85 
86 static ssize_t driver_override_show(struct device *dev,
87 				    struct device_attribute *a, char *buf)
88 {
89 	const struct spi_device *spi = to_spi_device(dev);
90 	ssize_t len;
91 
92 	device_lock(dev);
93 	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
94 	device_unlock(dev);
95 	return len;
96 }
97 static DEVICE_ATTR_RW(driver_override);
98 
99 static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
100 {
101 	struct spi_statistics __percpu *pcpu_stats;
102 
103 	if (dev)
104 		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
105 	else
106 		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
107 
108 	if (pcpu_stats) {
109 		int cpu;
110 
111 		for_each_possible_cpu(cpu) {
112 			struct spi_statistics *stat;
113 
114 			stat = per_cpu_ptr(pcpu_stats, cpu);
115 			u64_stats_init(&stat->syncp);
116 		}
117 	}
118 	return pcpu_stats;
119 }
120 
121 static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
122 				   char *buf, size_t offset)
123 {
124 	u64 val = 0;
125 	int i;
126 
127 	for_each_possible_cpu(i) {
128 		const struct spi_statistics *pcpu_stats;
129 		u64_stats_t *field;
130 		unsigned int start;
131 		u64 inc;
132 
133 		pcpu_stats = per_cpu_ptr(stat, i);
134 		field = (void *)pcpu_stats + offset;
135 		do {
136 			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
137 			inc = u64_stats_read(field);
138 		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
139 		val += inc;
140 	}
141 	return sysfs_emit(buf, "%llu\n", val);
142 }
143 
144 #define SPI_STATISTICS_ATTRS(field, file)				\
145 static ssize_t spi_controller_##field##_show(struct device *dev,	\
146 					     struct device_attribute *attr, \
147 					     char *buf)			\
148 {									\
149 	struct spi_controller *ctlr = container_of(dev,			\
150 					 struct spi_controller, dev);	\
151 	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
152 }									\
153 static struct device_attribute dev_attr_spi_controller_##field = {	\
154 	.attr = { .name = file, .mode = 0444 },				\
155 	.show = spi_controller_##field##_show,				\
156 };									\
157 static ssize_t spi_device_##field##_show(struct device *dev,		\
158 					 struct device_attribute *attr,	\
159 					char *buf)			\
160 {									\
161 	struct spi_device *spi = to_spi_device(dev);			\
162 	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
163 }									\
164 static struct device_attribute dev_attr_spi_device_##field = {		\
165 	.attr = { .name = file, .mode = 0444 },				\
166 	.show = spi_device_##field##_show,				\
167 }
168 
169 #define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
170 static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
171 					    char *buf)			\
172 {									\
173 	return spi_emit_pcpu_stats(stat, buf,				\
174 			offsetof(struct spi_statistics, field));	\
175 }									\
176 SPI_STATISTICS_ATTRS(name, file)
177 
178 #define SPI_STATISTICS_SHOW(field)					\
179 	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
180 				 field)
181 
182 SPI_STATISTICS_SHOW(messages);
183 SPI_STATISTICS_SHOW(transfers);
184 SPI_STATISTICS_SHOW(errors);
185 SPI_STATISTICS_SHOW(timedout);
186 
187 SPI_STATISTICS_SHOW(spi_sync);
188 SPI_STATISTICS_SHOW(spi_sync_immediate);
189 SPI_STATISTICS_SHOW(spi_async);
190 
191 SPI_STATISTICS_SHOW(bytes);
192 SPI_STATISTICS_SHOW(bytes_rx);
193 SPI_STATISTICS_SHOW(bytes_tx);
194 
195 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
196 	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
197 				 "transfer_bytes_histo_" number,	\
198 				 transfer_bytes_histo[index])
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
208 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
209 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
210 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
211 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
212 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
213 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
214 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
215 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
216 
217 SPI_STATISTICS_SHOW(transfers_split_maxsize);
218 
219 static struct attribute *spi_dev_attrs[] = {
220 	&dev_attr_modalias.attr,
221 	&dev_attr_driver_override.attr,
222 	NULL,
223 };
224 
225 static const struct attribute_group spi_dev_group = {
226 	.attrs  = spi_dev_attrs,
227 };
228 
229 static struct attribute *spi_device_statistics_attrs[] = {
230 	&dev_attr_spi_device_messages.attr,
231 	&dev_attr_spi_device_transfers.attr,
232 	&dev_attr_spi_device_errors.attr,
233 	&dev_attr_spi_device_timedout.attr,
234 	&dev_attr_spi_device_spi_sync.attr,
235 	&dev_attr_spi_device_spi_sync_immediate.attr,
236 	&dev_attr_spi_device_spi_async.attr,
237 	&dev_attr_spi_device_bytes.attr,
238 	&dev_attr_spi_device_bytes_rx.attr,
239 	&dev_attr_spi_device_bytes_tx.attr,
240 	&dev_attr_spi_device_transfer_bytes_histo0.attr,
241 	&dev_attr_spi_device_transfer_bytes_histo1.attr,
242 	&dev_attr_spi_device_transfer_bytes_histo2.attr,
243 	&dev_attr_spi_device_transfer_bytes_histo3.attr,
244 	&dev_attr_spi_device_transfer_bytes_histo4.attr,
245 	&dev_attr_spi_device_transfer_bytes_histo5.attr,
246 	&dev_attr_spi_device_transfer_bytes_histo6.attr,
247 	&dev_attr_spi_device_transfer_bytes_histo7.attr,
248 	&dev_attr_spi_device_transfer_bytes_histo8.attr,
249 	&dev_attr_spi_device_transfer_bytes_histo9.attr,
250 	&dev_attr_spi_device_transfer_bytes_histo10.attr,
251 	&dev_attr_spi_device_transfer_bytes_histo11.attr,
252 	&dev_attr_spi_device_transfer_bytes_histo12.attr,
253 	&dev_attr_spi_device_transfer_bytes_histo13.attr,
254 	&dev_attr_spi_device_transfer_bytes_histo14.attr,
255 	&dev_attr_spi_device_transfer_bytes_histo15.attr,
256 	&dev_attr_spi_device_transfer_bytes_histo16.attr,
257 	&dev_attr_spi_device_transfers_split_maxsize.attr,
258 	NULL,
259 };
260 
261 static const struct attribute_group spi_device_statistics_group = {
262 	.name  = "statistics",
263 	.attrs  = spi_device_statistics_attrs,
264 };
265 
266 static const struct attribute_group *spi_dev_groups[] = {
267 	&spi_dev_group,
268 	&spi_device_statistics_group,
269 	NULL,
270 };
271 
272 static struct attribute *spi_controller_statistics_attrs[] = {
273 	&dev_attr_spi_controller_messages.attr,
274 	&dev_attr_spi_controller_transfers.attr,
275 	&dev_attr_spi_controller_errors.attr,
276 	&dev_attr_spi_controller_timedout.attr,
277 	&dev_attr_spi_controller_spi_sync.attr,
278 	&dev_attr_spi_controller_spi_sync_immediate.attr,
279 	&dev_attr_spi_controller_spi_async.attr,
280 	&dev_attr_spi_controller_bytes.attr,
281 	&dev_attr_spi_controller_bytes_rx.attr,
282 	&dev_attr_spi_controller_bytes_tx.attr,
283 	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
284 	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
285 	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
286 	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
287 	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
288 	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
289 	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
290 	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
291 	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
292 	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
293 	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
294 	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
295 	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
296 	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
297 	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
298 	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
299 	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
300 	&dev_attr_spi_controller_transfers_split_maxsize.attr,
301 	NULL,
302 };
303 
304 static const struct attribute_group spi_controller_statistics_group = {
305 	.name  = "statistics",
306 	.attrs  = spi_controller_statistics_attrs,
307 };
308 
309 static const struct attribute_group *spi_controller_groups[] = {
310 	&spi_controller_statistics_group,
311 	NULL,
312 };
313 
314 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
315 					      struct spi_transfer *xfer,
316 					      struct spi_message *msg)
317 {
318 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
319 	struct spi_statistics *stats;
320 
321 	if (l2len < 0)
322 		l2len = 0;
323 
324 	get_cpu();
325 	stats = this_cpu_ptr(pcpu_stats);
326 	u64_stats_update_begin(&stats->syncp);
327 
328 	u64_stats_inc(&stats->transfers);
329 	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
330 
331 	u64_stats_add(&stats->bytes, xfer->len);
332 	if (spi_valid_txbuf(msg, xfer))
333 		u64_stats_add(&stats->bytes_tx, xfer->len);
334 	if (spi_valid_rxbuf(msg, xfer))
335 		u64_stats_add(&stats->bytes_rx, xfer->len);
336 
337 	u64_stats_update_end(&stats->syncp);
338 	put_cpu();
339 }
340 
341 /*
342  * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
343  * and the sysfs version makes coldplug work too.
344  */
345 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
346 {
347 	while (id->name[0]) {
348 		if (!strcmp(name, id->name))
349 			return id;
350 		id++;
351 	}
352 	return NULL;
353 }
354 
355 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
356 {
357 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
358 
359 	return spi_match_id(sdrv->id_table, sdev->modalias);
360 }
361 EXPORT_SYMBOL_GPL(spi_get_device_id);
362 
363 const void *spi_get_device_match_data(const struct spi_device *sdev)
364 {
365 	const void *match;
366 
367 	match = device_get_match_data(&sdev->dev);
368 	if (match)
369 		return match;
370 
371 	return (const void *)spi_get_device_id(sdev)->driver_data;
372 }
373 EXPORT_SYMBOL_GPL(spi_get_device_match_data);
374 
375 static int spi_match_device(struct device *dev, const struct device_driver *drv)
376 {
377 	const struct spi_device	*spi = to_spi_device(dev);
378 	const struct spi_driver	*sdrv = to_spi_driver(drv);
379 
380 	/* Check override first, and if set, only use the named driver */
381 	if (spi->driver_override)
382 		return strcmp(spi->driver_override, drv->name) == 0;
383 
384 	/* Attempt an OF style match */
385 	if (of_driver_match_device(dev, drv))
386 		return 1;
387 
388 	/* Then try ACPI */
389 	if (acpi_driver_match_device(dev, drv))
390 		return 1;
391 
392 	if (sdrv->id_table)
393 		return !!spi_match_id(sdrv->id_table, spi->modalias);
394 
395 	return strcmp(spi->modalias, drv->name) == 0;
396 }
397 
398 static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
399 {
400 	const struct spi_device		*spi = to_spi_device(dev);
401 	int rc;
402 
403 	rc = acpi_device_uevent_modalias(dev, env);
404 	if (rc != -ENODEV)
405 		return rc;
406 
407 	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
408 }
409 
410 static int spi_probe(struct device *dev)
411 {
412 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
413 	struct spi_device		*spi = to_spi_device(dev);
414 	struct fwnode_handle		*fwnode = dev_fwnode(dev);
415 	int ret;
416 
417 	ret = of_clk_set_defaults(dev->of_node, false);
418 	if (ret)
419 		return ret;
420 
421 	if (is_of_node(fwnode))
422 		spi->irq = of_irq_get(dev->of_node, 0);
423 	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
424 		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
425 	if (spi->irq == -EPROBE_DEFER)
426 		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
427 	if (spi->irq < 0)
428 		spi->irq = 0;
429 
430 	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
431 					PD_FLAG_DETACH_POWER_OFF);
432 	if (ret)
433 		return ret;
434 
435 	if (sdrv->probe)
436 		ret = sdrv->probe(spi);
437 
438 	return ret;
439 }
440 
441 static void spi_remove(struct device *dev)
442 {
443 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
444 
445 	if (sdrv->remove)
446 		sdrv->remove(to_spi_device(dev));
447 }
448 
449 static void spi_shutdown(struct device *dev)
450 {
451 	if (dev->driver) {
452 		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
453 
454 		if (sdrv->shutdown)
455 			sdrv->shutdown(to_spi_device(dev));
456 	}
457 }
458 
459 const struct bus_type spi_bus_type = {
460 	.name		= "spi",
461 	.dev_groups	= spi_dev_groups,
462 	.match		= spi_match_device,
463 	.uevent		= spi_uevent,
464 	.probe		= spi_probe,
465 	.remove		= spi_remove,
466 	.shutdown	= spi_shutdown,
467 };
468 EXPORT_SYMBOL_GPL(spi_bus_type);
469 
470 /**
471  * __spi_register_driver - register a SPI driver
472  * @owner: owner module of the driver to register
473  * @sdrv: the driver to register
474  * Context: can sleep
475  *
476  * Return: zero on success, else a negative error code.
477  */
478 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
479 {
480 	sdrv->driver.owner = owner;
481 	sdrv->driver.bus = &spi_bus_type;
482 
483 	/*
484 	 * For Really Good Reasons we use spi: modaliases not of:
485 	 * modaliases for DT so module autoloading won't work if we
486 	 * don't have a spi_device_id as well as a compatible string.
487 	 */
488 	if (sdrv->driver.of_match_table) {
489 		const struct of_device_id *of_id;
490 
491 		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
492 		     of_id++) {
493 			const char *of_name;
494 
495 			/* Strip off any vendor prefix */
496 			of_name = strnchr(of_id->compatible,
497 					  sizeof(of_id->compatible), ',');
498 			if (of_name)
499 				of_name++;
500 			else
501 				of_name = of_id->compatible;
502 
503 			if (sdrv->id_table) {
504 				const struct spi_device_id *spi_id;
505 
506 				spi_id = spi_match_id(sdrv->id_table, of_name);
507 				if (spi_id)
508 					continue;
509 			} else {
510 				if (strcmp(sdrv->driver.name, of_name) == 0)
511 					continue;
512 			}
513 
514 			pr_warn("SPI driver %s has no spi_device_id for %s\n",
515 				sdrv->driver.name, of_id->compatible);
516 		}
517 	}
518 
519 	return driver_register(&sdrv->driver);
520 }
521 EXPORT_SYMBOL_GPL(__spi_register_driver);
522 
523 /*-------------------------------------------------------------------------*/
524 
525 /*
526  * SPI devices should normally not be created by SPI device drivers; that
527  * would make them board-specific.  Similarly with SPI controller drivers.
528  * Device registration normally goes into like arch/.../mach.../board-YYY.c
529  * with other readonly (flashable) information about mainboard devices.
530  */
531 
532 struct boardinfo {
533 	struct list_head	list;
534 	struct spi_board_info	board_info;
535 };
536 
537 static LIST_HEAD(board_list);
538 static LIST_HEAD(spi_controller_list);
539 
540 /*
541  * Used to protect add/del operation for board_info list and
542  * spi_controller list, and their matching process also used
543  * to protect object of type struct idr.
544  */
545 static DEFINE_MUTEX(board_lock);
546 
547 /**
548  * spi_alloc_device - Allocate a new SPI device
549  * @ctlr: Controller to which device is connected
550  * Context: can sleep
551  *
552  * Allows a driver to allocate and initialize a spi_device without
553  * registering it immediately.  This allows a driver to directly
554  * fill the spi_device with device parameters before calling
555  * spi_add_device() on it.
556  *
557  * Caller is responsible to call spi_add_device() on the returned
558  * spi_device structure to add it to the SPI controller.  If the caller
559  * needs to discard the spi_device without adding it, then it should
560  * call spi_dev_put() on it.
561  *
562  * Return: a pointer to the new device, or NULL.
563  */
564 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
565 {
566 	struct spi_device	*spi;
567 
568 	if (!spi_controller_get(ctlr))
569 		return NULL;
570 
571 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
572 	if (!spi) {
573 		spi_controller_put(ctlr);
574 		return NULL;
575 	}
576 
577 	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
578 	if (!spi->pcpu_statistics) {
579 		kfree(spi);
580 		spi_controller_put(ctlr);
581 		return NULL;
582 	}
583 
584 	spi->controller = ctlr;
585 	spi->dev.parent = &ctlr->dev;
586 	spi->dev.bus = &spi_bus_type;
587 	spi->dev.release = spidev_release;
588 	spi->mode = ctlr->buswidth_override_bits;
589 	spi->num_chipselect = 1;
590 
591 	device_initialize(&spi->dev);
592 	return spi;
593 }
594 EXPORT_SYMBOL_GPL(spi_alloc_device);
595 
596 static void spi_dev_set_name(struct spi_device *spi)
597 {
598 	struct device *dev = &spi->dev;
599 	struct fwnode_handle *fwnode = dev_fwnode(dev);
600 
601 	if (is_acpi_device_node(fwnode)) {
602 		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
603 		return;
604 	}
605 
606 	if (is_software_node(fwnode)) {
607 		dev_set_name(dev, "spi-%pfwP", fwnode);
608 		return;
609 	}
610 
611 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
612 		     spi_get_chipselect(spi, 0));
613 }
614 
615 /*
616  * Zero(0) is a valid physical CS value and can be located at any
617  * logical CS in the spi->chip_select[]. If all the physical CS
618  * are initialized to 0 then It would be difficult to differentiate
619  * between a valid physical CS 0 & an unused logical CS whose physical
620  * CS can be 0. As a solution to this issue initialize all the CS to -1.
621  * Now all the unused logical CS will have -1 physical CS value & can be
622  * ignored while performing physical CS validity checks.
623  */
624 #define SPI_INVALID_CS		((s8)-1)
625 
626 static inline int spi_dev_check_cs(struct device *dev,
627 				   struct spi_device *spi, u8 idx,
628 				   struct spi_device *new_spi, u8 new_idx)
629 {
630 	u8 cs, cs_new;
631 	u8 idx_new;
632 
633 	cs = spi_get_chipselect(spi, idx);
634 	for (idx_new = new_idx; idx_new < new_spi->num_chipselect; idx_new++) {
635 		cs_new = spi_get_chipselect(new_spi, idx_new);
636 		if (cs == cs_new) {
637 			dev_err(dev, "chipselect %u already in use\n", cs_new);
638 			return -EBUSY;
639 		}
640 	}
641 	return 0;
642 }
643 
644 static int spi_dev_check(struct device *dev, void *data)
645 {
646 	struct spi_device *spi = to_spi_device(dev);
647 	struct spi_device *new_spi = data;
648 	int status, idx;
649 
650 	if (spi->controller == new_spi->controller) {
651 		for (idx = 0; idx < spi->num_chipselect; idx++) {
652 			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
653 			if (status)
654 				return status;
655 		}
656 	}
657 	return 0;
658 }
659 
660 static void spi_cleanup(struct spi_device *spi)
661 {
662 	if (spi->controller->cleanup)
663 		spi->controller->cleanup(spi);
664 }
665 
666 static int __spi_add_device(struct spi_device *spi)
667 {
668 	struct spi_controller *ctlr = spi->controller;
669 	struct device *dev = ctlr->dev.parent;
670 	int status, idx;
671 	u8 cs;
672 
673 	if (spi->num_chipselect > SPI_DEVICE_CS_CNT_MAX) {
674 		dev_err(dev, "num_cs %d > max %d\n", spi->num_chipselect,
675 			SPI_DEVICE_CS_CNT_MAX);
676 		return -EOVERFLOW;
677 	}
678 
679 	for (idx = 0; idx < spi->num_chipselect; idx++) {
680 		/* Chipselects are numbered 0..max; validate. */
681 		cs = spi_get_chipselect(spi, idx);
682 		if (cs >= ctlr->num_chipselect) {
683 			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
684 				ctlr->num_chipselect);
685 			return -EINVAL;
686 		}
687 	}
688 
689 	/*
690 	 * Make sure that multiple logical CS doesn't map to the same physical CS.
691 	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
692 	 */
693 	if (!spi_controller_is_target(ctlr)) {
694 		for (idx = 0; idx < spi->num_chipselect; idx++) {
695 			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
696 			if (status)
697 				return status;
698 		}
699 	}
700 
701 	/* Initialize unused logical CS as invalid */
702 	for (idx = spi->num_chipselect; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
703 		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
704 
705 	/* Set the bus ID string */
706 	spi_dev_set_name(spi);
707 
708 	/*
709 	 * We need to make sure there's no other device with this
710 	 * chipselect **BEFORE** we call setup(), else we'll trash
711 	 * its configuration.
712 	 */
713 	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
714 	if (status)
715 		return status;
716 
717 	/* Controller may unregister concurrently */
718 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
719 	    !device_is_registered(&ctlr->dev)) {
720 		return -ENODEV;
721 	}
722 
723 	if (ctlr->cs_gpiods) {
724 		u8 cs;
725 
726 		for (idx = 0; idx < spi->num_chipselect; idx++) {
727 			cs = spi_get_chipselect(spi, idx);
728 			spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
729 		}
730 	}
731 
732 	/*
733 	 * Drivers may modify this initial i/o setup, but will
734 	 * normally rely on the device being setup.  Devices
735 	 * using SPI_CS_HIGH can't coexist well otherwise...
736 	 */
737 	status = spi_setup(spi);
738 	if (status < 0) {
739 		dev_err(dev, "can't setup %s, status %d\n",
740 				dev_name(&spi->dev), status);
741 		return status;
742 	}
743 
744 	/* Device may be bound to an active driver when this returns */
745 	status = device_add(&spi->dev);
746 	if (status < 0) {
747 		dev_err(dev, "can't add %s, status %d\n",
748 				dev_name(&spi->dev), status);
749 		spi_cleanup(spi);
750 	} else {
751 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
752 	}
753 
754 	return status;
755 }
756 
757 /**
758  * spi_add_device - Add spi_device allocated with spi_alloc_device
759  * @spi: spi_device to register
760  *
761  * Companion function to spi_alloc_device.  Devices allocated with
762  * spi_alloc_device can be added onto the SPI bus with this function.
763  *
764  * Return: 0 on success; negative errno on failure
765  */
766 int spi_add_device(struct spi_device *spi)
767 {
768 	struct spi_controller *ctlr = spi->controller;
769 	int status;
770 
771 	/* Set the bus ID string */
772 	spi_dev_set_name(spi);
773 
774 	mutex_lock(&ctlr->add_lock);
775 	status = __spi_add_device(spi);
776 	mutex_unlock(&ctlr->add_lock);
777 	return status;
778 }
779 EXPORT_SYMBOL_GPL(spi_add_device);
780 
781 /**
782  * spi_new_device - instantiate one new SPI device
783  * @ctlr: Controller to which device is connected
784  * @chip: Describes the SPI device
785  * Context: can sleep
786  *
787  * On typical mainboards, this is purely internal; and it's not needed
788  * after board init creates the hard-wired devices.  Some development
789  * platforms may not be able to use spi_register_board_info though, and
790  * this is exported so that for example a USB or parport based adapter
791  * driver could add devices (which it would learn about out-of-band).
792  *
793  * Return: the new device, or NULL.
794  */
795 struct spi_device *spi_new_device(struct spi_controller *ctlr,
796 				  struct spi_board_info *chip)
797 {
798 	struct spi_device	*proxy;
799 	int			status;
800 
801 	/*
802 	 * NOTE:  caller did any chip->bus_num checks necessary.
803 	 *
804 	 * Also, unless we change the return value convention to use
805 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
806 	 * suggests syslogged diagnostics are best here (ugh).
807 	 */
808 
809 	proxy = spi_alloc_device(ctlr);
810 	if (!proxy)
811 		return NULL;
812 
813 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
814 
815 	/* Use provided chip-select for proxy device */
816 	spi_set_chipselect(proxy, 0, chip->chip_select);
817 
818 	proxy->max_speed_hz = chip->max_speed_hz;
819 	proxy->mode = chip->mode;
820 	proxy->irq = chip->irq;
821 	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
822 	proxy->dev.platform_data = (void *) chip->platform_data;
823 	proxy->controller_data = chip->controller_data;
824 	proxy->controller_state = NULL;
825 	/*
826 	 * By default spi->chip_select[0] will hold the physical CS number,
827 	 * so set bit 0 in spi->cs_index_mask.
828 	 */
829 	proxy->cs_index_mask = BIT(0);
830 
831 	if (chip->swnode) {
832 		status = device_add_software_node(&proxy->dev, chip->swnode);
833 		if (status) {
834 			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
835 				chip->modalias, status);
836 			goto err_dev_put;
837 		}
838 	}
839 
840 	status = spi_add_device(proxy);
841 	if (status < 0)
842 		goto err_dev_put;
843 
844 	return proxy;
845 
846 err_dev_put:
847 	device_remove_software_node(&proxy->dev);
848 	spi_dev_put(proxy);
849 	return NULL;
850 }
851 EXPORT_SYMBOL_GPL(spi_new_device);
852 
853 /**
854  * spi_unregister_device - unregister a single SPI device
855  * @spi: spi_device to unregister
856  *
857  * Start making the passed SPI device vanish. Normally this would be handled
858  * by spi_unregister_controller().
859  */
860 void spi_unregister_device(struct spi_device *spi)
861 {
862 	struct fwnode_handle *fwnode;
863 
864 	if (!spi)
865 		return;
866 
867 	fwnode = dev_fwnode(&spi->dev);
868 	if (is_of_node(fwnode)) {
869 		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
870 		of_node_put(to_of_node(fwnode));
871 	} else if (is_acpi_device_node(fwnode)) {
872 		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
873 	}
874 	device_remove_software_node(&spi->dev);
875 	device_del(&spi->dev);
876 	spi_cleanup(spi);
877 	put_device(&spi->dev);
878 }
879 EXPORT_SYMBOL_GPL(spi_unregister_device);
880 
881 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
882 					      struct spi_board_info *bi)
883 {
884 	struct spi_device *dev;
885 
886 	if (ctlr->bus_num != bi->bus_num)
887 		return;
888 
889 	dev = spi_new_device(ctlr, bi);
890 	if (!dev)
891 		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
892 			bi->modalias);
893 }
894 
895 /**
896  * spi_register_board_info - register SPI devices for a given board
897  * @info: array of chip descriptors
898  * @n: how many descriptors are provided
899  * Context: can sleep
900  *
901  * Board-specific early init code calls this (probably during arch_initcall)
902  * with segments of the SPI device table.  Any device nodes are created later,
903  * after the relevant parent SPI controller (bus_num) is defined.  We keep
904  * this table of devices forever, so that reloading a controller driver will
905  * not make Linux forget about these hard-wired devices.
906  *
907  * Other code can also call this, e.g. a particular add-on board might provide
908  * SPI devices through its expansion connector, so code initializing that board
909  * would naturally declare its SPI devices.
910  *
911  * The board info passed can safely be __initdata ... but be careful of
912  * any embedded pointers (platform_data, etc), they're copied as-is.
913  *
914  * Return: zero on success, else a negative error code.
915  */
916 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
917 {
918 	struct boardinfo *bi;
919 	int i;
920 
921 	if (!n)
922 		return 0;
923 
924 	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
925 	if (!bi)
926 		return -ENOMEM;
927 
928 	for (i = 0; i < n; i++, bi++, info++) {
929 		struct spi_controller *ctlr;
930 
931 		memcpy(&bi->board_info, info, sizeof(*info));
932 
933 		mutex_lock(&board_lock);
934 		list_add_tail(&bi->list, &board_list);
935 		list_for_each_entry(ctlr, &spi_controller_list, list)
936 			spi_match_controller_to_boardinfo(ctlr,
937 							  &bi->board_info);
938 		mutex_unlock(&board_lock);
939 	}
940 
941 	return 0;
942 }
943 
944 /*-------------------------------------------------------------------------*/
945 
946 /* Core methods for SPI resource management */
947 
948 /**
949  * spi_res_alloc - allocate a spi resource that is life-cycle managed
950  *                 during the processing of a spi_message while using
951  *                 spi_transfer_one
952  * @spi:     the SPI device for which we allocate memory
953  * @release: the release code to execute for this resource
954  * @size:    size to alloc and return
955  * @gfp:     GFP allocation flags
956  *
957  * Return: the pointer to the allocated data
958  *
959  * This may get enhanced in the future to allocate from a memory pool
960  * of the @spi_device or @spi_controller to avoid repeated allocations.
961  */
962 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
963 			   size_t size, gfp_t gfp)
964 {
965 	struct spi_res *sres;
966 
967 	sres = kzalloc(sizeof(*sres) + size, gfp);
968 	if (!sres)
969 		return NULL;
970 
971 	INIT_LIST_HEAD(&sres->entry);
972 	sres->release = release;
973 
974 	return sres->data;
975 }
976 
977 /**
978  * spi_res_free - free an SPI resource
979  * @res: pointer to the custom data of a resource
980  */
981 static void spi_res_free(void *res)
982 {
983 	struct spi_res *sres = container_of(res, struct spi_res, data);
984 
985 	WARN_ON(!list_empty(&sres->entry));
986 	kfree(sres);
987 }
988 
989 /**
990  * spi_res_add - add a spi_res to the spi_message
991  * @message: the SPI message
992  * @res:     the spi_resource
993  */
994 static void spi_res_add(struct spi_message *message, void *res)
995 {
996 	struct spi_res *sres = container_of(res, struct spi_res, data);
997 
998 	WARN_ON(!list_empty(&sres->entry));
999 	list_add_tail(&sres->entry, &message->resources);
1000 }
1001 
1002 /**
1003  * spi_res_release - release all SPI resources for this message
1004  * @ctlr:  the @spi_controller
1005  * @message: the @spi_message
1006  */
1007 static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
1008 {
1009 	struct spi_res *res, *tmp;
1010 
1011 	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1012 		if (res->release)
1013 			res->release(ctlr, message, res->data);
1014 
1015 		list_del(&res->entry);
1016 
1017 		kfree(res);
1018 	}
1019 }
1020 
1021 /*-------------------------------------------------------------------------*/
1022 #define spi_for_each_valid_cs(spi, idx)				\
1023 	for (idx = 0; idx < spi->num_chipselect; idx++)		\
1024 		if (!(spi->cs_index_mask & BIT(idx))) {} else
1025 
1026 static inline bool spi_is_last_cs(struct spi_device *spi)
1027 {
1028 	u8 idx;
1029 	bool last = false;
1030 
1031 	spi_for_each_valid_cs(spi, idx) {
1032 		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1033 			last = true;
1034 	}
1035 	return last;
1036 }
1037 
1038 static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
1039 {
1040 	/*
1041 	 * Historically ACPI has no means of the GPIO polarity and
1042 	 * thus the SPISerialBus() resource defines it on the per-chip
1043 	 * basis. In order to avoid a chain of negations, the GPIO
1044 	 * polarity is considered being Active High. Even for the cases
1045 	 * when _DSD() is involved (in the updated versions of ACPI)
1046 	 * the GPIO CS polarity must be defined Active High to avoid
1047 	 * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
1048 	 * into account.
1049 	 */
1050 	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
1051 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
1052 	else
1053 		/* Polarity handled by GPIO library */
1054 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
1055 
1056 	if (activate)
1057 		spi_delay_exec(&spi->cs_setup, NULL);
1058 	else
1059 		spi_delay_exec(&spi->cs_inactive, NULL);
1060 }
1061 
1062 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1063 {
1064 	bool activate = enable;
1065 	u8 idx;
1066 
1067 	/*
1068 	 * Avoid calling into the driver (or doing delays) if the chip select
1069 	 * isn't actually changing from the last time this was called.
1070 	 */
1071 	if (!force && (enable == spi_is_last_cs(spi)) &&
1072 	    (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
1073 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1074 		return;
1075 
1076 	trace_spi_set_cs(spi, activate);
1077 
1078 	spi->controller->last_cs_index_mask = spi->cs_index_mask;
1079 	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++) {
1080 		if (enable && idx < spi->num_chipselect)
1081 			spi->controller->last_cs[idx] = spi_get_chipselect(spi, 0);
1082 		else
1083 			spi->controller->last_cs[idx] = SPI_INVALID_CS;
1084 	}
1085 
1086 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1087 	if (spi->controller->last_cs_mode_high)
1088 		enable = !enable;
1089 
1090 	/*
1091 	 * Handle chip select delays for GPIO based CS or controllers without
1092 	 * programmable chip select timing.
1093 	 */
1094 	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1095 		spi_delay_exec(&spi->cs_hold, NULL);
1096 
1097 	if (spi_is_csgpiod(spi)) {
1098 		if (!(spi->mode & SPI_NO_CS)) {
1099 			spi_for_each_valid_cs(spi, idx) {
1100 				if (spi_get_csgpiod(spi, idx))
1101 					spi_toggle_csgpiod(spi, idx, enable, activate);
1102 			}
1103 		}
1104 		/* Some SPI controllers need both GPIO CS & ->set_cs() */
1105 		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1106 		    spi->controller->set_cs)
1107 			spi->controller->set_cs(spi, !enable);
1108 	} else if (spi->controller->set_cs) {
1109 		spi->controller->set_cs(spi, !enable);
1110 	}
1111 
1112 	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1113 		if (activate)
1114 			spi_delay_exec(&spi->cs_setup, NULL);
1115 		else
1116 			spi_delay_exec(&spi->cs_inactive, NULL);
1117 	}
1118 }
1119 
1120 #ifdef CONFIG_HAS_DMA
1121 static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1122 			     struct sg_table *sgt, void *buf, size_t len,
1123 			     enum dma_data_direction dir, unsigned long attrs)
1124 {
1125 	const bool vmalloced_buf = is_vmalloc_addr(buf);
1126 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
1127 #ifdef CONFIG_HIGHMEM
1128 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1129 				(unsigned long)buf < (PKMAP_BASE +
1130 					(LAST_PKMAP * PAGE_SIZE)));
1131 #else
1132 	const bool kmap_buf = false;
1133 #endif
1134 	int desc_len;
1135 	int sgs;
1136 	struct page *vm_page;
1137 	struct scatterlist *sg;
1138 	void *sg_buf;
1139 	size_t min;
1140 	int i, ret;
1141 
1142 	if (vmalloced_buf || kmap_buf) {
1143 		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1144 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1145 	} else if (virt_addr_valid(buf)) {
1146 		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1147 		sgs = DIV_ROUND_UP(len, desc_len);
1148 	} else {
1149 		return -EINVAL;
1150 	}
1151 
1152 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1153 	if (ret != 0)
1154 		return ret;
1155 
1156 	sg = &sgt->sgl[0];
1157 	for (i = 0; i < sgs; i++) {
1158 
1159 		if (vmalloced_buf || kmap_buf) {
1160 			/*
1161 			 * Next scatterlist entry size is the minimum between
1162 			 * the desc_len and the remaining buffer length that
1163 			 * fits in a page.
1164 			 */
1165 			min = min_t(size_t, desc_len,
1166 				    min_t(size_t, len,
1167 					  PAGE_SIZE - offset_in_page(buf)));
1168 			if (vmalloced_buf)
1169 				vm_page = vmalloc_to_page(buf);
1170 			else
1171 				vm_page = kmap_to_page(buf);
1172 			if (!vm_page) {
1173 				sg_free_table(sgt);
1174 				return -ENOMEM;
1175 			}
1176 			sg_set_page(sg, vm_page,
1177 				    min, offset_in_page(buf));
1178 		} else {
1179 			min = min_t(size_t, len, desc_len);
1180 			sg_buf = buf;
1181 			sg_set_buf(sg, sg_buf, min);
1182 		}
1183 
1184 		buf += min;
1185 		len -= min;
1186 		sg = sg_next(sg);
1187 	}
1188 
1189 	ret = dma_map_sgtable(dev, sgt, dir, attrs);
1190 	if (ret < 0) {
1191 		sg_free_table(sgt);
1192 		return ret;
1193 	}
1194 
1195 	return 0;
1196 }
1197 
1198 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1199 		struct sg_table *sgt, void *buf, size_t len,
1200 		enum dma_data_direction dir)
1201 {
1202 	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1203 }
1204 
1205 static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1206 				struct device *dev, struct sg_table *sgt,
1207 				enum dma_data_direction dir,
1208 				unsigned long attrs)
1209 {
1210 	dma_unmap_sgtable(dev, sgt, dir, attrs);
1211 	sg_free_table(sgt);
1212 	sgt->orig_nents = 0;
1213 	sgt->nents = 0;
1214 }
1215 
1216 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1217 		   struct sg_table *sgt, enum dma_data_direction dir)
1218 {
1219 	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1220 }
1221 
1222 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1223 {
1224 	struct device *tx_dev, *rx_dev;
1225 	struct spi_transfer *xfer;
1226 	int ret;
1227 
1228 	if (!ctlr->can_dma)
1229 		return 0;
1230 
1231 	if (ctlr->dma_tx)
1232 		tx_dev = ctlr->dma_tx->device->dev;
1233 	else if (ctlr->dma_map_dev)
1234 		tx_dev = ctlr->dma_map_dev;
1235 	else
1236 		tx_dev = ctlr->dev.parent;
1237 
1238 	if (ctlr->dma_rx)
1239 		rx_dev = ctlr->dma_rx->device->dev;
1240 	else if (ctlr->dma_map_dev)
1241 		rx_dev = ctlr->dma_map_dev;
1242 	else
1243 		rx_dev = ctlr->dev.parent;
1244 
1245 	ret = -ENOMSG;
1246 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1247 		/* The sync is done before each transfer. */
1248 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1249 
1250 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1251 			continue;
1252 
1253 		if (xfer->tx_buf != NULL) {
1254 			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1255 						(void *)xfer->tx_buf,
1256 						xfer->len, DMA_TO_DEVICE,
1257 						attrs);
1258 			if (ret != 0)
1259 				return ret;
1260 
1261 			xfer->tx_sg_mapped = true;
1262 		}
1263 
1264 		if (xfer->rx_buf != NULL) {
1265 			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1266 						xfer->rx_buf, xfer->len,
1267 						DMA_FROM_DEVICE, attrs);
1268 			if (ret != 0) {
1269 				spi_unmap_buf_attrs(ctlr, tx_dev,
1270 						&xfer->tx_sg, DMA_TO_DEVICE,
1271 						attrs);
1272 
1273 				return ret;
1274 			}
1275 
1276 			xfer->rx_sg_mapped = true;
1277 		}
1278 	}
1279 	/* No transfer has been mapped, bail out with success */
1280 	if (ret)
1281 		return 0;
1282 
1283 	ctlr->cur_rx_dma_dev = rx_dev;
1284 	ctlr->cur_tx_dma_dev = tx_dev;
1285 
1286 	return 0;
1287 }
1288 
1289 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1290 {
1291 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1292 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1293 	struct spi_transfer *xfer;
1294 
1295 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1296 		/* The sync has already been done after each transfer. */
1297 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1298 
1299 		if (xfer->rx_sg_mapped)
1300 			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1301 					    DMA_FROM_DEVICE, attrs);
1302 		xfer->rx_sg_mapped = false;
1303 
1304 		if (xfer->tx_sg_mapped)
1305 			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1306 					    DMA_TO_DEVICE, attrs);
1307 		xfer->tx_sg_mapped = false;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1314 				    struct spi_transfer *xfer)
1315 {
1316 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1317 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1318 
1319 	if (xfer->tx_sg_mapped)
1320 		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1321 	if (xfer->rx_sg_mapped)
1322 		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1323 }
1324 
1325 static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1326 				 struct spi_transfer *xfer)
1327 {
1328 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1329 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1330 
1331 	if (xfer->rx_sg_mapped)
1332 		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1333 	if (xfer->tx_sg_mapped)
1334 		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1335 }
1336 #else /* !CONFIG_HAS_DMA */
1337 static inline int __spi_map_msg(struct spi_controller *ctlr,
1338 				struct spi_message *msg)
1339 {
1340 	return 0;
1341 }
1342 
1343 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1344 				  struct spi_message *msg)
1345 {
1346 	return 0;
1347 }
1348 
1349 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1350 				    struct spi_transfer *xfer)
1351 {
1352 }
1353 
1354 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1355 				 struct spi_transfer *xfer)
1356 {
1357 }
1358 #endif /* !CONFIG_HAS_DMA */
1359 
1360 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1361 				struct spi_message *msg)
1362 {
1363 	struct spi_transfer *xfer;
1364 
1365 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1366 		/*
1367 		 * Restore the original value of tx_buf or rx_buf if they are
1368 		 * NULL.
1369 		 */
1370 		if (xfer->tx_buf == ctlr->dummy_tx)
1371 			xfer->tx_buf = NULL;
1372 		if (xfer->rx_buf == ctlr->dummy_rx)
1373 			xfer->rx_buf = NULL;
1374 	}
1375 
1376 	return __spi_unmap_msg(ctlr, msg);
1377 }
1378 
1379 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1380 {
1381 	struct spi_transfer *xfer;
1382 	void *tmp;
1383 	unsigned int max_tx, max_rx;
1384 
1385 	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1386 		&& !(msg->spi->mode & SPI_3WIRE)) {
1387 		max_tx = 0;
1388 		max_rx = 0;
1389 
1390 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1391 			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1392 			    !xfer->tx_buf)
1393 				max_tx = max(xfer->len, max_tx);
1394 			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1395 			    !xfer->rx_buf)
1396 				max_rx = max(xfer->len, max_rx);
1397 		}
1398 
1399 		if (max_tx) {
1400 			tmp = krealloc(ctlr->dummy_tx, max_tx,
1401 				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1402 			if (!tmp)
1403 				return -ENOMEM;
1404 			ctlr->dummy_tx = tmp;
1405 		}
1406 
1407 		if (max_rx) {
1408 			tmp = krealloc(ctlr->dummy_rx, max_rx,
1409 				       GFP_KERNEL | GFP_DMA);
1410 			if (!tmp)
1411 				return -ENOMEM;
1412 			ctlr->dummy_rx = tmp;
1413 		}
1414 
1415 		if (max_tx || max_rx) {
1416 			list_for_each_entry(xfer, &msg->transfers,
1417 					    transfer_list) {
1418 				if (!xfer->len)
1419 					continue;
1420 				if (!xfer->tx_buf)
1421 					xfer->tx_buf = ctlr->dummy_tx;
1422 				if (!xfer->rx_buf)
1423 					xfer->rx_buf = ctlr->dummy_rx;
1424 			}
1425 		}
1426 	}
1427 
1428 	return __spi_map_msg(ctlr, msg);
1429 }
1430 
1431 static int spi_transfer_wait(struct spi_controller *ctlr,
1432 			     struct spi_message *msg,
1433 			     struct spi_transfer *xfer)
1434 {
1435 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1436 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1437 	u32 speed_hz = xfer->speed_hz;
1438 	unsigned long long ms;
1439 
1440 	if (spi_controller_is_target(ctlr)) {
1441 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1442 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1443 			return -EINTR;
1444 		}
1445 	} else {
1446 		if (!speed_hz)
1447 			speed_hz = 100000;
1448 
1449 		/*
1450 		 * For each byte we wait for 8 cycles of the SPI clock.
1451 		 * Since speed is defined in Hz and we want milliseconds,
1452 		 * use respective multiplier, but before the division,
1453 		 * otherwise we may get 0 for short transfers.
1454 		 */
1455 		ms = 8LL * MSEC_PER_SEC * xfer->len;
1456 		do_div(ms, speed_hz);
1457 
1458 		/*
1459 		 * Increase it twice and add 200 ms tolerance, use
1460 		 * predefined maximum in case of overflow.
1461 		 */
1462 		ms += ms + 200;
1463 		if (ms > UINT_MAX)
1464 			ms = UINT_MAX;
1465 
1466 		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1467 						 msecs_to_jiffies(ms));
1468 
1469 		if (ms == 0) {
1470 			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1471 			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1472 			dev_err(&msg->spi->dev,
1473 				"SPI transfer timed out\n");
1474 			return -ETIMEDOUT;
1475 		}
1476 
1477 		if (xfer->error & SPI_TRANS_FAIL_IO)
1478 			return -EIO;
1479 	}
1480 
1481 	return 0;
1482 }
1483 
1484 static void _spi_transfer_delay_ns(u32 ns)
1485 {
1486 	if (!ns)
1487 		return;
1488 	if (ns <= NSEC_PER_USEC) {
1489 		ndelay(ns);
1490 	} else {
1491 		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1492 
1493 		fsleep(us);
1494 	}
1495 }
1496 
1497 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1498 {
1499 	u32 delay = _delay->value;
1500 	u32 unit = _delay->unit;
1501 	u32 hz;
1502 
1503 	if (!delay)
1504 		return 0;
1505 
1506 	switch (unit) {
1507 	case SPI_DELAY_UNIT_USECS:
1508 		delay *= NSEC_PER_USEC;
1509 		break;
1510 	case SPI_DELAY_UNIT_NSECS:
1511 		/* Nothing to do here */
1512 		break;
1513 	case SPI_DELAY_UNIT_SCK:
1514 		/* Clock cycles need to be obtained from spi_transfer */
1515 		if (!xfer)
1516 			return -EINVAL;
1517 		/*
1518 		 * If there is unknown effective speed, approximate it
1519 		 * by underestimating with half of the requested Hz.
1520 		 */
1521 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1522 		if (!hz)
1523 			return -EINVAL;
1524 
1525 		/* Convert delay to nanoseconds */
1526 		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1527 		break;
1528 	default:
1529 		return -EINVAL;
1530 	}
1531 
1532 	return delay;
1533 }
1534 EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1535 
1536 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1537 {
1538 	int delay;
1539 
1540 	might_sleep();
1541 
1542 	if (!_delay)
1543 		return -EINVAL;
1544 
1545 	delay = spi_delay_to_ns(_delay, xfer);
1546 	if (delay < 0)
1547 		return delay;
1548 
1549 	_spi_transfer_delay_ns(delay);
1550 
1551 	return 0;
1552 }
1553 EXPORT_SYMBOL_GPL(spi_delay_exec);
1554 
1555 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1556 					  struct spi_transfer *xfer)
1557 {
1558 	u32 default_delay_ns = 10 * NSEC_PER_USEC;
1559 	u32 delay = xfer->cs_change_delay.value;
1560 	u32 unit = xfer->cs_change_delay.unit;
1561 	int ret;
1562 
1563 	/* Return early on "fast" mode - for everything but USECS */
1564 	if (!delay) {
1565 		if (unit == SPI_DELAY_UNIT_USECS)
1566 			_spi_transfer_delay_ns(default_delay_ns);
1567 		return;
1568 	}
1569 
1570 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1571 	if (ret) {
1572 		dev_err_once(&msg->spi->dev,
1573 			     "Use of unsupported delay unit %i, using default of %luus\n",
1574 			     unit, default_delay_ns / NSEC_PER_USEC);
1575 		_spi_transfer_delay_ns(default_delay_ns);
1576 	}
1577 }
1578 
1579 void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1580 						  struct spi_transfer *xfer)
1581 {
1582 	_spi_transfer_cs_change_delay(msg, xfer);
1583 }
1584 EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1585 
1586 /*
1587  * spi_transfer_one_message - Default implementation of transfer_one_message()
1588  *
1589  * This is a standard implementation of transfer_one_message() for
1590  * drivers which implement a transfer_one() operation.  It provides
1591  * standard handling of delays and chip select management.
1592  */
1593 static int spi_transfer_one_message(struct spi_controller *ctlr,
1594 				    struct spi_message *msg)
1595 {
1596 	struct spi_transfer *xfer;
1597 	bool keep_cs = false;
1598 	int ret = 0;
1599 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1600 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1601 
1602 	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1603 	spi_set_cs(msg->spi, !xfer->cs_off, false);
1604 
1605 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1606 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1607 
1608 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1609 		trace_spi_transfer_start(msg, xfer);
1610 
1611 		spi_statistics_add_transfer_stats(statm, xfer, msg);
1612 		spi_statistics_add_transfer_stats(stats, xfer, msg);
1613 
1614 		if (!ctlr->ptp_sts_supported) {
1615 			xfer->ptp_sts_word_pre = 0;
1616 			ptp_read_system_prets(xfer->ptp_sts);
1617 		}
1618 
1619 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1620 			reinit_completion(&ctlr->xfer_completion);
1621 
1622 fallback_pio:
1623 			spi_dma_sync_for_device(ctlr, xfer);
1624 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1625 			if (ret < 0) {
1626 				spi_dma_sync_for_cpu(ctlr, xfer);
1627 
1628 				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1629 				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1630 					__spi_unmap_msg(ctlr, msg);
1631 					ctlr->fallback = true;
1632 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1633 					goto fallback_pio;
1634 				}
1635 
1636 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1637 							       errors);
1638 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1639 							       errors);
1640 				dev_err(&msg->spi->dev,
1641 					"SPI transfer failed: %d\n", ret);
1642 				goto out;
1643 			}
1644 
1645 			if (ret > 0) {
1646 				ret = spi_transfer_wait(ctlr, msg, xfer);
1647 				if (ret < 0)
1648 					msg->status = ret;
1649 			}
1650 
1651 			spi_dma_sync_for_cpu(ctlr, xfer);
1652 		} else {
1653 			if (xfer->len)
1654 				dev_err(&msg->spi->dev,
1655 					"Bufferless transfer has length %u\n",
1656 					xfer->len);
1657 		}
1658 
1659 		if (!ctlr->ptp_sts_supported) {
1660 			ptp_read_system_postts(xfer->ptp_sts);
1661 			xfer->ptp_sts_word_post = xfer->len;
1662 		}
1663 
1664 		trace_spi_transfer_stop(msg, xfer);
1665 
1666 		if (msg->status != -EINPROGRESS)
1667 			goto out;
1668 
1669 		spi_transfer_delay_exec(xfer);
1670 
1671 		if (xfer->cs_change) {
1672 			if (list_is_last(&xfer->transfer_list,
1673 					 &msg->transfers)) {
1674 				keep_cs = true;
1675 			} else {
1676 				if (!xfer->cs_off)
1677 					spi_set_cs(msg->spi, false, false);
1678 				_spi_transfer_cs_change_delay(msg, xfer);
1679 				if (!list_next_entry(xfer, transfer_list)->cs_off)
1680 					spi_set_cs(msg->spi, true, false);
1681 			}
1682 		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1683 			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1684 			spi_set_cs(msg->spi, xfer->cs_off, false);
1685 		}
1686 
1687 		msg->actual_length += xfer->len;
1688 	}
1689 
1690 out:
1691 	if (ret != 0 || !keep_cs)
1692 		spi_set_cs(msg->spi, false, false);
1693 
1694 	if (msg->status == -EINPROGRESS)
1695 		msg->status = ret;
1696 
1697 	if (msg->status && ctlr->handle_err)
1698 		ctlr->handle_err(ctlr, msg);
1699 
1700 	spi_finalize_current_message(ctlr);
1701 
1702 	return ret;
1703 }
1704 
1705 /**
1706  * spi_finalize_current_transfer - report completion of a transfer
1707  * @ctlr: the controller reporting completion
1708  *
1709  * Called by SPI drivers using the core transfer_one_message()
1710  * implementation to notify it that the current interrupt driven
1711  * transfer has finished and the next one may be scheduled.
1712  */
1713 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1714 {
1715 	complete(&ctlr->xfer_completion);
1716 }
1717 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1718 
1719 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1720 {
1721 	if (ctlr->auto_runtime_pm) {
1722 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1723 	}
1724 }
1725 
1726 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1727 		struct spi_message *msg, bool was_busy)
1728 {
1729 	struct spi_transfer *xfer;
1730 	int ret;
1731 
1732 	if (!was_busy && ctlr->auto_runtime_pm) {
1733 		ret = pm_runtime_get_sync(ctlr->dev.parent);
1734 		if (ret < 0) {
1735 			pm_runtime_put_noidle(ctlr->dev.parent);
1736 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
1737 				ret);
1738 
1739 			msg->status = ret;
1740 			spi_finalize_current_message(ctlr);
1741 
1742 			return ret;
1743 		}
1744 	}
1745 
1746 	if (!was_busy)
1747 		trace_spi_controller_busy(ctlr);
1748 
1749 	if (!was_busy && ctlr->prepare_transfer_hardware) {
1750 		ret = ctlr->prepare_transfer_hardware(ctlr);
1751 		if (ret) {
1752 			dev_err(&ctlr->dev,
1753 				"failed to prepare transfer hardware: %d\n",
1754 				ret);
1755 
1756 			if (ctlr->auto_runtime_pm)
1757 				pm_runtime_put(ctlr->dev.parent);
1758 
1759 			msg->status = ret;
1760 			spi_finalize_current_message(ctlr);
1761 
1762 			return ret;
1763 		}
1764 	}
1765 
1766 	trace_spi_message_start(msg);
1767 
1768 	if (ctlr->prepare_message) {
1769 		ret = ctlr->prepare_message(ctlr, msg);
1770 		if (ret) {
1771 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1772 				ret);
1773 			msg->status = ret;
1774 			spi_finalize_current_message(ctlr);
1775 			return ret;
1776 		}
1777 		msg->prepared = true;
1778 	}
1779 
1780 	ret = spi_map_msg(ctlr, msg);
1781 	if (ret) {
1782 		msg->status = ret;
1783 		spi_finalize_current_message(ctlr);
1784 		return ret;
1785 	}
1786 
1787 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1788 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1789 			xfer->ptp_sts_word_pre = 0;
1790 			ptp_read_system_prets(xfer->ptp_sts);
1791 		}
1792 	}
1793 
1794 	/*
1795 	 * Drivers implementation of transfer_one_message() must arrange for
1796 	 * spi_finalize_current_message() to get called. Most drivers will do
1797 	 * this in the calling context, but some don't. For those cases, a
1798 	 * completion is used to guarantee that this function does not return
1799 	 * until spi_finalize_current_message() is done accessing
1800 	 * ctlr->cur_msg.
1801 	 * Use of the following two flags enable to opportunistically skip the
1802 	 * use of the completion since its use involves expensive spin locks.
1803 	 * In case of a race with the context that calls
1804 	 * spi_finalize_current_message() the completion will always be used,
1805 	 * due to strict ordering of these flags using barriers.
1806 	 */
1807 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1808 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1809 	reinit_completion(&ctlr->cur_msg_completion);
1810 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1811 
1812 	ret = ctlr->transfer_one_message(ctlr, msg);
1813 	if (ret) {
1814 		dev_err(&ctlr->dev,
1815 			"failed to transfer one message from queue\n");
1816 		return ret;
1817 	}
1818 
1819 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1820 	smp_mb(); /* See spi_finalize_current_message()... */
1821 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1822 		wait_for_completion(&ctlr->cur_msg_completion);
1823 
1824 	return 0;
1825 }
1826 
1827 /**
1828  * __spi_pump_messages - function which processes SPI message queue
1829  * @ctlr: controller to process queue for
1830  * @in_kthread: true if we are in the context of the message pump thread
1831  *
1832  * This function checks if there is any SPI message in the queue that
1833  * needs processing and if so call out to the driver to initialize hardware
1834  * and transfer each message.
1835  *
1836  * Note that it is called both from the kthread itself and also from
1837  * inside spi_sync(); the queue extraction handling at the top of the
1838  * function should deal with this safely.
1839  */
1840 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1841 {
1842 	struct spi_message *msg;
1843 	bool was_busy = false;
1844 	unsigned long flags;
1845 	int ret;
1846 
1847 	/* Take the I/O mutex */
1848 	mutex_lock(&ctlr->io_mutex);
1849 
1850 	/* Lock queue */
1851 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1852 
1853 	/* Make sure we are not already running a message */
1854 	if (ctlr->cur_msg)
1855 		goto out_unlock;
1856 
1857 	/* Check if the queue is idle */
1858 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1859 		if (!ctlr->busy)
1860 			goto out_unlock;
1861 
1862 		/* Defer any non-atomic teardown to the thread */
1863 		if (!in_kthread) {
1864 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1865 			    !ctlr->unprepare_transfer_hardware) {
1866 				spi_idle_runtime_pm(ctlr);
1867 				ctlr->busy = false;
1868 				ctlr->queue_empty = true;
1869 				trace_spi_controller_idle(ctlr);
1870 			} else {
1871 				kthread_queue_work(ctlr->kworker,
1872 						   &ctlr->pump_messages);
1873 			}
1874 			goto out_unlock;
1875 		}
1876 
1877 		ctlr->busy = false;
1878 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1879 
1880 		kfree(ctlr->dummy_rx);
1881 		ctlr->dummy_rx = NULL;
1882 		kfree(ctlr->dummy_tx);
1883 		ctlr->dummy_tx = NULL;
1884 		if (ctlr->unprepare_transfer_hardware &&
1885 		    ctlr->unprepare_transfer_hardware(ctlr))
1886 			dev_err(&ctlr->dev,
1887 				"failed to unprepare transfer hardware\n");
1888 		spi_idle_runtime_pm(ctlr);
1889 		trace_spi_controller_idle(ctlr);
1890 
1891 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1892 		ctlr->queue_empty = true;
1893 		goto out_unlock;
1894 	}
1895 
1896 	/* Extract head of queue */
1897 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1898 	ctlr->cur_msg = msg;
1899 
1900 	list_del_init(&msg->queue);
1901 	if (ctlr->busy)
1902 		was_busy = true;
1903 	else
1904 		ctlr->busy = true;
1905 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1906 
1907 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1908 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1909 
1910 	ctlr->cur_msg = NULL;
1911 	ctlr->fallback = false;
1912 
1913 	mutex_unlock(&ctlr->io_mutex);
1914 
1915 	/* Prod the scheduler in case transfer_one() was busy waiting */
1916 	if (!ret)
1917 		cond_resched();
1918 	return;
1919 
1920 out_unlock:
1921 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1922 	mutex_unlock(&ctlr->io_mutex);
1923 }
1924 
1925 /**
1926  * spi_pump_messages - kthread work function which processes spi message queue
1927  * @work: pointer to kthread work struct contained in the controller struct
1928  */
1929 static void spi_pump_messages(struct kthread_work *work)
1930 {
1931 	struct spi_controller *ctlr =
1932 		container_of(work, struct spi_controller, pump_messages);
1933 
1934 	__spi_pump_messages(ctlr, true);
1935 }
1936 
1937 /**
1938  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1939  * @ctlr: Pointer to the spi_controller structure of the driver
1940  * @xfer: Pointer to the transfer being timestamped
1941  * @progress: How many words (not bytes) have been transferred so far
1942  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1943  *	      transfer, for less jitter in time measurement. Only compatible
1944  *	      with PIO drivers. If true, must follow up with
1945  *	      spi_take_timestamp_post or otherwise system will crash.
1946  *	      WARNING: for fully predictable results, the CPU frequency must
1947  *	      also be under control (governor).
1948  *
1949  * This is a helper for drivers to collect the beginning of the TX timestamp
1950  * for the requested byte from the SPI transfer. The frequency with which this
1951  * function must be called (once per word, once for the whole transfer, once
1952  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1953  * greater than or equal to the requested byte at the time of the call. The
1954  * timestamp is only taken once, at the first such call. It is assumed that
1955  * the driver advances its @tx buffer pointer monotonically.
1956  */
1957 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1958 			    struct spi_transfer *xfer,
1959 			    size_t progress, bool irqs_off)
1960 {
1961 	if (!xfer->ptp_sts)
1962 		return;
1963 
1964 	if (xfer->timestamped)
1965 		return;
1966 
1967 	if (progress > xfer->ptp_sts_word_pre)
1968 		return;
1969 
1970 	/* Capture the resolution of the timestamp */
1971 	xfer->ptp_sts_word_pre = progress;
1972 
1973 	if (irqs_off) {
1974 		local_irq_save(ctlr->irq_flags);
1975 		preempt_disable();
1976 	}
1977 
1978 	ptp_read_system_prets(xfer->ptp_sts);
1979 }
1980 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1981 
1982 /**
1983  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1984  * @ctlr: Pointer to the spi_controller structure of the driver
1985  * @xfer: Pointer to the transfer being timestamped
1986  * @progress: How many words (not bytes) have been transferred so far
1987  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1988  *
1989  * This is a helper for drivers to collect the end of the TX timestamp for
1990  * the requested byte from the SPI transfer. Can be called with an arbitrary
1991  * frequency: only the first call where @tx exceeds or is equal to the
1992  * requested word will be timestamped.
1993  */
1994 void spi_take_timestamp_post(struct spi_controller *ctlr,
1995 			     struct spi_transfer *xfer,
1996 			     size_t progress, bool irqs_off)
1997 {
1998 	if (!xfer->ptp_sts)
1999 		return;
2000 
2001 	if (xfer->timestamped)
2002 		return;
2003 
2004 	if (progress < xfer->ptp_sts_word_post)
2005 		return;
2006 
2007 	ptp_read_system_postts(xfer->ptp_sts);
2008 
2009 	if (irqs_off) {
2010 		local_irq_restore(ctlr->irq_flags);
2011 		preempt_enable();
2012 	}
2013 
2014 	/* Capture the resolution of the timestamp */
2015 	xfer->ptp_sts_word_post = progress;
2016 
2017 	xfer->timestamped = 1;
2018 }
2019 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
2020 
2021 /**
2022  * spi_set_thread_rt - set the controller to pump at realtime priority
2023  * @ctlr: controller to boost priority of
2024  *
2025  * This can be called because the controller requested realtime priority
2026  * (by setting the ->rt value before calling spi_register_controller()) or
2027  * because a device on the bus said that its transfers needed realtime
2028  * priority.
2029  *
2030  * NOTE: at the moment if any device on a bus says it needs realtime then
2031  * the thread will be at realtime priority for all transfers on that
2032  * controller.  If this eventually becomes a problem we may see if we can
2033  * find a way to boost the priority only temporarily during relevant
2034  * transfers.
2035  */
2036 static void spi_set_thread_rt(struct spi_controller *ctlr)
2037 {
2038 	dev_info(&ctlr->dev,
2039 		"will run message pump with realtime priority\n");
2040 	sched_set_fifo(ctlr->kworker->task);
2041 }
2042 
2043 static int spi_init_queue(struct spi_controller *ctlr)
2044 {
2045 	ctlr->running = false;
2046 	ctlr->busy = false;
2047 	ctlr->queue_empty = true;
2048 
2049 	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2050 	if (IS_ERR(ctlr->kworker)) {
2051 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2052 		return PTR_ERR(ctlr->kworker);
2053 	}
2054 
2055 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2056 
2057 	/*
2058 	 * Controller config will indicate if this controller should run the
2059 	 * message pump with high (realtime) priority to reduce the transfer
2060 	 * latency on the bus by minimising the delay between a transfer
2061 	 * request and the scheduling of the message pump thread. Without this
2062 	 * setting the message pump thread will remain at default priority.
2063 	 */
2064 	if (ctlr->rt)
2065 		spi_set_thread_rt(ctlr);
2066 
2067 	return 0;
2068 }
2069 
2070 /**
2071  * spi_get_next_queued_message() - called by driver to check for queued
2072  * messages
2073  * @ctlr: the controller to check for queued messages
2074  *
2075  * If there are more messages in the queue, the next message is returned from
2076  * this call.
2077  *
2078  * Return: the next message in the queue, else NULL if the queue is empty.
2079  */
2080 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2081 {
2082 	struct spi_message *next;
2083 	unsigned long flags;
2084 
2085 	/* Get a pointer to the next message, if any */
2086 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2087 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2088 					queue);
2089 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2090 
2091 	return next;
2092 }
2093 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2094 
2095 /*
2096  * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2097  *                            and spi_maybe_unoptimize_message()
2098  * @msg: the message to unoptimize
2099  *
2100  * Peripheral drivers should use spi_unoptimize_message() and callers inside
2101  * core should use spi_maybe_unoptimize_message() rather than calling this
2102  * function directly.
2103  *
2104  * It is not valid to call this on a message that is not currently optimized.
2105  */
2106 static void __spi_unoptimize_message(struct spi_message *msg)
2107 {
2108 	struct spi_controller *ctlr = msg->spi->controller;
2109 
2110 	if (ctlr->unoptimize_message)
2111 		ctlr->unoptimize_message(msg);
2112 
2113 	spi_res_release(ctlr, msg);
2114 
2115 	msg->optimized = false;
2116 	msg->opt_state = NULL;
2117 }
2118 
2119 /*
2120  * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2121  * @msg: the message to unoptimize
2122  *
2123  * This function is used to unoptimize a message if and only if it was
2124  * optimized by the core (via spi_maybe_optimize_message()).
2125  */
2126 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2127 {
2128 	if (!msg->pre_optimized && msg->optimized &&
2129 	    !msg->spi->controller->defer_optimize_message)
2130 		__spi_unoptimize_message(msg);
2131 }
2132 
2133 /**
2134  * spi_finalize_current_message() - the current message is complete
2135  * @ctlr: the controller to return the message to
2136  *
2137  * Called by the driver to notify the core that the message in the front of the
2138  * queue is complete and can be removed from the queue.
2139  */
2140 void spi_finalize_current_message(struct spi_controller *ctlr)
2141 {
2142 	struct spi_transfer *xfer;
2143 	struct spi_message *mesg;
2144 	int ret;
2145 
2146 	mesg = ctlr->cur_msg;
2147 
2148 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2149 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2150 			ptp_read_system_postts(xfer->ptp_sts);
2151 			xfer->ptp_sts_word_post = xfer->len;
2152 		}
2153 	}
2154 
2155 	if (unlikely(ctlr->ptp_sts_supported))
2156 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2157 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2158 
2159 	spi_unmap_msg(ctlr, mesg);
2160 
2161 	if (mesg->prepared && ctlr->unprepare_message) {
2162 		ret = ctlr->unprepare_message(ctlr, mesg);
2163 		if (ret) {
2164 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2165 				ret);
2166 		}
2167 	}
2168 
2169 	mesg->prepared = false;
2170 
2171 	spi_maybe_unoptimize_message(mesg);
2172 
2173 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2174 	smp_mb(); /* See __spi_pump_transfer_message()... */
2175 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2176 		complete(&ctlr->cur_msg_completion);
2177 
2178 	trace_spi_message_done(mesg);
2179 
2180 	mesg->state = NULL;
2181 	if (mesg->complete)
2182 		mesg->complete(mesg->context);
2183 }
2184 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2185 
2186 static int spi_start_queue(struct spi_controller *ctlr)
2187 {
2188 	unsigned long flags;
2189 
2190 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2191 
2192 	if (ctlr->running || ctlr->busy) {
2193 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2194 		return -EBUSY;
2195 	}
2196 
2197 	ctlr->running = true;
2198 	ctlr->cur_msg = NULL;
2199 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2200 
2201 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2202 
2203 	return 0;
2204 }
2205 
2206 static int spi_stop_queue(struct spi_controller *ctlr)
2207 {
2208 	unsigned int limit = 500;
2209 	unsigned long flags;
2210 
2211 	/*
2212 	 * This is a bit lame, but is optimized for the common execution path.
2213 	 * A wait_queue on the ctlr->busy could be used, but then the common
2214 	 * execution path (pump_messages) would be required to call wake_up or
2215 	 * friends on every SPI message. Do this instead.
2216 	 */
2217 	do {
2218 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2219 		if (list_empty(&ctlr->queue) && !ctlr->busy) {
2220 			ctlr->running = false;
2221 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2222 			return 0;
2223 		}
2224 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2225 		usleep_range(10000, 11000);
2226 	} while (--limit);
2227 
2228 	return -EBUSY;
2229 }
2230 
2231 static int spi_destroy_queue(struct spi_controller *ctlr)
2232 {
2233 	int ret;
2234 
2235 	ret = spi_stop_queue(ctlr);
2236 
2237 	/*
2238 	 * kthread_flush_worker will block until all work is done.
2239 	 * If the reason that stop_queue timed out is that the work will never
2240 	 * finish, then it does no good to call flush/stop thread, so
2241 	 * return anyway.
2242 	 */
2243 	if (ret) {
2244 		dev_err(&ctlr->dev, "problem destroying queue\n");
2245 		return ret;
2246 	}
2247 
2248 	kthread_destroy_worker(ctlr->kworker);
2249 
2250 	return 0;
2251 }
2252 
2253 static int __spi_queued_transfer(struct spi_device *spi,
2254 				 struct spi_message *msg,
2255 				 bool need_pump)
2256 {
2257 	struct spi_controller *ctlr = spi->controller;
2258 	unsigned long flags;
2259 
2260 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2261 
2262 	if (!ctlr->running) {
2263 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2264 		return -ESHUTDOWN;
2265 	}
2266 	msg->actual_length = 0;
2267 	msg->status = -EINPROGRESS;
2268 
2269 	list_add_tail(&msg->queue, &ctlr->queue);
2270 	ctlr->queue_empty = false;
2271 	if (!ctlr->busy && need_pump)
2272 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2273 
2274 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2275 	return 0;
2276 }
2277 
2278 /**
2279  * spi_queued_transfer - transfer function for queued transfers
2280  * @spi: SPI device which is requesting transfer
2281  * @msg: SPI message which is to handled is queued to driver queue
2282  *
2283  * Return: zero on success, else a negative error code.
2284  */
2285 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2286 {
2287 	return __spi_queued_transfer(spi, msg, true);
2288 }
2289 
2290 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2291 {
2292 	int ret;
2293 
2294 	ctlr->transfer = spi_queued_transfer;
2295 	if (!ctlr->transfer_one_message)
2296 		ctlr->transfer_one_message = spi_transfer_one_message;
2297 
2298 	/* Initialize and start queue */
2299 	ret = spi_init_queue(ctlr);
2300 	if (ret) {
2301 		dev_err(&ctlr->dev, "problem initializing queue\n");
2302 		goto err_init_queue;
2303 	}
2304 	ctlr->queued = true;
2305 	ret = spi_start_queue(ctlr);
2306 	if (ret) {
2307 		dev_err(&ctlr->dev, "problem starting queue\n");
2308 		goto err_start_queue;
2309 	}
2310 
2311 	return 0;
2312 
2313 err_start_queue:
2314 	spi_destroy_queue(ctlr);
2315 err_init_queue:
2316 	return ret;
2317 }
2318 
2319 /**
2320  * spi_flush_queue - Send all pending messages in the queue from the callers'
2321  *		     context
2322  * @ctlr: controller to process queue for
2323  *
2324  * This should be used when one wants to ensure all pending messages have been
2325  * sent before doing something. Is used by the spi-mem code to make sure SPI
2326  * memory operations do not preempt regular SPI transfers that have been queued
2327  * before the spi-mem operation.
2328  */
2329 void spi_flush_queue(struct spi_controller *ctlr)
2330 {
2331 	if (ctlr->transfer == spi_queued_transfer)
2332 		__spi_pump_messages(ctlr, false);
2333 }
2334 
2335 /*-------------------------------------------------------------------------*/
2336 
2337 #if defined(CONFIG_OF)
2338 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2339 				     struct spi_delay *delay, const char *prop)
2340 {
2341 	u32 value;
2342 
2343 	if (!of_property_read_u32(nc, prop, &value)) {
2344 		if (value > U16_MAX) {
2345 			delay->value = DIV_ROUND_UP(value, 1000);
2346 			delay->unit = SPI_DELAY_UNIT_USECS;
2347 		} else {
2348 			delay->value = value;
2349 			delay->unit = SPI_DELAY_UNIT_NSECS;
2350 		}
2351 	}
2352 }
2353 
2354 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2355 			   struct device_node *nc)
2356 {
2357 	u32 value, cs[SPI_DEVICE_CS_CNT_MAX];
2358 	int rc, idx;
2359 
2360 	/* Mode (clock phase/polarity/etc.) */
2361 	if (of_property_read_bool(nc, "spi-cpha"))
2362 		spi->mode |= SPI_CPHA;
2363 	if (of_property_read_bool(nc, "spi-cpol"))
2364 		spi->mode |= SPI_CPOL;
2365 	if (of_property_read_bool(nc, "spi-3wire"))
2366 		spi->mode |= SPI_3WIRE;
2367 	if (of_property_read_bool(nc, "spi-lsb-first"))
2368 		spi->mode |= SPI_LSB_FIRST;
2369 	if (of_property_read_bool(nc, "spi-cs-high"))
2370 		spi->mode |= SPI_CS_HIGH;
2371 
2372 	/* Device DUAL/QUAD mode */
2373 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2374 		switch (value) {
2375 		case 0:
2376 			spi->mode |= SPI_NO_TX;
2377 			break;
2378 		case 1:
2379 			break;
2380 		case 2:
2381 			spi->mode |= SPI_TX_DUAL;
2382 			break;
2383 		case 4:
2384 			spi->mode |= SPI_TX_QUAD;
2385 			break;
2386 		case 8:
2387 			spi->mode |= SPI_TX_OCTAL;
2388 			break;
2389 		default:
2390 			dev_warn(&ctlr->dev,
2391 				"spi-tx-bus-width %d not supported\n",
2392 				value);
2393 			break;
2394 		}
2395 	}
2396 
2397 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2398 		switch (value) {
2399 		case 0:
2400 			spi->mode |= SPI_NO_RX;
2401 			break;
2402 		case 1:
2403 			break;
2404 		case 2:
2405 			spi->mode |= SPI_RX_DUAL;
2406 			break;
2407 		case 4:
2408 			spi->mode |= SPI_RX_QUAD;
2409 			break;
2410 		case 8:
2411 			spi->mode |= SPI_RX_OCTAL;
2412 			break;
2413 		default:
2414 			dev_warn(&ctlr->dev,
2415 				"spi-rx-bus-width %d not supported\n",
2416 				value);
2417 			break;
2418 		}
2419 	}
2420 
2421 	if (spi_controller_is_target(ctlr)) {
2422 		if (!of_node_name_eq(nc, "slave")) {
2423 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2424 				nc);
2425 			return -EINVAL;
2426 		}
2427 		return 0;
2428 	}
2429 
2430 	/* Device address */
2431 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2432 						 SPI_DEVICE_CS_CNT_MAX);
2433 	if (rc < 0) {
2434 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2435 			nc, rc);
2436 		return rc;
2437 	}
2438 
2439 	if ((of_property_present(nc, "parallel-memories")) &&
2440 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2441 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2442 		return -EINVAL;
2443 	}
2444 
2445 	spi->num_chipselect = rc;
2446 	for (idx = 0; idx < rc; idx++)
2447 		spi_set_chipselect(spi, idx, cs[idx]);
2448 
2449 	/*
2450 	 * By default spi->chip_select[0] will hold the physical CS number,
2451 	 * so set bit 0 in spi->cs_index_mask.
2452 	 */
2453 	spi->cs_index_mask = BIT(0);
2454 
2455 	/* Device speed */
2456 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2457 		spi->max_speed_hz = value;
2458 
2459 	/* Device CS delays */
2460 	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2461 	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2462 	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2463 
2464 	return 0;
2465 }
2466 
2467 static struct spi_device *
2468 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2469 {
2470 	struct spi_device *spi;
2471 	int rc;
2472 
2473 	/* Alloc an spi_device */
2474 	spi = spi_alloc_device(ctlr);
2475 	if (!spi) {
2476 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2477 		rc = -ENOMEM;
2478 		goto err_out;
2479 	}
2480 
2481 	/* Select device driver */
2482 	rc = of_alias_from_compatible(nc, spi->modalias,
2483 				      sizeof(spi->modalias));
2484 	if (rc < 0) {
2485 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2486 		goto err_out;
2487 	}
2488 
2489 	rc = of_spi_parse_dt(ctlr, spi, nc);
2490 	if (rc)
2491 		goto err_out;
2492 
2493 	/* Store a pointer to the node in the device structure */
2494 	of_node_get(nc);
2495 
2496 	device_set_node(&spi->dev, of_fwnode_handle(nc));
2497 
2498 	/* Register the new device */
2499 	rc = spi_add_device(spi);
2500 	if (rc) {
2501 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2502 		goto err_of_node_put;
2503 	}
2504 
2505 	return spi;
2506 
2507 err_of_node_put:
2508 	of_node_put(nc);
2509 err_out:
2510 	spi_dev_put(spi);
2511 	return ERR_PTR(rc);
2512 }
2513 
2514 /**
2515  * of_register_spi_devices() - Register child devices onto the SPI bus
2516  * @ctlr:	Pointer to spi_controller device
2517  *
2518  * Registers an spi_device for each child node of controller node which
2519  * represents a valid SPI target device.
2520  */
2521 static void of_register_spi_devices(struct spi_controller *ctlr)
2522 {
2523 	struct spi_device *spi;
2524 	struct device_node *nc;
2525 
2526 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2527 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2528 			continue;
2529 		spi = of_register_spi_device(ctlr, nc);
2530 		if (IS_ERR(spi)) {
2531 			dev_warn(&ctlr->dev,
2532 				 "Failed to create SPI device for %pOF\n", nc);
2533 			of_node_clear_flag(nc, OF_POPULATED);
2534 		}
2535 	}
2536 }
2537 #else
2538 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2539 #endif
2540 
2541 /**
2542  * spi_new_ancillary_device() - Register ancillary SPI device
2543  * @spi:         Pointer to the main SPI device registering the ancillary device
2544  * @chip_select: Chip Select of the ancillary device
2545  *
2546  * Register an ancillary SPI device; for example some chips have a chip-select
2547  * for normal device usage and another one for setup/firmware upload.
2548  *
2549  * This may only be called from main SPI device's probe routine.
2550  *
2551  * Return: 0 on success; negative errno on failure
2552  */
2553 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2554 					     u8 chip_select)
2555 {
2556 	struct spi_controller *ctlr = spi->controller;
2557 	struct spi_device *ancillary;
2558 	int rc;
2559 
2560 	/* Alloc an spi_device */
2561 	ancillary = spi_alloc_device(ctlr);
2562 	if (!ancillary) {
2563 		rc = -ENOMEM;
2564 		goto err_out;
2565 	}
2566 
2567 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2568 
2569 	/* Use provided chip-select for ancillary device */
2570 	spi_set_chipselect(ancillary, 0, chip_select);
2571 
2572 	/* Take over SPI mode/speed from SPI main device */
2573 	ancillary->max_speed_hz = spi->max_speed_hz;
2574 	ancillary->mode = spi->mode;
2575 	/*
2576 	 * By default spi->chip_select[0] will hold the physical CS number,
2577 	 * so set bit 0 in spi->cs_index_mask.
2578 	 */
2579 	ancillary->cs_index_mask = BIT(0);
2580 
2581 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2582 
2583 	/* Register the new device */
2584 	rc = __spi_add_device(ancillary);
2585 	if (rc) {
2586 		dev_err(&spi->dev, "failed to register ancillary device\n");
2587 		goto err_out;
2588 	}
2589 
2590 	return ancillary;
2591 
2592 err_out:
2593 	spi_dev_put(ancillary);
2594 	return ERR_PTR(rc);
2595 }
2596 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
2597 
2598 #ifdef CONFIG_ACPI
2599 struct acpi_spi_lookup {
2600 	struct spi_controller 	*ctlr;
2601 	u32			max_speed_hz;
2602 	u32			mode;
2603 	int			irq;
2604 	u8			bits_per_word;
2605 	u8			chip_select;
2606 	int			n;
2607 	int			index;
2608 };
2609 
2610 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2611 {
2612 	struct acpi_resource_spi_serialbus *sb;
2613 	int *count = data;
2614 
2615 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2616 		return 1;
2617 
2618 	sb = &ares->data.spi_serial_bus;
2619 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2620 		return 1;
2621 
2622 	*count = *count + 1;
2623 
2624 	return 1;
2625 }
2626 
2627 /**
2628  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2629  * @adev:	ACPI device
2630  *
2631  * Return: the number of SpiSerialBus resources in the ACPI-device's
2632  * resource-list; or a negative error code.
2633  */
2634 int acpi_spi_count_resources(struct acpi_device *adev)
2635 {
2636 	LIST_HEAD(r);
2637 	int count = 0;
2638 	int ret;
2639 
2640 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2641 	if (ret < 0)
2642 		return ret;
2643 
2644 	acpi_dev_free_resource_list(&r);
2645 
2646 	return count;
2647 }
2648 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2649 
2650 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2651 					    struct acpi_spi_lookup *lookup)
2652 {
2653 	const union acpi_object *obj;
2654 
2655 	if (!x86_apple_machine)
2656 		return;
2657 
2658 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2659 	    && obj->buffer.length >= 4)
2660 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2661 
2662 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2663 	    && obj->buffer.length == 8)
2664 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2665 
2666 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2667 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2668 		lookup->mode |= SPI_LSB_FIRST;
2669 
2670 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2671 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2672 		lookup->mode |= SPI_CPOL;
2673 
2674 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2675 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2676 		lookup->mode |= SPI_CPHA;
2677 }
2678 
2679 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2680 {
2681 	struct acpi_spi_lookup *lookup = data;
2682 	struct spi_controller *ctlr = lookup->ctlr;
2683 
2684 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2685 		struct acpi_resource_spi_serialbus *sb;
2686 		acpi_handle parent_handle;
2687 		acpi_status status;
2688 
2689 		sb = &ares->data.spi_serial_bus;
2690 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2691 
2692 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2693 				return 1;
2694 
2695 			status = acpi_get_handle(NULL,
2696 						 sb->resource_source.string_ptr,
2697 						 &parent_handle);
2698 
2699 			if (ACPI_FAILURE(status))
2700 				return -ENODEV;
2701 
2702 			if (ctlr) {
2703 				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2704 					return -ENODEV;
2705 			} else {
2706 				struct acpi_device *adev;
2707 
2708 				adev = acpi_fetch_acpi_dev(parent_handle);
2709 				if (!adev)
2710 					return -ENODEV;
2711 
2712 				ctlr = acpi_spi_find_controller_by_adev(adev);
2713 				if (!ctlr)
2714 					return -EPROBE_DEFER;
2715 
2716 				lookup->ctlr = ctlr;
2717 			}
2718 
2719 			/*
2720 			 * ACPI DeviceSelection numbering is handled by the
2721 			 * host controller driver in Windows and can vary
2722 			 * from driver to driver. In Linux we always expect
2723 			 * 0 .. max - 1 so we need to ask the driver to
2724 			 * translate between the two schemes.
2725 			 */
2726 			if (ctlr->fw_translate_cs) {
2727 				int cs = ctlr->fw_translate_cs(ctlr,
2728 						sb->device_selection);
2729 				if (cs < 0)
2730 					return cs;
2731 				lookup->chip_select = cs;
2732 			} else {
2733 				lookup->chip_select = sb->device_selection;
2734 			}
2735 
2736 			lookup->max_speed_hz = sb->connection_speed;
2737 			lookup->bits_per_word = sb->data_bit_length;
2738 
2739 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2740 				lookup->mode |= SPI_CPHA;
2741 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2742 				lookup->mode |= SPI_CPOL;
2743 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2744 				lookup->mode |= SPI_CS_HIGH;
2745 		}
2746 	} else if (lookup->irq < 0) {
2747 		struct resource r;
2748 
2749 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2750 			lookup->irq = r.start;
2751 	}
2752 
2753 	/* Always tell the ACPI core to skip this resource */
2754 	return 1;
2755 }
2756 
2757 /**
2758  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2759  * @ctlr: controller to which the spi device belongs
2760  * @adev: ACPI Device for the spi device
2761  * @index: Index of the spi resource inside the ACPI Node
2762  *
2763  * This should be used to allocate a new SPI device from and ACPI Device node.
2764  * The caller is responsible for calling spi_add_device to register the SPI device.
2765  *
2766  * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2767  * using the resource.
2768  * If index is set to -1, index is not used.
2769  * Note: If index is -1, ctlr must be set.
2770  *
2771  * Return: a pointer to the new device, or ERR_PTR on error.
2772  */
2773 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2774 					 struct acpi_device *adev,
2775 					 int index)
2776 {
2777 	acpi_handle parent_handle = NULL;
2778 	struct list_head resource_list;
2779 	struct acpi_spi_lookup lookup = {};
2780 	struct spi_device *spi;
2781 	int ret;
2782 
2783 	if (!ctlr && index == -1)
2784 		return ERR_PTR(-EINVAL);
2785 
2786 	lookup.ctlr		= ctlr;
2787 	lookup.irq		= -1;
2788 	lookup.index		= index;
2789 	lookup.n		= 0;
2790 
2791 	INIT_LIST_HEAD(&resource_list);
2792 	ret = acpi_dev_get_resources(adev, &resource_list,
2793 				     acpi_spi_add_resource, &lookup);
2794 	acpi_dev_free_resource_list(&resource_list);
2795 
2796 	if (ret < 0)
2797 		/* Found SPI in _CRS but it points to another controller */
2798 		return ERR_PTR(ret);
2799 
2800 	if (!lookup.max_speed_hz &&
2801 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2802 	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2803 		/* Apple does not use _CRS but nested devices for SPI target devices */
2804 		acpi_spi_parse_apple_properties(adev, &lookup);
2805 	}
2806 
2807 	if (!lookup.max_speed_hz)
2808 		return ERR_PTR(-ENODEV);
2809 
2810 	spi = spi_alloc_device(lookup.ctlr);
2811 	if (!spi) {
2812 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2813 			dev_name(&adev->dev));
2814 		return ERR_PTR(-ENOMEM);
2815 	}
2816 
2817 	spi_set_chipselect(spi, 0, lookup.chip_select);
2818 
2819 	ACPI_COMPANION_SET(&spi->dev, adev);
2820 	spi->max_speed_hz	= lookup.max_speed_hz;
2821 	spi->mode		|= lookup.mode;
2822 	spi->irq		= lookup.irq;
2823 	spi->bits_per_word	= lookup.bits_per_word;
2824 	/*
2825 	 * By default spi->chip_select[0] will hold the physical CS number,
2826 	 * so set bit 0 in spi->cs_index_mask.
2827 	 */
2828 	spi->cs_index_mask	= BIT(0);
2829 
2830 	return spi;
2831 }
2832 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2833 
2834 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2835 					    struct acpi_device *adev)
2836 {
2837 	struct spi_device *spi;
2838 
2839 	if (acpi_bus_get_status(adev) || !adev->status.present ||
2840 	    acpi_device_enumerated(adev))
2841 		return AE_OK;
2842 
2843 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
2844 	if (IS_ERR(spi)) {
2845 		if (PTR_ERR(spi) == -ENOMEM)
2846 			return AE_NO_MEMORY;
2847 		else
2848 			return AE_OK;
2849 	}
2850 
2851 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2852 			  sizeof(spi->modalias));
2853 
2854 	acpi_device_set_enumerated(adev);
2855 
2856 	adev->power.flags.ignore_parent = true;
2857 	if (spi_add_device(spi)) {
2858 		adev->power.flags.ignore_parent = false;
2859 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2860 			dev_name(&adev->dev));
2861 		spi_dev_put(spi);
2862 	}
2863 
2864 	return AE_OK;
2865 }
2866 
2867 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2868 				       void *data, void **return_value)
2869 {
2870 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2871 	struct spi_controller *ctlr = data;
2872 
2873 	if (!adev)
2874 		return AE_OK;
2875 
2876 	return acpi_register_spi_device(ctlr, adev);
2877 }
2878 
2879 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
2880 
2881 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2882 {
2883 	acpi_status status;
2884 	acpi_handle handle;
2885 
2886 	handle = ACPI_HANDLE(ctlr->dev.parent);
2887 	if (!handle)
2888 		return;
2889 
2890 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2891 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2892 				     acpi_spi_add_device, NULL, ctlr, NULL);
2893 	if (ACPI_FAILURE(status))
2894 		dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
2895 }
2896 #else
2897 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2898 #endif /* CONFIG_ACPI */
2899 
2900 static void spi_controller_release(struct device *dev)
2901 {
2902 	struct spi_controller *ctlr;
2903 
2904 	ctlr = container_of(dev, struct spi_controller, dev);
2905 	kfree(ctlr);
2906 }
2907 
2908 static const struct class spi_controller_class = {
2909 	.name		= "spi_master",
2910 	.dev_release	= spi_controller_release,
2911 	.dev_groups	= spi_controller_groups,
2912 };
2913 
2914 #ifdef CONFIG_SPI_SLAVE
2915 /**
2916  * spi_target_abort - abort the ongoing transfer request on an SPI target controller
2917  * @spi: device used for the current transfer
2918  */
2919 int spi_target_abort(struct spi_device *spi)
2920 {
2921 	struct spi_controller *ctlr = spi->controller;
2922 
2923 	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2924 		return ctlr->target_abort(ctlr);
2925 
2926 	return -ENOTSUPP;
2927 }
2928 EXPORT_SYMBOL_GPL(spi_target_abort);
2929 
2930 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2931 			  char *buf)
2932 {
2933 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2934 						   dev);
2935 	struct device *child;
2936 	int ret;
2937 
2938 	child = device_find_any_child(&ctlr->dev);
2939 	ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2940 	put_device(child);
2941 
2942 	return ret;
2943 }
2944 
2945 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2946 			   const char *buf, size_t count)
2947 {
2948 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2949 						   dev);
2950 	struct spi_device *spi;
2951 	struct device *child;
2952 	char name[32];
2953 	int rc;
2954 
2955 	rc = sscanf(buf, "%31s", name);
2956 	if (rc != 1 || !name[0])
2957 		return -EINVAL;
2958 
2959 	child = device_find_any_child(&ctlr->dev);
2960 	if (child) {
2961 		/* Remove registered target device */
2962 		device_unregister(child);
2963 		put_device(child);
2964 	}
2965 
2966 	if (strcmp(name, "(null)")) {
2967 		/* Register new target device */
2968 		spi = spi_alloc_device(ctlr);
2969 		if (!spi)
2970 			return -ENOMEM;
2971 
2972 		strscpy(spi->modalias, name, sizeof(spi->modalias));
2973 
2974 		rc = spi_add_device(spi);
2975 		if (rc) {
2976 			spi_dev_put(spi);
2977 			return rc;
2978 		}
2979 	}
2980 
2981 	return count;
2982 }
2983 
2984 static DEVICE_ATTR_RW(slave);
2985 
2986 static struct attribute *spi_target_attrs[] = {
2987 	&dev_attr_slave.attr,
2988 	NULL,
2989 };
2990 
2991 static const struct attribute_group spi_target_group = {
2992 	.attrs = spi_target_attrs,
2993 };
2994 
2995 static const struct attribute_group *spi_target_groups[] = {
2996 	&spi_controller_statistics_group,
2997 	&spi_target_group,
2998 	NULL,
2999 };
3000 
3001 static const struct class spi_target_class = {
3002 	.name		= "spi_slave",
3003 	.dev_release	= spi_controller_release,
3004 	.dev_groups	= spi_target_groups,
3005 };
3006 #else
3007 extern struct class spi_target_class;	/* dummy */
3008 #endif
3009 
3010 /**
3011  * __spi_alloc_controller - allocate an SPI host or target controller
3012  * @dev: the controller, possibly using the platform_bus
3013  * @size: how much zeroed driver-private data to allocate; the pointer to this
3014  *	memory is in the driver_data field of the returned device, accessible
3015  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
3016  *	drivers granting DMA access to portions of their private data need to
3017  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
3018  * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
3019  *	controller
3020  * Context: can sleep
3021  *
3022  * This call is used only by SPI controller drivers, which are the
3023  * only ones directly touching chip registers.  It's how they allocate
3024  * an spi_controller structure, prior to calling spi_register_controller().
3025  *
3026  * This must be called from context that can sleep.
3027  *
3028  * The caller is responsible for assigning the bus number and initializing the
3029  * controller's methods before calling spi_register_controller(); and (after
3030  * errors adding the device) calling spi_controller_put() to prevent a memory
3031  * leak.
3032  *
3033  * Return: the SPI controller structure on success, else NULL.
3034  */
3035 struct spi_controller *__spi_alloc_controller(struct device *dev,
3036 					      unsigned int size, bool target)
3037 {
3038 	struct spi_controller	*ctlr;
3039 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3040 
3041 	if (!dev)
3042 		return NULL;
3043 
3044 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3045 	if (!ctlr)
3046 		return NULL;
3047 
3048 	device_initialize(&ctlr->dev);
3049 	INIT_LIST_HEAD(&ctlr->queue);
3050 	spin_lock_init(&ctlr->queue_lock);
3051 	spin_lock_init(&ctlr->bus_lock_spinlock);
3052 	mutex_init(&ctlr->bus_lock_mutex);
3053 	mutex_init(&ctlr->io_mutex);
3054 	mutex_init(&ctlr->add_lock);
3055 	ctlr->bus_num = -1;
3056 	ctlr->num_chipselect = 1;
3057 	ctlr->target = target;
3058 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
3059 		ctlr->dev.class = &spi_target_class;
3060 	else
3061 		ctlr->dev.class = &spi_controller_class;
3062 	ctlr->dev.parent = dev;
3063 	pm_suspend_ignore_children(&ctlr->dev, true);
3064 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3065 
3066 	return ctlr;
3067 }
3068 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
3069 
3070 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3071 {
3072 	spi_controller_put(*(struct spi_controller **)ctlr);
3073 }
3074 
3075 /**
3076  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3077  * @dev: physical device of SPI controller
3078  * @size: how much zeroed driver-private data to allocate
3079  * @target: whether to allocate an SPI host (false) or SPI target (true) controller
3080  * Context: can sleep
3081  *
3082  * Allocate an SPI controller and automatically release a reference on it
3083  * when @dev is unbound from its driver.  Drivers are thus relieved from
3084  * having to call spi_controller_put().
3085  *
3086  * The arguments to this function are identical to __spi_alloc_controller().
3087  *
3088  * Return: the SPI controller structure on success, else NULL.
3089  */
3090 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3091 						   unsigned int size,
3092 						   bool target)
3093 {
3094 	struct spi_controller **ptr, *ctlr;
3095 
3096 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3097 			   GFP_KERNEL);
3098 	if (!ptr)
3099 		return NULL;
3100 
3101 	ctlr = __spi_alloc_controller(dev, size, target);
3102 	if (ctlr) {
3103 		ctlr->devm_allocated = true;
3104 		*ptr = ctlr;
3105 		devres_add(dev, ptr);
3106 	} else {
3107 		devres_free(ptr);
3108 	}
3109 
3110 	return ctlr;
3111 }
3112 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3113 
3114 /**
3115  * spi_get_gpio_descs() - grab chip select GPIOs for the controller
3116  * @ctlr: The SPI controller to grab GPIO descriptors for
3117  */
3118 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3119 {
3120 	int nb, i;
3121 	struct gpio_desc **cs;
3122 	struct device *dev = &ctlr->dev;
3123 	unsigned long native_cs_mask = 0;
3124 	unsigned int num_cs_gpios = 0;
3125 
3126 	nb = gpiod_count(dev, "cs");
3127 	if (nb < 0) {
3128 		/* No GPIOs at all is fine, else return the error */
3129 		if (nb == -ENOENT)
3130 			return 0;
3131 		return nb;
3132 	}
3133 
3134 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3135 
3136 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3137 			  GFP_KERNEL);
3138 	if (!cs)
3139 		return -ENOMEM;
3140 	ctlr->cs_gpiods = cs;
3141 
3142 	for (i = 0; i < nb; i++) {
3143 		/*
3144 		 * Most chipselects are active low, the inverted
3145 		 * semantics are handled by special quirks in gpiolib,
3146 		 * so initializing them GPIOD_OUT_LOW here means
3147 		 * "unasserted", in most cases this will drive the physical
3148 		 * line high.
3149 		 */
3150 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3151 						      GPIOD_OUT_LOW);
3152 		if (IS_ERR(cs[i]))
3153 			return PTR_ERR(cs[i]);
3154 
3155 		if (cs[i]) {
3156 			/*
3157 			 * If we find a CS GPIO, name it after the device and
3158 			 * chip select line.
3159 			 */
3160 			char *gpioname;
3161 
3162 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3163 						  dev_name(dev), i);
3164 			if (!gpioname)
3165 				return -ENOMEM;
3166 			gpiod_set_consumer_name(cs[i], gpioname);
3167 			num_cs_gpios++;
3168 			continue;
3169 		}
3170 
3171 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3172 			dev_err(dev, "Invalid native chip select %d\n", i);
3173 			return -EINVAL;
3174 		}
3175 		native_cs_mask |= BIT(i);
3176 	}
3177 
3178 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3179 
3180 	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3181 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3182 		dev_err(dev, "No unused native chip select available\n");
3183 		return -EINVAL;
3184 	}
3185 
3186 	return 0;
3187 }
3188 
3189 static int spi_controller_check_ops(struct spi_controller *ctlr)
3190 {
3191 	/*
3192 	 * The controller may implement only the high-level SPI-memory like
3193 	 * operations if it does not support regular SPI transfers, and this is
3194 	 * valid use case.
3195 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3196 	 * one of the ->transfer_xxx() method be implemented.
3197 	 */
3198 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3199 		if (!ctlr->transfer && !ctlr->transfer_one &&
3200 		   !ctlr->transfer_one_message) {
3201 			return -EINVAL;
3202 		}
3203 	}
3204 
3205 	return 0;
3206 }
3207 
3208 /* Allocate dynamic bus number using Linux idr */
3209 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3210 {
3211 	int id;
3212 
3213 	mutex_lock(&board_lock);
3214 	id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
3215 	mutex_unlock(&board_lock);
3216 	if (WARN(id < 0, "couldn't get idr"))
3217 		return id == -ENOSPC ? -EBUSY : id;
3218 	ctlr->bus_num = id;
3219 	return 0;
3220 }
3221 
3222 /**
3223  * spi_register_controller - register SPI host or target controller
3224  * @ctlr: initialized controller, originally from spi_alloc_host() or
3225  *	spi_alloc_target()
3226  * Context: can sleep
3227  *
3228  * SPI controllers connect to their drivers using some non-SPI bus,
3229  * such as the platform bus.  The final stage of probe() in that code
3230  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3231  *
3232  * SPI controllers use board specific (often SOC specific) bus numbers,
3233  * and board-specific addressing for SPI devices combines those numbers
3234  * with chip select numbers.  Since SPI does not directly support dynamic
3235  * device identification, boards need configuration tables telling which
3236  * chip is at which address.
3237  *
3238  * This must be called from context that can sleep.  It returns zero on
3239  * success, else a negative error code (dropping the controller's refcount).
3240  * After a successful return, the caller is responsible for calling
3241  * spi_unregister_controller().
3242  *
3243  * Return: zero on success, else a negative error code.
3244  */
3245 int spi_register_controller(struct spi_controller *ctlr)
3246 {
3247 	struct device		*dev = ctlr->dev.parent;
3248 	struct boardinfo	*bi;
3249 	int			first_dynamic;
3250 	int			status;
3251 	int			idx;
3252 
3253 	if (!dev)
3254 		return -ENODEV;
3255 
3256 	/*
3257 	 * Make sure all necessary hooks are implemented before registering
3258 	 * the SPI controller.
3259 	 */
3260 	status = spi_controller_check_ops(ctlr);
3261 	if (status)
3262 		return status;
3263 
3264 	if (ctlr->bus_num < 0)
3265 		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3266 	if (ctlr->bus_num >= 0) {
3267 		/* Devices with a fixed bus num must check-in with the num */
3268 		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3269 		if (status)
3270 			return status;
3271 	}
3272 	if (ctlr->bus_num < 0) {
3273 		first_dynamic = of_alias_get_highest_id("spi");
3274 		if (first_dynamic < 0)
3275 			first_dynamic = 0;
3276 		else
3277 			first_dynamic++;
3278 
3279 		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3280 		if (status)
3281 			return status;
3282 	}
3283 	ctlr->bus_lock_flag = 0;
3284 	init_completion(&ctlr->xfer_completion);
3285 	init_completion(&ctlr->cur_msg_completion);
3286 	if (!ctlr->max_dma_len)
3287 		ctlr->max_dma_len = INT_MAX;
3288 
3289 	/*
3290 	 * Register the device, then userspace will see it.
3291 	 * Registration fails if the bus ID is in use.
3292 	 */
3293 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3294 
3295 	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3296 		status = spi_get_gpio_descs(ctlr);
3297 		if (status)
3298 			goto free_bus_id;
3299 		/*
3300 		 * A controller using GPIO descriptors always
3301 		 * supports SPI_CS_HIGH if need be.
3302 		 */
3303 		ctlr->mode_bits |= SPI_CS_HIGH;
3304 	}
3305 
3306 	/*
3307 	 * Even if it's just one always-selected device, there must
3308 	 * be at least one chipselect.
3309 	 */
3310 	if (!ctlr->num_chipselect) {
3311 		status = -EINVAL;
3312 		goto free_bus_id;
3313 	}
3314 
3315 	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
3316 	for (idx = 0; idx < SPI_DEVICE_CS_CNT_MAX; idx++)
3317 		ctlr->last_cs[idx] = SPI_INVALID_CS;
3318 
3319 	status = device_add(&ctlr->dev);
3320 	if (status < 0)
3321 		goto free_bus_id;
3322 	dev_dbg(dev, "registered %s %s\n",
3323 			spi_controller_is_target(ctlr) ? "target" : "host",
3324 			dev_name(&ctlr->dev));
3325 
3326 	/*
3327 	 * If we're using a queued driver, start the queue. Note that we don't
3328 	 * need the queueing logic if the driver is only supporting high-level
3329 	 * memory operations.
3330 	 */
3331 	if (ctlr->transfer) {
3332 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3333 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3334 		status = spi_controller_initialize_queue(ctlr);
3335 		if (status) {
3336 			device_del(&ctlr->dev);
3337 			goto free_bus_id;
3338 		}
3339 	}
3340 	/* Add statistics */
3341 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3342 	if (!ctlr->pcpu_statistics) {
3343 		dev_err(dev, "Error allocating per-cpu statistics\n");
3344 		status = -ENOMEM;
3345 		goto destroy_queue;
3346 	}
3347 
3348 	mutex_lock(&board_lock);
3349 	list_add_tail(&ctlr->list, &spi_controller_list);
3350 	list_for_each_entry(bi, &board_list, list)
3351 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3352 	mutex_unlock(&board_lock);
3353 
3354 	/* Register devices from the device tree and ACPI */
3355 	of_register_spi_devices(ctlr);
3356 	acpi_register_spi_devices(ctlr);
3357 	return status;
3358 
3359 destroy_queue:
3360 	spi_destroy_queue(ctlr);
3361 free_bus_id:
3362 	mutex_lock(&board_lock);
3363 	idr_remove(&spi_controller_idr, ctlr->bus_num);
3364 	mutex_unlock(&board_lock);
3365 	return status;
3366 }
3367 EXPORT_SYMBOL_GPL(spi_register_controller);
3368 
3369 static void devm_spi_unregister(struct device *dev, void *res)
3370 {
3371 	spi_unregister_controller(*(struct spi_controller **)res);
3372 }
3373 
3374 /**
3375  * devm_spi_register_controller - register managed SPI host or target controller
3376  * @dev:    device managing SPI controller
3377  * @ctlr: initialized controller, originally from spi_alloc_host() or
3378  *	spi_alloc_target()
3379  * Context: can sleep
3380  *
3381  * Register a SPI device as with spi_register_controller() which will
3382  * automatically be unregistered and freed.
3383  *
3384  * Return: zero on success, else a negative error code.
3385  */
3386 int devm_spi_register_controller(struct device *dev,
3387 				 struct spi_controller *ctlr)
3388 {
3389 	struct spi_controller **ptr;
3390 	int ret;
3391 
3392 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3393 	if (!ptr)
3394 		return -ENOMEM;
3395 
3396 	ret = spi_register_controller(ctlr);
3397 	if (!ret) {
3398 		*ptr = ctlr;
3399 		devres_add(dev, ptr);
3400 	} else {
3401 		devres_free(ptr);
3402 	}
3403 
3404 	return ret;
3405 }
3406 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3407 
3408 static int __unregister(struct device *dev, void *null)
3409 {
3410 	spi_unregister_device(to_spi_device(dev));
3411 	return 0;
3412 }
3413 
3414 /**
3415  * spi_unregister_controller - unregister SPI host or target controller
3416  * @ctlr: the controller being unregistered
3417  * Context: can sleep
3418  *
3419  * This call is used only by SPI controller drivers, which are the
3420  * only ones directly touching chip registers.
3421  *
3422  * This must be called from context that can sleep.
3423  *
3424  * Note that this function also drops a reference to the controller.
3425  */
3426 void spi_unregister_controller(struct spi_controller *ctlr)
3427 {
3428 	struct spi_controller *found;
3429 	int id = ctlr->bus_num;
3430 
3431 	/* Prevent addition of new devices, unregister existing ones */
3432 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3433 		mutex_lock(&ctlr->add_lock);
3434 
3435 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3436 
3437 	/* First make sure that this controller was ever added */
3438 	mutex_lock(&board_lock);
3439 	found = idr_find(&spi_controller_idr, id);
3440 	mutex_unlock(&board_lock);
3441 	if (ctlr->queued) {
3442 		if (spi_destroy_queue(ctlr))
3443 			dev_err(&ctlr->dev, "queue remove failed\n");
3444 	}
3445 	mutex_lock(&board_lock);
3446 	list_del(&ctlr->list);
3447 	mutex_unlock(&board_lock);
3448 
3449 	device_del(&ctlr->dev);
3450 
3451 	/* Free bus id */
3452 	mutex_lock(&board_lock);
3453 	if (found == ctlr)
3454 		idr_remove(&spi_controller_idr, id);
3455 	mutex_unlock(&board_lock);
3456 
3457 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3458 		mutex_unlock(&ctlr->add_lock);
3459 
3460 	/*
3461 	 * Release the last reference on the controller if its driver
3462 	 * has not yet been converted to devm_spi_alloc_host/target().
3463 	 */
3464 	if (!ctlr->devm_allocated)
3465 		put_device(&ctlr->dev);
3466 }
3467 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3468 
3469 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3470 {
3471 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3472 }
3473 
3474 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3475 {
3476 	mutex_lock(&ctlr->bus_lock_mutex);
3477 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3478 	mutex_unlock(&ctlr->bus_lock_mutex);
3479 }
3480 
3481 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3482 {
3483 	mutex_lock(&ctlr->bus_lock_mutex);
3484 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3485 	mutex_unlock(&ctlr->bus_lock_mutex);
3486 }
3487 
3488 int spi_controller_suspend(struct spi_controller *ctlr)
3489 {
3490 	int ret = 0;
3491 
3492 	/* Basically no-ops for non-queued controllers */
3493 	if (ctlr->queued) {
3494 		ret = spi_stop_queue(ctlr);
3495 		if (ret)
3496 			dev_err(&ctlr->dev, "queue stop failed\n");
3497 	}
3498 
3499 	__spi_mark_suspended(ctlr);
3500 	return ret;
3501 }
3502 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3503 
3504 int spi_controller_resume(struct spi_controller *ctlr)
3505 {
3506 	int ret = 0;
3507 
3508 	__spi_mark_resumed(ctlr);
3509 
3510 	if (ctlr->queued) {
3511 		ret = spi_start_queue(ctlr);
3512 		if (ret)
3513 			dev_err(&ctlr->dev, "queue restart failed\n");
3514 	}
3515 	return ret;
3516 }
3517 EXPORT_SYMBOL_GPL(spi_controller_resume);
3518 
3519 /*-------------------------------------------------------------------------*/
3520 
3521 /* Core methods for spi_message alterations */
3522 
3523 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3524 					    struct spi_message *msg,
3525 					    void *res)
3526 {
3527 	struct spi_replaced_transfers *rxfer = res;
3528 	size_t i;
3529 
3530 	/* Call extra callback if requested */
3531 	if (rxfer->release)
3532 		rxfer->release(ctlr, msg, res);
3533 
3534 	/* Insert replaced transfers back into the message */
3535 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3536 
3537 	/* Remove the formerly inserted entries */
3538 	for (i = 0; i < rxfer->inserted; i++)
3539 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3540 }
3541 
3542 /**
3543  * spi_replace_transfers - replace transfers with several transfers
3544  *                         and register change with spi_message.resources
3545  * @msg:           the spi_message we work upon
3546  * @xfer_first:    the first spi_transfer we want to replace
3547  * @remove:        number of transfers to remove
3548  * @insert:        the number of transfers we want to insert instead
3549  * @release:       extra release code necessary in some circumstances
3550  * @extradatasize: extra data to allocate (with alignment guarantees
3551  *                 of struct @spi_transfer)
3552  * @gfp:           gfp flags
3553  *
3554  * Returns: pointer to @spi_replaced_transfers,
3555  *          PTR_ERR(...) in case of errors.
3556  */
3557 static struct spi_replaced_transfers *spi_replace_transfers(
3558 	struct spi_message *msg,
3559 	struct spi_transfer *xfer_first,
3560 	size_t remove,
3561 	size_t insert,
3562 	spi_replaced_release_t release,
3563 	size_t extradatasize,
3564 	gfp_t gfp)
3565 {
3566 	struct spi_replaced_transfers *rxfer;
3567 	struct spi_transfer *xfer;
3568 	size_t i;
3569 
3570 	/* Allocate the structure using spi_res */
3571 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3572 			      struct_size(rxfer, inserted_transfers, insert)
3573 			      + extradatasize,
3574 			      gfp);
3575 	if (!rxfer)
3576 		return ERR_PTR(-ENOMEM);
3577 
3578 	/* The release code to invoke before running the generic release */
3579 	rxfer->release = release;
3580 
3581 	/* Assign extradata */
3582 	if (extradatasize)
3583 		rxfer->extradata =
3584 			&rxfer->inserted_transfers[insert];
3585 
3586 	/* Init the replaced_transfers list */
3587 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3588 
3589 	/*
3590 	 * Assign the list_entry after which we should reinsert
3591 	 * the @replaced_transfers - it may be spi_message.messages!
3592 	 */
3593 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3594 
3595 	/* Remove the requested number of transfers */
3596 	for (i = 0; i < remove; i++) {
3597 		/*
3598 		 * If the entry after replaced_after it is msg->transfers
3599 		 * then we have been requested to remove more transfers
3600 		 * than are in the list.
3601 		 */
3602 		if (rxfer->replaced_after->next == &msg->transfers) {
3603 			dev_err(&msg->spi->dev,
3604 				"requested to remove more spi_transfers than are available\n");
3605 			/* Insert replaced transfers back into the message */
3606 			list_splice(&rxfer->replaced_transfers,
3607 				    rxfer->replaced_after);
3608 
3609 			/* Free the spi_replace_transfer structure... */
3610 			spi_res_free(rxfer);
3611 
3612 			/* ...and return with an error */
3613 			return ERR_PTR(-EINVAL);
3614 		}
3615 
3616 		/*
3617 		 * Remove the entry after replaced_after from list of
3618 		 * transfers and add it to list of replaced_transfers.
3619 		 */
3620 		list_move_tail(rxfer->replaced_after->next,
3621 			       &rxfer->replaced_transfers);
3622 	}
3623 
3624 	/*
3625 	 * Create copy of the given xfer with identical settings
3626 	 * based on the first transfer to get removed.
3627 	 */
3628 	for (i = 0; i < insert; i++) {
3629 		/* We need to run in reverse order */
3630 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3631 
3632 		/* Copy all spi_transfer data */
3633 		memcpy(xfer, xfer_first, sizeof(*xfer));
3634 
3635 		/* Add to list */
3636 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3637 
3638 		/* Clear cs_change and delay for all but the last */
3639 		if (i) {
3640 			xfer->cs_change = false;
3641 			xfer->delay.value = 0;
3642 		}
3643 	}
3644 
3645 	/* Set up inserted... */
3646 	rxfer->inserted = insert;
3647 
3648 	/* ...and register it with spi_res/spi_message */
3649 	spi_res_add(msg, rxfer);
3650 
3651 	return rxfer;
3652 }
3653 
3654 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3655 					struct spi_message *msg,
3656 					struct spi_transfer **xferp,
3657 					size_t maxsize)
3658 {
3659 	struct spi_transfer *xfer = *xferp, *xfers;
3660 	struct spi_replaced_transfers *srt;
3661 	size_t offset;
3662 	size_t count, i;
3663 
3664 	/* Calculate how many we have to replace */
3665 	count = DIV_ROUND_UP(xfer->len, maxsize);
3666 
3667 	/* Create replacement */
3668 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3669 	if (IS_ERR(srt))
3670 		return PTR_ERR(srt);
3671 	xfers = srt->inserted_transfers;
3672 
3673 	/*
3674 	 * Now handle each of those newly inserted spi_transfers.
3675 	 * Note that the replacements spi_transfers all are preset
3676 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3677 	 * are all identical (as well as most others)
3678 	 * so we just have to fix up len and the pointers.
3679 	 */
3680 
3681 	/*
3682 	 * The first transfer just needs the length modified, so we
3683 	 * run it outside the loop.
3684 	 */
3685 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3686 
3687 	/* All the others need rx_buf/tx_buf also set */
3688 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3689 		/* Update rx_buf, tx_buf and DMA */
3690 		if (xfers[i].rx_buf)
3691 			xfers[i].rx_buf += offset;
3692 		if (xfers[i].tx_buf)
3693 			xfers[i].tx_buf += offset;
3694 
3695 		/* Update length */
3696 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3697 	}
3698 
3699 	/*
3700 	 * We set up xferp to the last entry we have inserted,
3701 	 * so that we skip those already split transfers.
3702 	 */
3703 	*xferp = &xfers[count - 1];
3704 
3705 	/* Increment statistics counters */
3706 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3707 				       transfers_split_maxsize);
3708 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3709 				       transfers_split_maxsize);
3710 
3711 	return 0;
3712 }
3713 
3714 /**
3715  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3716  *                               when an individual transfer exceeds a
3717  *                               certain size
3718  * @ctlr:    the @spi_controller for this transfer
3719  * @msg:   the @spi_message to transform
3720  * @maxsize:  the maximum when to apply this
3721  *
3722  * This function allocates resources that are automatically freed during the
3723  * spi message unoptimize phase so this function should only be called from
3724  * optimize_message callbacks.
3725  *
3726  * Return: status of transformation
3727  */
3728 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3729 				struct spi_message *msg,
3730 				size_t maxsize)
3731 {
3732 	struct spi_transfer *xfer;
3733 	int ret;
3734 
3735 	/*
3736 	 * Iterate over the transfer_list,
3737 	 * but note that xfer is advanced to the last transfer inserted
3738 	 * to avoid checking sizes again unnecessarily (also xfer does
3739 	 * potentially belong to a different list by the time the
3740 	 * replacement has happened).
3741 	 */
3742 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3743 		if (xfer->len > maxsize) {
3744 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3745 							   maxsize);
3746 			if (ret)
3747 				return ret;
3748 		}
3749 	}
3750 
3751 	return 0;
3752 }
3753 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
3754 
3755 
3756 /**
3757  * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3758  *                                when an individual transfer exceeds a
3759  *                                certain number of SPI words
3760  * @ctlr:     the @spi_controller for this transfer
3761  * @msg:      the @spi_message to transform
3762  * @maxwords: the number of words to limit each transfer to
3763  *
3764  * This function allocates resources that are automatically freed during the
3765  * spi message unoptimize phase so this function should only be called from
3766  * optimize_message callbacks.
3767  *
3768  * Return: status of transformation
3769  */
3770 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3771 				 struct spi_message *msg,
3772 				 size_t maxwords)
3773 {
3774 	struct spi_transfer *xfer;
3775 
3776 	/*
3777 	 * Iterate over the transfer_list,
3778 	 * but note that xfer is advanced to the last transfer inserted
3779 	 * to avoid checking sizes again unnecessarily (also xfer does
3780 	 * potentially belong to a different list by the time the
3781 	 * replacement has happened).
3782 	 */
3783 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3784 		size_t maxsize;
3785 		int ret;
3786 
3787 		maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
3788 		if (xfer->len > maxsize) {
3789 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3790 							   maxsize);
3791 			if (ret)
3792 				return ret;
3793 		}
3794 	}
3795 
3796 	return 0;
3797 }
3798 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
3799 
3800 /*-------------------------------------------------------------------------*/
3801 
3802 /*
3803  * Core methods for SPI controller protocol drivers. Some of the
3804  * other core methods are currently defined as inline functions.
3805  */
3806 
3807 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3808 					u8 bits_per_word)
3809 {
3810 	if (ctlr->bits_per_word_mask) {
3811 		/* Only 32 bits fit in the mask */
3812 		if (bits_per_word > 32)
3813 			return -EINVAL;
3814 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3815 			return -EINVAL;
3816 	}
3817 
3818 	return 0;
3819 }
3820 
3821 /**
3822  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3823  * @spi: the device that requires specific CS timing configuration
3824  *
3825  * Return: zero on success, else a negative error code.
3826  */
3827 static int spi_set_cs_timing(struct spi_device *spi)
3828 {
3829 	struct device *parent = spi->controller->dev.parent;
3830 	int status = 0;
3831 
3832 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3833 		if (spi->controller->auto_runtime_pm) {
3834 			status = pm_runtime_get_sync(parent);
3835 			if (status < 0) {
3836 				pm_runtime_put_noidle(parent);
3837 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3838 					status);
3839 				return status;
3840 			}
3841 
3842 			status = spi->controller->set_cs_timing(spi);
3843 			pm_runtime_put_autosuspend(parent);
3844 		} else {
3845 			status = spi->controller->set_cs_timing(spi);
3846 		}
3847 	}
3848 	return status;
3849 }
3850 
3851 /**
3852  * spi_setup - setup SPI mode and clock rate
3853  * @spi: the device whose settings are being modified
3854  * Context: can sleep, and no requests are queued to the device
3855  *
3856  * SPI protocol drivers may need to update the transfer mode if the
3857  * device doesn't work with its default.  They may likewise need
3858  * to update clock rates or word sizes from initial values.  This function
3859  * changes those settings, and must be called from a context that can sleep.
3860  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3861  * effect the next time the device is selected and data is transferred to
3862  * or from it.  When this function returns, the SPI device is deselected.
3863  *
3864  * Note that this call will fail if the protocol driver specifies an option
3865  * that the underlying controller or its driver does not support.  For
3866  * example, not all hardware supports wire transfers using nine bit words,
3867  * LSB-first wire encoding, or active-high chipselects.
3868  *
3869  * Return: zero on success, else a negative error code.
3870  */
3871 int spi_setup(struct spi_device *spi)
3872 {
3873 	unsigned	bad_bits, ugly_bits;
3874 	int		status;
3875 
3876 	/*
3877 	 * Check mode to prevent that any two of DUAL, QUAD and NO_MOSI/MISO
3878 	 * are set at the same time.
3879 	 */
3880 	if ((hweight_long(spi->mode &
3881 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3882 	    (hweight_long(spi->mode &
3883 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3884 		dev_err(&spi->dev,
3885 		"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3886 		return -EINVAL;
3887 	}
3888 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3889 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3890 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3891 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3892 		return -EINVAL;
3893 	/* Check against conflicting MOSI idle configuration */
3894 	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3895 		dev_err(&spi->dev,
3896 			"setup: MOSI configured to idle low and high at the same time.\n");
3897 		return -EINVAL;
3898 	}
3899 	/*
3900 	 * Help drivers fail *cleanly* when they need options
3901 	 * that aren't supported with their current controller.
3902 	 * SPI_CS_WORD has a fallback software implementation,
3903 	 * so it is ignored here.
3904 	 */
3905 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3906 				 SPI_NO_TX | SPI_NO_RX);
3907 	ugly_bits = bad_bits &
3908 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3909 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3910 	if (ugly_bits) {
3911 		dev_warn(&spi->dev,
3912 			 "setup: ignoring unsupported mode bits %x\n",
3913 			 ugly_bits);
3914 		spi->mode &= ~ugly_bits;
3915 		bad_bits &= ~ugly_bits;
3916 	}
3917 	if (bad_bits) {
3918 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3919 			bad_bits);
3920 		return -EINVAL;
3921 	}
3922 
3923 	if (!spi->bits_per_word) {
3924 		spi->bits_per_word = 8;
3925 	} else {
3926 		/*
3927 		 * Some controllers may not support the default 8 bits-per-word
3928 		 * so only perform the check when this is explicitly provided.
3929 		 */
3930 		status = __spi_validate_bits_per_word(spi->controller,
3931 						      spi->bits_per_word);
3932 		if (status)
3933 			return status;
3934 	}
3935 
3936 	if (spi->controller->max_speed_hz &&
3937 	    (!spi->max_speed_hz ||
3938 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3939 		spi->max_speed_hz = spi->controller->max_speed_hz;
3940 
3941 	mutex_lock(&spi->controller->io_mutex);
3942 
3943 	if (spi->controller->setup) {
3944 		status = spi->controller->setup(spi);
3945 		if (status) {
3946 			mutex_unlock(&spi->controller->io_mutex);
3947 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3948 				status);
3949 			return status;
3950 		}
3951 	}
3952 
3953 	status = spi_set_cs_timing(spi);
3954 	if (status) {
3955 		mutex_unlock(&spi->controller->io_mutex);
3956 		return status;
3957 	}
3958 
3959 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3960 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3961 		if (status < 0) {
3962 			mutex_unlock(&spi->controller->io_mutex);
3963 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3964 				status);
3965 			return status;
3966 		}
3967 
3968 		/*
3969 		 * We do not want to return positive value from pm_runtime_get,
3970 		 * there are many instances of devices calling spi_setup() and
3971 		 * checking for a non-zero return value instead of a negative
3972 		 * return value.
3973 		 */
3974 		status = 0;
3975 
3976 		spi_set_cs(spi, false, true);
3977 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
3978 	} else {
3979 		spi_set_cs(spi, false, true);
3980 	}
3981 
3982 	mutex_unlock(&spi->controller->io_mutex);
3983 
3984 	if (spi->rt && !spi->controller->rt) {
3985 		spi->controller->rt = true;
3986 		spi_set_thread_rt(spi->controller);
3987 	}
3988 
3989 	trace_spi_setup(spi, status);
3990 
3991 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
3992 			spi->mode & SPI_MODE_X_MASK,
3993 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
3994 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
3995 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
3996 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
3997 			spi->bits_per_word, spi->max_speed_hz,
3998 			status);
3999 
4000 	return status;
4001 }
4002 EXPORT_SYMBOL_GPL(spi_setup);
4003 
4004 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4005 				       struct spi_device *spi)
4006 {
4007 	int delay1, delay2;
4008 
4009 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4010 	if (delay1 < 0)
4011 		return delay1;
4012 
4013 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4014 	if (delay2 < 0)
4015 		return delay2;
4016 
4017 	if (delay1 < delay2)
4018 		memcpy(&xfer->word_delay, &spi->word_delay,
4019 		       sizeof(xfer->word_delay));
4020 
4021 	return 0;
4022 }
4023 
4024 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4025 {
4026 	struct spi_controller *ctlr = spi->controller;
4027 	struct spi_transfer *xfer;
4028 	int w_size;
4029 
4030 	if (list_empty(&message->transfers))
4031 		return -EINVAL;
4032 
4033 	message->spi = spi;
4034 
4035 	/*
4036 	 * Half-duplex links include original MicroWire, and ones with
4037 	 * only one data pin like SPI_3WIRE (switches direction) or where
4038 	 * either MOSI or MISO is missing.  They can also be caused by
4039 	 * software limitations.
4040 	 */
4041 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4042 	    (spi->mode & SPI_3WIRE)) {
4043 		unsigned flags = ctlr->flags;
4044 
4045 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4046 			if (xfer->rx_buf && xfer->tx_buf)
4047 				return -EINVAL;
4048 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4049 				return -EINVAL;
4050 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4051 				return -EINVAL;
4052 		}
4053 	}
4054 
4055 	/*
4056 	 * Set transfer bits_per_word and max speed as spi device default if
4057 	 * it is not set for this transfer.
4058 	 * Set transfer tx_nbits and rx_nbits as single transfer default
4059 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4060 	 * Ensure transfer word_delay is at least as long as that required by
4061 	 * device itself.
4062 	 */
4063 	message->frame_length = 0;
4064 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4065 		xfer->effective_speed_hz = 0;
4066 		message->frame_length += xfer->len;
4067 		if (!xfer->bits_per_word)
4068 			xfer->bits_per_word = spi->bits_per_word;
4069 
4070 		if (!xfer->speed_hz)
4071 			xfer->speed_hz = spi->max_speed_hz;
4072 
4073 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4074 			xfer->speed_hz = ctlr->max_speed_hz;
4075 
4076 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4077 			return -EINVAL;
4078 
4079 		/* DDR mode is supported only if controller has dtr_caps=true.
4080 		 * default considered as SDR mode for SPI and QSPI controller.
4081 		 * Note: This is applicable only to QSPI controller.
4082 		 */
4083 		if (xfer->dtr_mode && !ctlr->dtr_caps)
4084 			return -EINVAL;
4085 
4086 		/*
4087 		 * SPI transfer length should be multiple of SPI word size
4088 		 * where SPI word size should be power-of-two multiple.
4089 		 */
4090 		if (xfer->bits_per_word <= 8)
4091 			w_size = 1;
4092 		else if (xfer->bits_per_word <= 16)
4093 			w_size = 2;
4094 		else
4095 			w_size = 4;
4096 
4097 		/* No partial transfers accepted */
4098 		if (xfer->len % w_size)
4099 			return -EINVAL;
4100 
4101 		if (xfer->speed_hz && ctlr->min_speed_hz &&
4102 		    xfer->speed_hz < ctlr->min_speed_hz)
4103 			return -EINVAL;
4104 
4105 		if (xfer->tx_buf && !xfer->tx_nbits)
4106 			xfer->tx_nbits = SPI_NBITS_SINGLE;
4107 		if (xfer->rx_buf && !xfer->rx_nbits)
4108 			xfer->rx_nbits = SPI_NBITS_SINGLE;
4109 		/*
4110 		 * Check transfer tx/rx_nbits:
4111 		 * 1. check the value matches one of single, dual and quad
4112 		 * 2. check tx/rx_nbits match the mode in spi_device
4113 		 */
4114 		if (xfer->tx_buf) {
4115 			if (spi->mode & SPI_NO_TX)
4116 				return -EINVAL;
4117 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4118 				xfer->tx_nbits != SPI_NBITS_DUAL &&
4119 				xfer->tx_nbits != SPI_NBITS_QUAD &&
4120 				xfer->tx_nbits != SPI_NBITS_OCTAL)
4121 				return -EINVAL;
4122 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4123 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
4124 				return -EINVAL;
4125 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4126 				!(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
4127 				return -EINVAL;
4128 			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
4129 				!(spi->mode & SPI_TX_OCTAL))
4130 				return -EINVAL;
4131 		}
4132 		/* Check transfer rx_nbits */
4133 		if (xfer->rx_buf) {
4134 			if (spi->mode & SPI_NO_RX)
4135 				return -EINVAL;
4136 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4137 				xfer->rx_nbits != SPI_NBITS_DUAL &&
4138 				xfer->rx_nbits != SPI_NBITS_QUAD &&
4139 				xfer->rx_nbits != SPI_NBITS_OCTAL)
4140 				return -EINVAL;
4141 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4142 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
4143 				return -EINVAL;
4144 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4145 				!(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
4146 				return -EINVAL;
4147 			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
4148 				!(spi->mode & SPI_RX_OCTAL))
4149 				return -EINVAL;
4150 		}
4151 
4152 		if (_spi_xfer_word_delay_update(xfer, spi))
4153 			return -EINVAL;
4154 
4155 		/* Make sure controller supports required offload features. */
4156 		if (xfer->offload_flags) {
4157 			if (!message->offload)
4158 				return -EINVAL;
4159 
4160 			if (xfer->offload_flags & ~message->offload->xfer_flags)
4161 				return -EINVAL;
4162 		}
4163 	}
4164 
4165 	message->status = -EINPROGRESS;
4166 
4167 	return 0;
4168 }
4169 
4170 /*
4171  * spi_split_transfers - generic handling of transfer splitting
4172  * @msg: the message to split
4173  *
4174  * Under certain conditions, a SPI controller may not support arbitrary
4175  * transfer sizes or other features required by a peripheral. This function
4176  * will split the transfers in the message into smaller transfers that are
4177  * supported by the controller.
4178  *
4179  * Controllers with special requirements not covered here can also split
4180  * transfers in the optimize_message() callback.
4181  *
4182  * Context: can sleep
4183  * Return: zero on success, else a negative error code
4184  */
4185 static int spi_split_transfers(struct spi_message *msg)
4186 {
4187 	struct spi_controller *ctlr = msg->spi->controller;
4188 	struct spi_transfer *xfer;
4189 	int ret;
4190 
4191 	/*
4192 	 * If an SPI controller does not support toggling the CS line on each
4193 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4194 	 * for the CS line, we can emulate the CS-per-word hardware function by
4195 	 * splitting transfers into one-word transfers and ensuring that
4196 	 * cs_change is set for each transfer.
4197 	 */
4198 	if ((msg->spi->mode & SPI_CS_WORD) &&
4199 	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4200 		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4201 		if (ret)
4202 			return ret;
4203 
4204 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4205 			/* Don't change cs_change on the last entry in the list */
4206 			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4207 				break;
4208 
4209 			xfer->cs_change = 1;
4210 		}
4211 	} else {
4212 		ret = spi_split_transfers_maxsize(ctlr, msg,
4213 						  spi_max_transfer_size(msg->spi));
4214 		if (ret)
4215 			return ret;
4216 	}
4217 
4218 	return 0;
4219 }
4220 
4221 /*
4222  * __spi_optimize_message - shared implementation for spi_optimize_message()
4223  *                          and spi_maybe_optimize_message()
4224  * @spi: the device that will be used for the message
4225  * @msg: the message to optimize
4226  *
4227  * Peripheral drivers will call spi_optimize_message() and the spi core will
4228  * call spi_maybe_optimize_message() instead of calling this directly.
4229  *
4230  * It is not valid to call this on a message that has already been optimized.
4231  *
4232  * Return: zero on success, else a negative error code
4233  */
4234 static int __spi_optimize_message(struct spi_device *spi,
4235 				  struct spi_message *msg)
4236 {
4237 	struct spi_controller *ctlr = spi->controller;
4238 	int ret;
4239 
4240 	ret = __spi_validate(spi, msg);
4241 	if (ret)
4242 		return ret;
4243 
4244 	ret = spi_split_transfers(msg);
4245 	if (ret)
4246 		return ret;
4247 
4248 	if (ctlr->optimize_message) {
4249 		ret = ctlr->optimize_message(msg);
4250 		if (ret) {
4251 			spi_res_release(ctlr, msg);
4252 			return ret;
4253 		}
4254 	}
4255 
4256 	msg->optimized = true;
4257 
4258 	return 0;
4259 }
4260 
4261 /*
4262  * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4263  * @spi: the device that will be used for the message
4264  * @msg: the message to optimize
4265  * Return: zero on success, else a negative error code
4266  */
4267 static int spi_maybe_optimize_message(struct spi_device *spi,
4268 				      struct spi_message *msg)
4269 {
4270 	if (spi->controller->defer_optimize_message) {
4271 		msg->spi = spi;
4272 		return 0;
4273 	}
4274 
4275 	if (msg->pre_optimized)
4276 		return 0;
4277 
4278 	return __spi_optimize_message(spi, msg);
4279 }
4280 
4281 /**
4282  * spi_optimize_message - do any one-time validation and setup for a SPI message
4283  * @spi: the device that will be used for the message
4284  * @msg: the message to optimize
4285  *
4286  * Peripheral drivers that reuse the same message repeatedly may call this to
4287  * perform as much message prep as possible once, rather than repeating it each
4288  * time a message transfer is performed to improve throughput and reduce CPU
4289  * usage.
4290  *
4291  * Once a message has been optimized, it cannot be modified with the exception
4292  * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4293  * only the data in the memory it points to).
4294  *
4295  * Calls to this function must be balanced with calls to spi_unoptimize_message()
4296  * to avoid leaking resources.
4297  *
4298  * Context: can sleep
4299  * Return: zero on success, else a negative error code
4300  */
4301 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4302 {
4303 	int ret;
4304 
4305 	/*
4306 	 * Pre-optimization is not supported and optimization is deferred e.g.
4307 	 * when using spi-mux.
4308 	 */
4309 	if (spi->controller->defer_optimize_message)
4310 		return 0;
4311 
4312 	ret = __spi_optimize_message(spi, msg);
4313 	if (ret)
4314 		return ret;
4315 
4316 	/*
4317 	 * This flag indicates that the peripheral driver called spi_optimize_message()
4318 	 * and therefore we shouldn't unoptimize message automatically when finalizing
4319 	 * the message but rather wait until spi_unoptimize_message() is called
4320 	 * by the peripheral driver.
4321 	 */
4322 	msg->pre_optimized = true;
4323 
4324 	return 0;
4325 }
4326 EXPORT_SYMBOL_GPL(spi_optimize_message);
4327 
4328 /**
4329  * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4330  * @msg: the message to unoptimize
4331  *
4332  * Calls to this function must be balanced with calls to spi_optimize_message().
4333  *
4334  * Context: can sleep
4335  */
4336 void spi_unoptimize_message(struct spi_message *msg)
4337 {
4338 	if (msg->spi->controller->defer_optimize_message)
4339 		return;
4340 
4341 	__spi_unoptimize_message(msg);
4342 	msg->pre_optimized = false;
4343 }
4344 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
4345 
4346 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4347 {
4348 	struct spi_controller *ctlr = spi->controller;
4349 	struct spi_transfer *xfer;
4350 
4351 	/*
4352 	 * Some controllers do not support doing regular SPI transfers. Return
4353 	 * ENOTSUPP when this is the case.
4354 	 */
4355 	if (!ctlr->transfer)
4356 		return -ENOTSUPP;
4357 
4358 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4359 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4360 
4361 	trace_spi_message_submit(message);
4362 
4363 	if (!ctlr->ptp_sts_supported) {
4364 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4365 			xfer->ptp_sts_word_pre = 0;
4366 			ptp_read_system_prets(xfer->ptp_sts);
4367 		}
4368 	}
4369 
4370 	return ctlr->transfer(spi, message);
4371 }
4372 
4373 static void devm_spi_unoptimize_message(void *msg)
4374 {
4375 	spi_unoptimize_message(msg);
4376 }
4377 
4378 /**
4379  * devm_spi_optimize_message - managed version of spi_optimize_message()
4380  * @dev: the device that manages @msg (usually @spi->dev)
4381  * @spi: the device that will be used for the message
4382  * @msg: the message to optimize
4383  * Return: zero on success, else a negative error code
4384  *
4385  * spi_unoptimize_message() will automatically be called when the device is
4386  * removed.
4387  */
4388 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4389 			      struct spi_message *msg)
4390 {
4391 	int ret;
4392 
4393 	ret = spi_optimize_message(spi, msg);
4394 	if (ret)
4395 		return ret;
4396 
4397 	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4398 }
4399 EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
4400 
4401 /**
4402  * spi_async - asynchronous SPI transfer
4403  * @spi: device with which data will be exchanged
4404  * @message: describes the data transfers, including completion callback
4405  * Context: any (IRQs may be blocked, etc)
4406  *
4407  * This call may be used in_irq and other contexts which can't sleep,
4408  * as well as from task contexts which can sleep.
4409  *
4410  * The completion callback is invoked in a context which can't sleep.
4411  * Before that invocation, the value of message->status is undefined.
4412  * When the callback is issued, message->status holds either zero (to
4413  * indicate complete success) or a negative error code.  After that
4414  * callback returns, the driver which issued the transfer request may
4415  * deallocate the associated memory; it's no longer in use by any SPI
4416  * core or controller driver code.
4417  *
4418  * Note that although all messages to a spi_device are handled in
4419  * FIFO order, messages may go to different devices in other orders.
4420  * Some device might be higher priority, or have various "hard" access
4421  * time requirements, for example.
4422  *
4423  * On detection of any fault during the transfer, processing of
4424  * the entire message is aborted, and the device is deselected.
4425  * Until returning from the associated message completion callback,
4426  * no other spi_message queued to that device will be processed.
4427  * (This rule applies equally to all the synchronous transfer calls,
4428  * which are wrappers around this core asynchronous primitive.)
4429  *
4430  * Return: zero on success, else a negative error code.
4431  */
4432 int spi_async(struct spi_device *spi, struct spi_message *message)
4433 {
4434 	struct spi_controller *ctlr = spi->controller;
4435 	int ret;
4436 	unsigned long flags;
4437 
4438 	ret = spi_maybe_optimize_message(spi, message);
4439 	if (ret)
4440 		return ret;
4441 
4442 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4443 
4444 	if (ctlr->bus_lock_flag)
4445 		ret = -EBUSY;
4446 	else
4447 		ret = __spi_async(spi, message);
4448 
4449 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4450 
4451 	return ret;
4452 }
4453 EXPORT_SYMBOL_GPL(spi_async);
4454 
4455 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4456 {
4457 	bool was_busy;
4458 	int ret;
4459 
4460 	mutex_lock(&ctlr->io_mutex);
4461 
4462 	was_busy = ctlr->busy;
4463 
4464 	ctlr->cur_msg = msg;
4465 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4466 	if (ret)
4467 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4468 	ctlr->cur_msg = NULL;
4469 	ctlr->fallback = false;
4470 
4471 	if (!was_busy) {
4472 		kfree(ctlr->dummy_rx);
4473 		ctlr->dummy_rx = NULL;
4474 		kfree(ctlr->dummy_tx);
4475 		ctlr->dummy_tx = NULL;
4476 		if (ctlr->unprepare_transfer_hardware &&
4477 		    ctlr->unprepare_transfer_hardware(ctlr))
4478 			dev_err(&ctlr->dev,
4479 				"failed to unprepare transfer hardware\n");
4480 		spi_idle_runtime_pm(ctlr);
4481 	}
4482 
4483 	mutex_unlock(&ctlr->io_mutex);
4484 }
4485 
4486 /*-------------------------------------------------------------------------*/
4487 
4488 /*
4489  * Utility methods for SPI protocol drivers, layered on
4490  * top of the core.  Some other utility methods are defined as
4491  * inline functions.
4492  */
4493 
4494 static void spi_complete(void *arg)
4495 {
4496 	complete(arg);
4497 }
4498 
4499 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4500 {
4501 	DECLARE_COMPLETION_ONSTACK(done);
4502 	unsigned long flags;
4503 	int status;
4504 	struct spi_controller *ctlr = spi->controller;
4505 
4506 	if (__spi_check_suspended(ctlr)) {
4507 		dev_warn_once(&spi->dev, "Attempted to sync while suspend\n");
4508 		return -ESHUTDOWN;
4509 	}
4510 
4511 	status = spi_maybe_optimize_message(spi, message);
4512 	if (status)
4513 		return status;
4514 
4515 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4516 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4517 
4518 	/*
4519 	 * Checking queue_empty here only guarantees async/sync message
4520 	 * ordering when coming from the same context. It does not need to
4521 	 * guard against reentrancy from a different context. The io_mutex
4522 	 * will catch those cases.
4523 	 */
4524 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4525 		message->actual_length = 0;
4526 		message->status = -EINPROGRESS;
4527 
4528 		trace_spi_message_submit(message);
4529 
4530 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4531 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4532 
4533 		__spi_transfer_message_noqueue(ctlr, message);
4534 
4535 		return message->status;
4536 	}
4537 
4538 	/*
4539 	 * There are messages in the async queue that could have originated
4540 	 * from the same context, so we need to preserve ordering.
4541 	 * Therefor we send the message to the async queue and wait until they
4542 	 * are completed.
4543 	 */
4544 	message->complete = spi_complete;
4545 	message->context = &done;
4546 
4547 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4548 	status = __spi_async(spi, message);
4549 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4550 
4551 	if (status == 0) {
4552 		wait_for_completion(&done);
4553 		status = message->status;
4554 	}
4555 	message->complete = NULL;
4556 	message->context = NULL;
4557 
4558 	return status;
4559 }
4560 
4561 /**
4562  * spi_sync - blocking/synchronous SPI data transfers
4563  * @spi: device with which data will be exchanged
4564  * @message: describes the data transfers
4565  * Context: can sleep
4566  *
4567  * This call may only be used from a context that may sleep.  The sleep
4568  * is non-interruptible, and has no timeout.  Low-overhead controller
4569  * drivers may DMA directly into and out of the message buffers.
4570  *
4571  * Note that the SPI device's chip select is active during the message,
4572  * and then is normally disabled between messages.  Drivers for some
4573  * frequently-used devices may want to minimize costs of selecting a chip,
4574  * by leaving it selected in anticipation that the next message will go
4575  * to the same chip.  (That may increase power usage.)
4576  *
4577  * Also, the caller is guaranteeing that the memory associated with the
4578  * message will not be freed before this call returns.
4579  *
4580  * Return: zero on success, else a negative error code.
4581  */
4582 int spi_sync(struct spi_device *spi, struct spi_message *message)
4583 {
4584 	int ret;
4585 
4586 	mutex_lock(&spi->controller->bus_lock_mutex);
4587 	ret = __spi_sync(spi, message);
4588 	mutex_unlock(&spi->controller->bus_lock_mutex);
4589 
4590 	return ret;
4591 }
4592 EXPORT_SYMBOL_GPL(spi_sync);
4593 
4594 /**
4595  * spi_sync_locked - version of spi_sync with exclusive bus usage
4596  * @spi: device with which data will be exchanged
4597  * @message: describes the data transfers
4598  * Context: can sleep
4599  *
4600  * This call may only be used from a context that may sleep.  The sleep
4601  * is non-interruptible, and has no timeout.  Low-overhead controller
4602  * drivers may DMA directly into and out of the message buffers.
4603  *
4604  * This call should be used by drivers that require exclusive access to the
4605  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4606  * be released by a spi_bus_unlock call when the exclusive access is over.
4607  *
4608  * Return: zero on success, else a negative error code.
4609  */
4610 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4611 {
4612 	return __spi_sync(spi, message);
4613 }
4614 EXPORT_SYMBOL_GPL(spi_sync_locked);
4615 
4616 /**
4617  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4618  * @ctlr: SPI bus controller that should be locked for exclusive bus access
4619  * Context: can sleep
4620  *
4621  * This call may only be used from a context that may sleep.  The sleep
4622  * is non-interruptible, and has no timeout.
4623  *
4624  * This call should be used by drivers that require exclusive access to the
4625  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4626  * exclusive access is over. Data transfer must be done by spi_sync_locked
4627  * and spi_async_locked calls when the SPI bus lock is held.
4628  *
4629  * Return: always zero.
4630  */
4631 int spi_bus_lock(struct spi_controller *ctlr)
4632 {
4633 	unsigned long flags;
4634 
4635 	mutex_lock(&ctlr->bus_lock_mutex);
4636 
4637 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4638 	ctlr->bus_lock_flag = 1;
4639 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4640 
4641 	/* Mutex remains locked until spi_bus_unlock() is called */
4642 
4643 	return 0;
4644 }
4645 EXPORT_SYMBOL_GPL(spi_bus_lock);
4646 
4647 /**
4648  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4649  * @ctlr: SPI bus controller that was locked for exclusive bus access
4650  * Context: can sleep
4651  *
4652  * This call may only be used from a context that may sleep.  The sleep
4653  * is non-interruptible, and has no timeout.
4654  *
4655  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4656  * call.
4657  *
4658  * Return: always zero.
4659  */
4660 int spi_bus_unlock(struct spi_controller *ctlr)
4661 {
4662 	ctlr->bus_lock_flag = 0;
4663 
4664 	mutex_unlock(&ctlr->bus_lock_mutex);
4665 
4666 	return 0;
4667 }
4668 EXPORT_SYMBOL_GPL(spi_bus_unlock);
4669 
4670 /* Portable code must never pass more than 32 bytes */
4671 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4672 
4673 static u8	*buf;
4674 
4675 /**
4676  * spi_write_then_read - SPI synchronous write followed by read
4677  * @spi: device with which data will be exchanged
4678  * @txbuf: data to be written (need not be DMA-safe)
4679  * @n_tx: size of txbuf, in bytes
4680  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4681  * @n_rx: size of rxbuf, in bytes
4682  * Context: can sleep
4683  *
4684  * This performs a half duplex MicroWire style transaction with the
4685  * device, sending txbuf and then reading rxbuf.  The return value
4686  * is zero for success, else a negative errno status code.
4687  * This call may only be used from a context that may sleep.
4688  *
4689  * Parameters to this routine are always copied using a small buffer.
4690  * Performance-sensitive or bulk transfer code should instead use
4691  * spi_{async,sync}() calls with DMA-safe buffers.
4692  *
4693  * Return: zero on success, else a negative error code.
4694  */
4695 int spi_write_then_read(struct spi_device *spi,
4696 		const void *txbuf, unsigned n_tx,
4697 		void *rxbuf, unsigned n_rx)
4698 {
4699 	static DEFINE_MUTEX(lock);
4700 
4701 	int			status;
4702 	struct spi_message	message;
4703 	struct spi_transfer	x[2];
4704 	u8			*local_buf;
4705 
4706 	/*
4707 	 * Use preallocated DMA-safe buffer if we can. We can't avoid
4708 	 * copying here, (as a pure convenience thing), but we can
4709 	 * keep heap costs out of the hot path unless someone else is
4710 	 * using the pre-allocated buffer or the transfer is too large.
4711 	 */
4712 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4713 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4714 				    GFP_KERNEL | GFP_DMA);
4715 		if (!local_buf)
4716 			return -ENOMEM;
4717 	} else {
4718 		local_buf = buf;
4719 	}
4720 
4721 	spi_message_init(&message);
4722 	memset(x, 0, sizeof(x));
4723 	if (n_tx) {
4724 		x[0].len = n_tx;
4725 		spi_message_add_tail(&x[0], &message);
4726 	}
4727 	if (n_rx) {
4728 		x[1].len = n_rx;
4729 		spi_message_add_tail(&x[1], &message);
4730 	}
4731 
4732 	memcpy(local_buf, txbuf, n_tx);
4733 	x[0].tx_buf = local_buf;
4734 	x[1].rx_buf = local_buf + n_tx;
4735 
4736 	/* Do the I/O */
4737 	status = spi_sync(spi, &message);
4738 	if (status == 0)
4739 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4740 
4741 	if (x[0].tx_buf == buf)
4742 		mutex_unlock(&lock);
4743 	else
4744 		kfree(local_buf);
4745 
4746 	return status;
4747 }
4748 EXPORT_SYMBOL_GPL(spi_write_then_read);
4749 
4750 /*-------------------------------------------------------------------------*/
4751 
4752 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4753 /* Must call put_device() when done with returned spi_device device */
4754 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4755 {
4756 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4757 
4758 	return dev ? to_spi_device(dev) : NULL;
4759 }
4760 
4761 /* The spi controllers are not using spi_bus, so we find it with another way */
4762 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4763 {
4764 	struct device *dev;
4765 
4766 	dev = class_find_device_by_of_node(&spi_controller_class, node);
4767 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4768 		dev = class_find_device_by_of_node(&spi_target_class, node);
4769 	if (!dev)
4770 		return NULL;
4771 
4772 	/* Reference got in class_find_device */
4773 	return container_of(dev, struct spi_controller, dev);
4774 }
4775 
4776 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4777 			 void *arg)
4778 {
4779 	struct of_reconfig_data *rd = arg;
4780 	struct spi_controller *ctlr;
4781 	struct spi_device *spi;
4782 
4783 	switch (of_reconfig_get_state_change(action, arg)) {
4784 	case OF_RECONFIG_CHANGE_ADD:
4785 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4786 		if (ctlr == NULL)
4787 			return NOTIFY_OK;	/* Not for us */
4788 
4789 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4790 			put_device(&ctlr->dev);
4791 			return NOTIFY_OK;
4792 		}
4793 
4794 		/*
4795 		 * Clear the flag before adding the device so that fw_devlink
4796 		 * doesn't skip adding consumers to this device.
4797 		 */
4798 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4799 		spi = of_register_spi_device(ctlr, rd->dn);
4800 		put_device(&ctlr->dev);
4801 
4802 		if (IS_ERR(spi)) {
4803 			pr_err("%s: failed to create for '%pOF'\n",
4804 					__func__, rd->dn);
4805 			of_node_clear_flag(rd->dn, OF_POPULATED);
4806 			return notifier_from_errno(PTR_ERR(spi));
4807 		}
4808 		break;
4809 
4810 	case OF_RECONFIG_CHANGE_REMOVE:
4811 		/* Already depopulated? */
4812 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4813 			return NOTIFY_OK;
4814 
4815 		/* Find our device by node */
4816 		spi = of_find_spi_device_by_node(rd->dn);
4817 		if (spi == NULL)
4818 			return NOTIFY_OK;	/* No? not meant for us */
4819 
4820 		/* Unregister takes one ref away */
4821 		spi_unregister_device(spi);
4822 
4823 		/* And put the reference of the find */
4824 		put_device(&spi->dev);
4825 		break;
4826 	}
4827 
4828 	return NOTIFY_OK;
4829 }
4830 
4831 static struct notifier_block spi_of_notifier = {
4832 	.notifier_call = of_spi_notify,
4833 };
4834 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4835 extern struct notifier_block spi_of_notifier;
4836 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4837 
4838 #if IS_ENABLED(CONFIG_ACPI)
4839 static int spi_acpi_controller_match(struct device *dev, const void *data)
4840 {
4841 	return device_match_acpi_dev(dev->parent, data);
4842 }
4843 
4844 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4845 {
4846 	struct device *dev;
4847 
4848 	dev = class_find_device(&spi_controller_class, NULL, adev,
4849 				spi_acpi_controller_match);
4850 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4851 		dev = class_find_device(&spi_target_class, NULL, adev,
4852 					spi_acpi_controller_match);
4853 	if (!dev)
4854 		return NULL;
4855 
4856 	return container_of(dev, struct spi_controller, dev);
4857 }
4858 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4859 
4860 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4861 {
4862 	struct device *dev;
4863 
4864 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4865 	return to_spi_device(dev);
4866 }
4867 
4868 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4869 			   void *arg)
4870 {
4871 	struct acpi_device *adev = arg;
4872 	struct spi_controller *ctlr;
4873 	struct spi_device *spi;
4874 
4875 	switch (value) {
4876 	case ACPI_RECONFIG_DEVICE_ADD:
4877 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4878 		if (!ctlr)
4879 			break;
4880 
4881 		acpi_register_spi_device(ctlr, adev);
4882 		put_device(&ctlr->dev);
4883 		break;
4884 	case ACPI_RECONFIG_DEVICE_REMOVE:
4885 		if (!acpi_device_enumerated(adev))
4886 			break;
4887 
4888 		spi = acpi_spi_find_device_by_adev(adev);
4889 		if (!spi)
4890 			break;
4891 
4892 		spi_unregister_device(spi);
4893 		put_device(&spi->dev);
4894 		break;
4895 	}
4896 
4897 	return NOTIFY_OK;
4898 }
4899 
4900 static struct notifier_block spi_acpi_notifier = {
4901 	.notifier_call = acpi_spi_notify,
4902 };
4903 #else
4904 extern struct notifier_block spi_acpi_notifier;
4905 #endif
4906 
4907 static int __init spi_init(void)
4908 {
4909 	int	status;
4910 
4911 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4912 	if (!buf) {
4913 		status = -ENOMEM;
4914 		goto err0;
4915 	}
4916 
4917 	status = bus_register(&spi_bus_type);
4918 	if (status < 0)
4919 		goto err1;
4920 
4921 	status = class_register(&spi_controller_class);
4922 	if (status < 0)
4923 		goto err2;
4924 
4925 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4926 		status = class_register(&spi_target_class);
4927 		if (status < 0)
4928 			goto err3;
4929 	}
4930 
4931 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4932 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4933 	if (IS_ENABLED(CONFIG_ACPI))
4934 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4935 
4936 	return 0;
4937 
4938 err3:
4939 	class_unregister(&spi_controller_class);
4940 err2:
4941 	bus_unregister(&spi_bus_type);
4942 err1:
4943 	kfree(buf);
4944 	buf = NULL;
4945 err0:
4946 	return status;
4947 }
4948 
4949 /*
4950  * A board_info is normally registered in arch_initcall(),
4951  * but even essential drivers wait till later.
4952  *
4953  * REVISIT only boardinfo really needs static linking. The rest (device and
4954  * driver registration) _could_ be dynamically linked (modular) ... Costs
4955  * include needing to have boardinfo data structures be much more public.
4956  */
4957 postcore_initcall(spi_init);
4958