xref: /linux/drivers/spi/spi.c (revision 1e0cc8d0a14271fbf6a50e680c8020458121a52b)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 // SPI init/core code
3 //
4 // Copyright (C) 2005 David Brownell
5 // Copyright (C) 2008 Secret Lab Technologies Ltd.
6 
7 #include <linux/acpi.h>
8 #include <linux/cache.h>
9 #include <linux/clk/clk-conf.h>
10 #include <linux/delay.h>
11 #include <linux/device.h>
12 #include <linux/dmaengine.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/export.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/highmem.h>
17 #include <linux/idr.h>
18 #include <linux/init.h>
19 #include <linux/ioport.h>
20 #include <linux/kernel.h>
21 #include <linux/kthread.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/mutex.h>
24 #include <linux/of_device.h>
25 #include <linux/of_irq.h>
26 #include <linux/percpu.h>
27 #include <linux/platform_data/x86/apple.h>
28 #include <linux/pm_domain.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/property.h>
31 #include <linux/ptp_clock_kernel.h>
32 #include <linux/sched/rt.h>
33 #include <linux/slab.h>
34 #include <linux/spi/spi.h>
35 #include <linux/spi/spi-mem.h>
36 #include <uapi/linux/sched/types.h>
37 
38 #define CREATE_TRACE_POINTS
39 #include <trace/events/spi.h>
40 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
41 EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
42 
43 #include "internals.h"
44 
45 static DEFINE_IDR(spi_master_idr);
46 
47 static void spidev_release(struct device *dev)
48 {
49 	struct spi_device	*spi = to_spi_device(dev);
50 
51 	spi_controller_put(spi->controller);
52 	kfree(spi->driver_override);
53 	free_percpu(spi->pcpu_statistics);
54 	kfree(spi);
55 }
56 
57 static ssize_t
58 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
59 {
60 	const struct spi_device	*spi = to_spi_device(dev);
61 	int len;
62 
63 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
64 	if (len != -ENODEV)
65 		return len;
66 
67 	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
68 }
69 static DEVICE_ATTR_RO(modalias);
70 
71 static ssize_t driver_override_store(struct device *dev,
72 				     struct device_attribute *a,
73 				     const char *buf, size_t count)
74 {
75 	struct spi_device *spi = to_spi_device(dev);
76 	int ret;
77 
78 	ret = driver_set_override(dev, &spi->driver_override, buf, count);
79 	if (ret)
80 		return ret;
81 
82 	return count;
83 }
84 
85 static ssize_t driver_override_show(struct device *dev,
86 				    struct device_attribute *a, char *buf)
87 {
88 	const struct spi_device *spi = to_spi_device(dev);
89 	ssize_t len;
90 
91 	device_lock(dev);
92 	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
93 	device_unlock(dev);
94 	return len;
95 }
96 static DEVICE_ATTR_RW(driver_override);
97 
98 static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
99 {
100 	struct spi_statistics __percpu *pcpu_stats;
101 
102 	if (dev)
103 		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
104 	else
105 		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);
106 
107 	if (pcpu_stats) {
108 		int cpu;
109 
110 		for_each_possible_cpu(cpu) {
111 			struct spi_statistics *stat;
112 
113 			stat = per_cpu_ptr(pcpu_stats, cpu);
114 			u64_stats_init(&stat->syncp);
115 		}
116 	}
117 	return pcpu_stats;
118 }
119 
120 static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
121 				   char *buf, size_t offset)
122 {
123 	u64 val = 0;
124 	int i;
125 
126 	for_each_possible_cpu(i) {
127 		const struct spi_statistics *pcpu_stats;
128 		u64_stats_t *field;
129 		unsigned int start;
130 		u64 inc;
131 
132 		pcpu_stats = per_cpu_ptr(stat, i);
133 		field = (void *)pcpu_stats + offset;
134 		do {
135 			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
136 			inc = u64_stats_read(field);
137 		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
138 		val += inc;
139 	}
140 	return sysfs_emit(buf, "%llu\n", val);
141 }
142 
143 #define SPI_STATISTICS_ATTRS(field, file)				\
144 static ssize_t spi_controller_##field##_show(struct device *dev,	\
145 					     struct device_attribute *attr, \
146 					     char *buf)			\
147 {									\
148 	struct spi_controller *ctlr = container_of(dev,			\
149 					 struct spi_controller, dev);	\
150 	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
151 }									\
152 static struct device_attribute dev_attr_spi_controller_##field = {	\
153 	.attr = { .name = file, .mode = 0444 },				\
154 	.show = spi_controller_##field##_show,				\
155 };									\
156 static ssize_t spi_device_##field##_show(struct device *dev,		\
157 					 struct device_attribute *attr,	\
158 					char *buf)			\
159 {									\
160 	struct spi_device *spi = to_spi_device(dev);			\
161 	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
162 }									\
163 static struct device_attribute dev_attr_spi_device_##field = {		\
164 	.attr = { .name = file, .mode = 0444 },				\
165 	.show = spi_device_##field##_show,				\
166 }
167 
168 #define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
169 static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
170 					    char *buf)			\
171 {									\
172 	return spi_emit_pcpu_stats(stat, buf,				\
173 			offsetof(struct spi_statistics, field));	\
174 }									\
175 SPI_STATISTICS_ATTRS(name, file)
176 
177 #define SPI_STATISTICS_SHOW(field)					\
178 	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
179 				 field)
180 
181 SPI_STATISTICS_SHOW(messages);
182 SPI_STATISTICS_SHOW(transfers);
183 SPI_STATISTICS_SHOW(errors);
184 SPI_STATISTICS_SHOW(timedout);
185 
186 SPI_STATISTICS_SHOW(spi_sync);
187 SPI_STATISTICS_SHOW(spi_sync_immediate);
188 SPI_STATISTICS_SHOW(spi_async);
189 
190 SPI_STATISTICS_SHOW(bytes);
191 SPI_STATISTICS_SHOW(bytes_rx);
192 SPI_STATISTICS_SHOW(bytes_tx);
193 
194 #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
195 	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
196 				 "transfer_bytes_histo_" number,	\
197 				 transfer_bytes_histo[index])
198 SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
199 SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
200 SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
201 SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
202 SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
203 SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
204 SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
205 SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
206 SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
207 SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
208 SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
209 SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
210 SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
211 SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
212 SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
213 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
214 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
215 
216 SPI_STATISTICS_SHOW(transfers_split_maxsize);
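/*
 * The macros above generate one read-only file per counter in a
 * "statistics" sysfs group on both the controller and the device. An
 * illustrative sketch of the resulting layout (bus number 0 and chip
 * select 0 assumed):
 *
 *	/sys/class/spi_master/spi0/statistics/messages
 *	/sys/class/spi_master/spi0/statistics/bytes_tx
 *	/sys/bus/spi/devices/spi0.0/statistics/transfer_bytes_histo_4-7
 */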
217 
218 static struct attribute *spi_dev_attrs[] = {
219 	&dev_attr_modalias.attr,
220 	&dev_attr_driver_override.attr,
221 	NULL,
222 };
223 
224 static const struct attribute_group spi_dev_group = {
225 	.attrs  = spi_dev_attrs,
226 };
227 
228 static struct attribute *spi_device_statistics_attrs[] = {
229 	&dev_attr_spi_device_messages.attr,
230 	&dev_attr_spi_device_transfers.attr,
231 	&dev_attr_spi_device_errors.attr,
232 	&dev_attr_spi_device_timedout.attr,
233 	&dev_attr_spi_device_spi_sync.attr,
234 	&dev_attr_spi_device_spi_sync_immediate.attr,
235 	&dev_attr_spi_device_spi_async.attr,
236 	&dev_attr_spi_device_bytes.attr,
237 	&dev_attr_spi_device_bytes_rx.attr,
238 	&dev_attr_spi_device_bytes_tx.attr,
239 	&dev_attr_spi_device_transfer_bytes_histo0.attr,
240 	&dev_attr_spi_device_transfer_bytes_histo1.attr,
241 	&dev_attr_spi_device_transfer_bytes_histo2.attr,
242 	&dev_attr_spi_device_transfer_bytes_histo3.attr,
243 	&dev_attr_spi_device_transfer_bytes_histo4.attr,
244 	&dev_attr_spi_device_transfer_bytes_histo5.attr,
245 	&dev_attr_spi_device_transfer_bytes_histo6.attr,
246 	&dev_attr_spi_device_transfer_bytes_histo7.attr,
247 	&dev_attr_spi_device_transfer_bytes_histo8.attr,
248 	&dev_attr_spi_device_transfer_bytes_histo9.attr,
249 	&dev_attr_spi_device_transfer_bytes_histo10.attr,
250 	&dev_attr_spi_device_transfer_bytes_histo11.attr,
251 	&dev_attr_spi_device_transfer_bytes_histo12.attr,
252 	&dev_attr_spi_device_transfer_bytes_histo13.attr,
253 	&dev_attr_spi_device_transfer_bytes_histo14.attr,
254 	&dev_attr_spi_device_transfer_bytes_histo15.attr,
255 	&dev_attr_spi_device_transfer_bytes_histo16.attr,
256 	&dev_attr_spi_device_transfers_split_maxsize.attr,
257 	NULL,
258 };
259 
260 static const struct attribute_group spi_device_statistics_group = {
261 	.name  = "statistics",
262 	.attrs  = spi_device_statistics_attrs,
263 };
264 
265 static const struct attribute_group *spi_dev_groups[] = {
266 	&spi_dev_group,
267 	&spi_device_statistics_group,
268 	NULL,
269 };
270 
271 static struct attribute *spi_controller_statistics_attrs[] = {
272 	&dev_attr_spi_controller_messages.attr,
273 	&dev_attr_spi_controller_transfers.attr,
274 	&dev_attr_spi_controller_errors.attr,
275 	&dev_attr_spi_controller_timedout.attr,
276 	&dev_attr_spi_controller_spi_sync.attr,
277 	&dev_attr_spi_controller_spi_sync_immediate.attr,
278 	&dev_attr_spi_controller_spi_async.attr,
279 	&dev_attr_spi_controller_bytes.attr,
280 	&dev_attr_spi_controller_bytes_rx.attr,
281 	&dev_attr_spi_controller_bytes_tx.attr,
282 	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
283 	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
284 	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
285 	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
286 	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
287 	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
288 	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
289 	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
290 	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
291 	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
292 	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
293 	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
294 	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
295 	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
296 	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
297 	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
298 	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
299 	&dev_attr_spi_controller_transfers_split_maxsize.attr,
300 	NULL,
301 };
302 
303 static const struct attribute_group spi_controller_statistics_group = {
304 	.name  = "statistics",
305 	.attrs  = spi_controller_statistics_attrs,
306 };
307 
308 static const struct attribute_group *spi_master_groups[] = {
309 	&spi_controller_statistics_group,
310 	NULL,
311 };
312 
313 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
314 					      struct spi_transfer *xfer,
315 					      struct spi_message *msg)
316 {
317 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
318 	struct spi_statistics *stats;
319 
320 	if (l2len < 0)
321 		l2len = 0;
322 
323 	get_cpu();
324 	stats = this_cpu_ptr(pcpu_stats);
325 	u64_stats_update_begin(&stats->syncp);
326 
327 	u64_stats_inc(&stats->transfers);
328 	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
329 
330 	u64_stats_add(&stats->bytes, xfer->len);
331 	if (spi_valid_txbuf(msg, xfer))
332 		u64_stats_add(&stats->bytes_tx, xfer->len);
333 	if (spi_valid_rxbuf(msg, xfer))
334 		u64_stats_add(&stats->bytes_rx, xfer->len);
335 
336 	u64_stats_update_end(&stats->syncp);
337 	put_cpu();
338 }
339 
340 /*
341  * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
342  * and the sysfs version makes coldplug work too.
343  */
344 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
345 {
346 	while (id->name[0]) {
347 		if (!strcmp(name, id->name))
348 			return id;
349 		id++;
350 	}
351 	return NULL;
352 }
353 
354 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
355 {
356 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
357 
358 	return spi_match_id(sdrv->id_table, sdev->modalias);
359 }
360 EXPORT_SYMBOL_GPL(spi_get_device_id);
361 
362 const void *spi_get_device_match_data(const struct spi_device *sdev)
363 {
364 	const void *match;
365 
366 	match = device_get_match_data(&sdev->dev);
367 	if (match)
368 		return match;
369 
370 	return (const void *)spi_get_device_id(sdev)->driver_data;
371 }
372 EXPORT_SYMBOL_GPL(spi_get_device_match_data);
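/*
 * Example (illustrative sketch, not part of this file): a client
 * driver's probe() using spi_get_device_match_data() to fetch per-chip
 * data stashed in its OF/ACPI/spi_device_id match tables. The "foo"
 * names are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_chip_info *info;	// hypothetical type
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -EINVAL;
 *		// ... configure the chip according to *info ...
 *		return 0;
 *	}
 */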
373 
374 static int spi_match_device(struct device *dev, const struct device_driver *drv)
375 {
376 	const struct spi_device	*spi = to_spi_device(dev);
377 	const struct spi_driver	*sdrv = to_spi_driver(drv);
378 
379 	/* Check override first, and if set, only use the named driver */
380 	if (spi->driver_override)
381 		return strcmp(spi->driver_override, drv->name) == 0;
382 
383 	/* Attempt an OF style match */
384 	if (of_driver_match_device(dev, drv))
385 		return 1;
386 
387 	/* Then try ACPI */
388 	if (acpi_driver_match_device(dev, drv))
389 		return 1;
390 
391 	if (sdrv->id_table)
392 		return !!spi_match_id(sdrv->id_table, spi->modalias);
393 
394 	return strcmp(spi->modalias, drv->name) == 0;
395 }
396 
397 static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
398 {
399 	const struct spi_device		*spi = to_spi_device(dev);
400 	int rc;
401 
402 	rc = acpi_device_uevent_modalias(dev, env);
403 	if (rc != -ENODEV)
404 		return rc;
405 
406 	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
407 }
408 
409 static int spi_probe(struct device *dev)
410 {
411 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
412 	struct spi_device		*spi = to_spi_device(dev);
413 	int ret;
414 
415 	ret = of_clk_set_defaults(dev->of_node, false);
416 	if (ret)
417 		return ret;
418 
419 	if (dev->of_node) {
420 		spi->irq = of_irq_get(dev->of_node, 0);
421 		if (spi->irq == -EPROBE_DEFER)
422 			return -EPROBE_DEFER;
423 		if (spi->irq < 0)
424 			spi->irq = 0;
425 	}
426 
427 	ret = dev_pm_domain_attach(dev, true);
428 	if (ret)
429 		return ret;
430 
431 	if (sdrv->probe) {
432 		ret = sdrv->probe(spi);
433 		if (ret)
434 			dev_pm_domain_detach(dev, true);
435 	}
436 
437 	return ret;
438 }
439 
440 static void spi_remove(struct device *dev)
441 {
442 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
443 
444 	if (sdrv->remove)
445 		sdrv->remove(to_spi_device(dev));
446 
447 	dev_pm_domain_detach(dev, true);
448 }
449 
450 static void spi_shutdown(struct device *dev)
451 {
452 	if (dev->driver) {
453 		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
454 
455 		if (sdrv->shutdown)
456 			sdrv->shutdown(to_spi_device(dev));
457 	}
458 }
459 
460 const struct bus_type spi_bus_type = {
461 	.name		= "spi",
462 	.dev_groups	= spi_dev_groups,
463 	.match		= spi_match_device,
464 	.uevent		= spi_uevent,
465 	.probe		= spi_probe,
466 	.remove		= spi_remove,
467 	.shutdown	= spi_shutdown,
468 };
469 EXPORT_SYMBOL_GPL(spi_bus_type);
470 
471 /**
472  * __spi_register_driver - register a SPI driver
473  * @owner: owner module of the driver to register
474  * @sdrv: the driver to register
475  * Context: can sleep
476  *
477  * Return: zero on success, else a negative error code.
478  */
479 int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
480 {
481 	sdrv->driver.owner = owner;
482 	sdrv->driver.bus = &spi_bus_type;
483 
484 	/*
485 	 * For Really Good Reasons we use spi: modaliases, not of:
486 	 * modaliases, for DT, so module autoloading won't work if we
487 	 * don't have a spi_device_id as well as a compatible string.
488 	 */
489 	if (sdrv->driver.of_match_table) {
490 		const struct of_device_id *of_id;
491 
492 		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
493 		     of_id++) {
494 			const char *of_name;
495 
496 			/* Strip off any vendor prefix */
497 			of_name = strnchr(of_id->compatible,
498 					  sizeof(of_id->compatible), ',');
499 			if (of_name)
500 				of_name++;
501 			else
502 				of_name = of_id->compatible;
503 
504 			if (sdrv->id_table) {
505 				const struct spi_device_id *spi_id;
506 
507 				spi_id = spi_match_id(sdrv->id_table, of_name);
508 				if (spi_id)
509 					continue;
510 			} else {
511 				if (strcmp(sdrv->driver.name, of_name) == 0)
512 					continue;
513 			}
514 
515 			pr_warn("SPI driver %s has no spi_device_id for %s\n",
516 				sdrv->driver.name, of_id->compatible);
517 		}
518 	}
519 
520 	return driver_register(&sdrv->driver);
521 }
522 EXPORT_SYMBOL_GPL(__spi_register_driver);
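/*
 * Example (illustrative sketch): a minimal client driver registration.
 * Per the warning above, each "vendor,chip" compatible should have a
 * matching "chip" entry in the spi_device_id table so that spi:
 * modalias based autoloading keeps working. All "foo"/"acme" names are
 * hypothetical.
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foochip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "acme,foochip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name = "foochip",
 *			.of_match_table = foo_of_match,
 *		},
 *		.id_table = foo_spi_ids,
 *		.probe = foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 */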
523 
524 /*-------------------------------------------------------------------------*/
525 
526 /*
527  * SPI devices should normally not be created by SPI device drivers; that
528  * would make them board-specific.  Similarly with SPI controller drivers.
529  * Device registration normally goes into something like arch/.../mach.../board-YYY.c
530  * with other readonly (flashable) information about mainboard devices.
531  */
532 
533 struct boardinfo {
534 	struct list_head	list;
535 	struct spi_board_info	board_info;
536 };
537 
538 static LIST_HEAD(board_list);
539 static LIST_HEAD(spi_controller_list);
540 
541 /*
542  * Used to protect add/del operations on the board_info and
543  * spi_controller lists and their matching process; also used
544  * to protect the struct idr object (spi_master_idr).
545  */
546 static DEFINE_MUTEX(board_lock);
547 
548 /**
549  * spi_alloc_device - Allocate a new SPI device
550  * @ctlr: Controller to which device is connected
551  * Context: can sleep
552  *
553  * Allows a driver to allocate and initialize a spi_device without
554  * registering it immediately.  The driver can then directly
555  * fill the spi_device with device parameters before calling
556  * spi_add_device() on it.
557  *
558  * The caller is responsible for calling spi_add_device() on the returned
559  * spi_device structure to add it to the SPI controller.  If the caller
560  * needs to discard the spi_device without adding it, then it should
561  * call spi_dev_put() on it.
562  *
563  * Return: a pointer to the new device, or NULL.
564  */
565 struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
566 {
567 	struct spi_device	*spi;
568 
569 	if (!spi_controller_get(ctlr))
570 		return NULL;
571 
572 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
573 	if (!spi) {
574 		spi_controller_put(ctlr);
575 		return NULL;
576 	}
577 
578 	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
579 	if (!spi->pcpu_statistics) {
580 		kfree(spi);
581 		spi_controller_put(ctlr);
582 		return NULL;
583 	}
584 
585 	spi->controller = ctlr;
586 	spi->dev.parent = &ctlr->dev;
587 	spi->dev.bus = &spi_bus_type;
588 	spi->dev.release = spidev_release;
589 	spi->mode = ctlr->buswidth_override_bits;
590 
591 	device_initialize(&spi->dev);
592 	return spi;
593 }
594 EXPORT_SYMBOL_GPL(spi_alloc_device);
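/*
 * Example (illustrative sketch): the allocate/fill/add pattern
 * described above. In-file callers such as spi_new_device() also mark
 * all logical chip selects unused before assigning a real one; the
 * modalias, chip select and speed below are hypothetical.
 *
 *	struct spi_device *spi;
 *
 *	spi = spi_alloc_device(ctlr);
 *	if (!spi)
 *		return -ENOMEM;
 *	strscpy(spi->modalias, "foochip", sizeof(spi->modalias));
 *	spi_set_all_cs_unused(spi);
 *	spi_set_chipselect(spi, 0, 3);
 *	spi->cs_index_mask = BIT(0);
 *	spi->max_speed_hz = 1000000;
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	// never added: drop our reference
 *		return -ENODEV;
 *	}
 */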
595 
596 static void spi_dev_set_name(struct spi_device *spi)
597 {
598 	struct device *dev = &spi->dev;
599 	struct fwnode_handle *fwnode = dev_fwnode(dev);
600 
601 	if (is_acpi_device_node(fwnode)) {
602 		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
603 		return;
604 	}
605 
606 	if (is_software_node(fwnode)) {
607 		dev_set_name(dev, "spi-%pfwP", fwnode);
608 		return;
609 	}
610 
611 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
612 		     spi_get_chipselect(spi, 0));
613 }
614 
615 /*
616  * Zero(0) is a valid physical CS value and can be located at any
617  * logical CS in spi->chip_select[]. If all the physical CS were
618  * initialized to 0, it would be difficult to differentiate between
619  * a valid physical CS 0 and an unused logical CS whose physical
620  * CS can be 0. As a solution, initialize all the CS to -1. Now all
621  * the unused logical CS will have a -1 physical CS value and can be
622  * ignored while performing physical CS validity checks.
623  */
624 #define SPI_INVALID_CS		((s8)-1)
625 
626 static inline bool is_valid_cs(s8 chip_select)
627 {
628 	return chip_select != SPI_INVALID_CS;
629 }
630 
631 static inline int spi_dev_check_cs(struct device *dev,
632 				   struct spi_device *spi, u8 idx,
633 				   struct spi_device *new_spi, u8 new_idx)
634 {
635 	u8 cs, cs_new;
636 	u8 idx_new;
637 
638 	cs = spi_get_chipselect(spi, idx);
639 	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
640 		cs_new = spi_get_chipselect(new_spi, idx_new);
641 		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
642 			dev_err(dev, "chipselect %u already in use\n", cs_new);
643 			return -EBUSY;
644 		}
645 	}
646 	return 0;
647 }
648 
649 static int spi_dev_check(struct device *dev, void *data)
650 {
651 	struct spi_device *spi = to_spi_device(dev);
652 	struct spi_device *new_spi = data;
653 	int status, idx;
654 
655 	if (spi->controller == new_spi->controller) {
656 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
657 			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
658 			if (status)
659 				return status;
660 		}
661 	}
662 	return 0;
663 }
664 
665 static void spi_cleanup(struct spi_device *spi)
666 {
667 	if (spi->controller->cleanup)
668 		spi->controller->cleanup(spi);
669 }
670 
671 static int __spi_add_device(struct spi_device *spi)
672 {
673 	struct spi_controller *ctlr = spi->controller;
674 	struct device *dev = ctlr->dev.parent;
675 	int status, idx;
676 	u8 cs;
677 
678 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
679 		/* Chipselects are numbered 0..max; validate. */
680 		cs = spi_get_chipselect(spi, idx);
681 		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
682 			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
683 				ctlr->num_chipselect);
684 			return -EINVAL;
685 		}
686 	}
687 
688 	/*
689 	 * Make sure that multiple logical CSes don't map to the same physical CS.
690 	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
691 	 */
692 	if (!spi_controller_is_target(ctlr)) {
693 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
694 			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
695 			if (status)
696 				return status;
697 		}
698 	}
699 
700 	/* Set the bus ID string */
701 	spi_dev_set_name(spi);
702 
703 	/*
704 	 * We need to make sure there's no other device with this
705 	 * chipselect **BEFORE** we call setup(), else we'll trash
706 	 * its configuration.
707 	 */
708 	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
709 	if (status)
710 		return status;
711 
712 	/* Controller may unregister concurrently */
713 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
714 	    !device_is_registered(&ctlr->dev)) {
715 		return -ENODEV;
716 	}
717 
718 	if (ctlr->cs_gpiods) {
719 		u8 cs;
720 
721 		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
722 			cs = spi_get_chipselect(spi, idx);
723 			if (is_valid_cs(cs))
724 				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
725 		}
726 	}
727 
728 	/*
729 	 * Drivers may modify this initial i/o setup, but will
730 	 * normally rely on the device being set up.  Devices
731 	 * using SPI_CS_HIGH can't coexist well otherwise...
732 	 */
733 	status = spi_setup(spi);
734 	if (status < 0) {
735 		dev_err(dev, "can't setup %s, status %d\n",
736 				dev_name(&spi->dev), status);
737 		return status;
738 	}
739 
740 	/* Device may be bound to an active driver when this returns */
741 	status = device_add(&spi->dev);
742 	if (status < 0) {
743 		dev_err(dev, "can't add %s, status %d\n",
744 				dev_name(&spi->dev), status);
745 		spi_cleanup(spi);
746 	} else {
747 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
748 	}
749 
750 	return status;
751 }
752 
753 /**
754  * spi_add_device - Add spi_device allocated with spi_alloc_device
755  * @spi: spi_device to register
756  *
757  * Companion function to spi_alloc_device.  Devices allocated with
758  * spi_alloc_device can be added onto the SPI bus with this function.
759  *
760  * Return: 0 on success; negative errno on failure
761  */
762 int spi_add_device(struct spi_device *spi)
763 {
764 	struct spi_controller *ctlr = spi->controller;
765 	int status;
766 
767 	/* Set the bus ID string */
768 	spi_dev_set_name(spi);
769 
770 	mutex_lock(&ctlr->add_lock);
771 	status = __spi_add_device(spi);
772 	mutex_unlock(&ctlr->add_lock);
773 	return status;
774 }
775 EXPORT_SYMBOL_GPL(spi_add_device);
776 
777 static void spi_set_all_cs_unused(struct spi_device *spi)
778 {
779 	u8 idx;
780 
781 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
782 		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
783 }
784 
785 /**
786  * spi_new_device - instantiate one new SPI device
787  * @ctlr: Controller to which device is connected
788  * @chip: Describes the SPI device
789  * Context: can sleep
790  *
791  * On typical mainboards, this is purely internal; and it's not needed
792  * after board init creates the hard-wired devices.  Some development
793  * platforms may not be able to use spi_register_board_info though, and
794  * this is exported so that for example a USB or parport based adapter
795  * driver could add devices (which it would learn about out-of-band).
796  *
797  * Return: the new device, or NULL.
798  */
799 struct spi_device *spi_new_device(struct spi_controller *ctlr,
800 				  struct spi_board_info *chip)
801 {
802 	struct spi_device	*proxy;
803 	int			status;
804 
805 	/*
806 	 * NOTE:  caller did any chip->bus_num checks necessary.
807 	 *
808 	 * Also, unless we change the return value convention to use
809 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
810 	 * suggests syslogged diagnostics are best here (ugh).
811 	 */
812 
813 	proxy = spi_alloc_device(ctlr);
814 	if (!proxy)
815 		return NULL;
816 
817 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
818 
819 	/* Use provided chip-select for proxy device */
820 	spi_set_all_cs_unused(proxy);
821 	spi_set_chipselect(proxy, 0, chip->chip_select);
822 
823 	proxy->max_speed_hz = chip->max_speed_hz;
824 	proxy->mode = chip->mode;
825 	proxy->irq = chip->irq;
826 	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
827 	proxy->dev.platform_data = (void *) chip->platform_data;
828 	proxy->controller_data = chip->controller_data;
829 	proxy->controller_state = NULL;
830 	/*
831 	 * By default spi->chip_select[0] will hold the physical CS number,
832 	 * so set bit 0 in spi->cs_index_mask.
833 	 */
834 	proxy->cs_index_mask = BIT(0);
835 
836 	if (chip->swnode) {
837 		status = device_add_software_node(&proxy->dev, chip->swnode);
838 		if (status) {
839 			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
840 				chip->modalias, status);
841 			goto err_dev_put;
842 		}
843 	}
844 
845 	status = spi_add_device(proxy);
846 	if (status < 0)
847 		goto err_dev_put;
848 
849 	return proxy;
850 
851 err_dev_put:
852 	device_remove_software_node(&proxy->dev);
853 	spi_dev_put(proxy);
854 	return NULL;
855 }
856 EXPORT_SYMBOL_GPL(spi_new_device);
857 
858 /**
859  * spi_unregister_device - unregister a single SPI device
860  * @spi: spi_device to unregister
861  *
862  * Start making the passed SPI device vanish. Normally this would be handled
863  * by spi_unregister_controller().
864  */
865 void spi_unregister_device(struct spi_device *spi)
866 {
867 	if (!spi)
868 		return;
869 
870 	if (spi->dev.of_node) {
871 		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
872 		of_node_put(spi->dev.of_node);
873 	}
874 	if (ACPI_COMPANION(&spi->dev))
875 		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
876 	device_remove_software_node(&spi->dev);
877 	device_del(&spi->dev);
878 	spi_cleanup(spi);
879 	put_device(&spi->dev);
880 }
881 EXPORT_SYMBOL_GPL(spi_unregister_device);
882 
883 static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
884 					      struct spi_board_info *bi)
885 {
886 	struct spi_device *dev;
887 
888 	if (ctlr->bus_num != bi->bus_num)
889 		return;
890 
891 	dev = spi_new_device(ctlr, bi);
892 	if (!dev)
893 		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
894 			bi->modalias);
895 }
896 
897 /**
898  * spi_register_board_info - register SPI devices for a given board
899  * @info: array of chip descriptors
900  * @n: how many descriptors are provided
901  * Context: can sleep
902  *
903  * Board-specific early init code calls this (probably during arch_initcall)
904  * with segments of the SPI device table.  Any device nodes are created later,
905  * after the relevant parent SPI controller (bus_num) is defined.  We keep
906  * this table of devices forever, so that reloading a controller driver will
907  * not make Linux forget about these hard-wired devices.
908  *
909  * Other code can also call this, e.g. a particular add-on board might provide
910  * SPI devices through its expansion connector, so code initializing that board
911  * would naturally declare its SPI devices.
912  *
913  * The board info passed can safely be __initdata ... but be careful of
914  * any embedded pointers (platform_data, etc.), as they're copied as-is.
915  *
916  * Return: zero on success, else a negative error code.
917  */
918 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
919 {
920 	struct boardinfo *bi;
921 	int i;
922 
923 	if (!n)
924 		return 0;
925 
926 	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
927 	if (!bi)
928 		return -ENOMEM;
929 
930 	for (i = 0; i < n; i++, bi++, info++) {
931 		struct spi_controller *ctlr;
932 
933 		memcpy(&bi->board_info, info, sizeof(*info));
934 
935 		mutex_lock(&board_lock);
936 		list_add_tail(&bi->list, &board_list);
937 		list_for_each_entry(ctlr, &spi_controller_list, list)
938 			spi_match_controller_to_boardinfo(ctlr,
939 							  &bi->board_info);
940 		mutex_unlock(&board_lock);
941 	}
942 
943 	return 0;
944 }
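/*
 * Example (illustrative sketch): a board file declaring one device on
 * bus 0, chip select 1. All names and numbers are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "foochip",
 *			.max_speed_hz	= 2000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_init);
 */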
945 
946 /*-------------------------------------------------------------------------*/
947 
948 /* Core methods for SPI resource management */
949 
950 /**
951  * spi_res_alloc - allocate a spi resource that is life-cycle managed
952  *                 during the processing of a spi_message while using
953  *                 spi_transfer_one
954  * @spi:     the SPI device for which we allocate memory
955  * @release: the release code to execute for this resource
956  * @size:    size to alloc and return
957  * @gfp:     GFP allocation flags
958  *
959  * Return: the pointer to the allocated data
960  *
961  * This may get enhanced in the future to allocate from a memory pool
962  * of the @spi_device or @spi_controller to avoid repeated allocations.
963  */
964 static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
965 			   size_t size, gfp_t gfp)
966 {
967 	struct spi_res *sres;
968 
969 	sres = kzalloc(sizeof(*sres) + size, gfp);
970 	if (!sres)
971 		return NULL;
972 
973 	INIT_LIST_HEAD(&sres->entry);
974 	sres->release = release;
975 
976 	return sres->data;
977 }
978 
979 /**
980  * spi_res_free - free an SPI resource
981  * @res: pointer to the custom data of a resource
982  */
983 static void spi_res_free(void *res)
984 {
985 	struct spi_res *sres = container_of(res, struct spi_res, data);
986 
987 	if (!res)
988 		return;
989 
990 	WARN_ON(!list_empty(&sres->entry));
991 	kfree(sres);
992 }
993 
994 /**
995  * spi_res_add - add a spi_res to the spi_message
996  * @message: the SPI message
997  * @res:     the spi_resource
998  */
999 static void spi_res_add(struct spi_message *message, void *res)
1000 {
1001 	struct spi_res *sres = container_of(res, struct spi_res, data);
1002 
1003 	WARN_ON(!list_empty(&sres->entry));
1004 	list_add_tail(&sres->entry, &message->resources);
1005 }
1006 
1007 /**
1008  * spi_res_release - release all SPI resources for this message
1009  * @ctlr:  the @spi_controller
1010  * @message: the @spi_message
1011  */
1012 static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
1013 {
1014 	struct spi_res *res, *tmp;
1015 
1016 	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
1017 		if (res->release)
1018 			res->release(ctlr, message, res->data);
1019 
1020 		list_del(&res->entry);
1021 
1022 		kfree(res);
1023 	}
1024 }
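/*
 * Example (illustrative sketch): how these in-file helpers tie a
 * resource to a message. The release callback runs from
 * spi_res_release() once the message is finalized; the "foo" names and
 * struct foo_state are hypothetical.
 *
 *	static void foo_release(struct spi_controller *ctlr,
 *				struct spi_message *msg, void *res)
 *	{
 *		// undo whatever foo set up for this message
 *	}
 *
 *	void *data = spi_res_alloc(msg->spi, foo_release,
 *				   sizeof(struct foo_state), GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	spi_res_add(msg, data);	// now owned and freed by the message
 */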
1025 
1026 /*-------------------------------------------------------------------------*/
1027 #define spi_for_each_valid_cs(spi, idx)				\
1028 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
1029 		if (!(spi->cs_index_mask & BIT(idx))) {} else
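/*
 * The "{} else" construct makes the loop skip indices whose bit is
 * clear in cs_index_mask, while still letting the statement that
 * follows the macro bind cleanly as the loop body.
 */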
1030 
1031 static inline bool spi_is_last_cs(struct spi_device *spi)
1032 {
1033 	u8 idx;
1034 	bool last = false;
1035 
1036 	spi_for_each_valid_cs(spi, idx) {
1037 		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
1038 			last = true;
1039 	}
1040 	return last;
1041 }
1042 
1043 static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
1044 {
1045 	/*
1046 	 * Historically, ACPI has no means of expressing the GPIO polarity
1047 	 * and thus the SPISerialBus() resource defines it on a per-chip
1048 	 * basis. In order to avoid a chain of negations, the GPIO
1049 	 * polarity is considered to be Active High. Even for the cases
1050 	 * when _DSD() is involved (in the updated versions of ACPI)
1051 	 * the GPIO CS polarity must be defined Active High to avoid
1052 	 * ambiguity. That's why we use "enable", which takes SPI_CS_HIGH
1053 	 * into account.
1054 	 */
1055 	if (has_acpi_companion(&spi->dev))
1056 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
1057 	else
1058 		/* Polarity handled by GPIO library */
1059 		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
1060 
1061 	if (activate)
1062 		spi_delay_exec(&spi->cs_setup, NULL);
1063 	else
1064 		spi_delay_exec(&spi->cs_inactive, NULL);
1065 }
1066 
1067 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
1068 {
1069 	bool activate = enable;
1070 	u8 idx;
1071 
1072 	/*
1073 	 * Avoid calling into the driver (or doing delays) if the chip select
1074 	 * isn't actually changing from the last time this was called.
1075 	 */
1076 	if (!force && ((enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1077 			spi_is_last_cs(spi)) ||
1078 		       (!enable && spi->controller->last_cs_index_mask == spi->cs_index_mask &&
1079 			!spi_is_last_cs(spi))) &&
1080 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
1081 		return;
1082 
1083 	trace_spi_set_cs(spi, activate);
1084 
1085 	spi->controller->last_cs_index_mask = spi->cs_index_mask;
1086 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
1087 		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
1088 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
1089 
1090 	if (spi->mode & SPI_CS_HIGH)
1091 		enable = !enable;
1092 
1093 	/*
1094 	 * Handle chip select delays for GPIO based CS or controllers without
1095 	 * programmable chip select timing.
1096 	 */
1097 	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
1098 		spi_delay_exec(&spi->cs_hold, NULL);
1099 
1100 	if (spi_is_csgpiod(spi)) {
1101 		if (!(spi->mode & SPI_NO_CS)) {
1102 			spi_for_each_valid_cs(spi, idx) {
1103 				if (spi_get_csgpiod(spi, idx))
1104 					spi_toggle_csgpiod(spi, idx, enable, activate);
1105 			}
1106 		}
1107 		/* Some SPI masters need both GPIO CS & slave_select */
1108 		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
1109 		    spi->controller->set_cs)
1110 			spi->controller->set_cs(spi, !enable);
1111 	} else if (spi->controller->set_cs) {
1112 		spi->controller->set_cs(spi, !enable);
1113 	}
1114 
1115 	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
1116 		if (activate)
1117 			spi_delay_exec(&spi->cs_setup, NULL);
1118 		else
1119 			spi_delay_exec(&spi->cs_inactive, NULL);
1120 	}
1121 }
1122 
1123 #ifdef CONFIG_HAS_DMA
1124 static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
1125 			     struct sg_table *sgt, void *buf, size_t len,
1126 			     enum dma_data_direction dir, unsigned long attrs)
1127 {
1128 	const bool vmalloced_buf = is_vmalloc_addr(buf);
1129 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
1130 #ifdef CONFIG_HIGHMEM
1131 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
1132 				(unsigned long)buf < (PKMAP_BASE +
1133 					(LAST_PKMAP * PAGE_SIZE)));
1134 #else
1135 	const bool kmap_buf = false;
1136 #endif
1137 	int desc_len;
1138 	int sgs;
1139 	struct page *vm_page;
1140 	struct scatterlist *sg;
1141 	void *sg_buf;
1142 	size_t min;
1143 	int i, ret;
1144 
1145 	if (vmalloced_buf || kmap_buf) {
1146 		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
1147 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
1148 	} else if (virt_addr_valid(buf)) {
1149 		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
1150 		sgs = DIV_ROUND_UP(len, desc_len);
1151 	} else {
1152 		return -EINVAL;
1153 	}
1154 
1155 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
1156 	if (ret != 0)
1157 		return ret;
1158 
1159 	sg = &sgt->sgl[0];
1160 	for (i = 0; i < sgs; i++) {
1161 
1162 		if (vmalloced_buf || kmap_buf) {
1163 			/*
1164 			 * Next scatterlist entry size is the minimum between
1165 			 * the desc_len and the remaining buffer length that
1166 			 * fits in a page.
1167 			 */
1168 			min = min_t(size_t, desc_len,
1169 				    min_t(size_t, len,
1170 					  PAGE_SIZE - offset_in_page(buf)));
1171 			if (vmalloced_buf)
1172 				vm_page = vmalloc_to_page(buf);
1173 			else
1174 				vm_page = kmap_to_page(buf);
1175 			if (!vm_page) {
1176 				sg_free_table(sgt);
1177 				return -ENOMEM;
1178 			}
1179 			sg_set_page(sg, vm_page,
1180 				    min, offset_in_page(buf));
1181 		} else {
1182 			min = min_t(size_t, len, desc_len);
1183 			sg_buf = buf;
1184 			sg_set_buf(sg, sg_buf, min);
1185 		}
1186 
1187 		buf += min;
1188 		len -= min;
1189 		sg = sg_next(sg);
1190 	}
1191 
1192 	ret = dma_map_sgtable(dev, sgt, dir, attrs);
1193 	if (ret < 0) {
1194 		sg_free_table(sgt);
1195 		return ret;
1196 	}
1197 
1198 	return 0;
1199 }
1200 
1201 int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
1202 		struct sg_table *sgt, void *buf, size_t len,
1203 		enum dma_data_direction dir)
1204 {
1205 	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
1206 }
1207 
1208 static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
1209 				struct device *dev, struct sg_table *sgt,
1210 				enum dma_data_direction dir,
1211 				unsigned long attrs)
1212 {
1213 	dma_unmap_sgtable(dev, sgt, dir, attrs);
1214 	sg_free_table(sgt);
1215 	sgt->orig_nents = 0;
1216 	sgt->nents = 0;
1217 }
1218 
1219 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
1220 		   struct sg_table *sgt, enum dma_data_direction dir)
1221 {
1222 	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
1223 }
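/*
 * Example (illustrative sketch): a controller driver mapping a TX
 * buffer for dmaengine use and unmapping it once the transaction has
 * completed; error handling beyond the map call is omitted.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_map_buf(ctlr, ctlr->dma_tx->device->dev, &sgt,
 *			  (void *)xfer->tx_buf, xfer->len, DMA_TO_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... submit a dmaengine descriptor built from sgt.sgl ...
 *	spi_unmap_buf(ctlr, ctlr->dma_tx->device->dev, &sgt, DMA_TO_DEVICE);
 */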
1224 
1225 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1226 {
1227 	struct device *tx_dev, *rx_dev;
1228 	struct spi_transfer *xfer;
1229 	int ret;
1230 
1231 	if (!ctlr->can_dma)
1232 		return 0;
1233 
1234 	if (ctlr->dma_tx)
1235 		tx_dev = ctlr->dma_tx->device->dev;
1236 	else if (ctlr->dma_map_dev)
1237 		tx_dev = ctlr->dma_map_dev;
1238 	else
1239 		tx_dev = ctlr->dev.parent;
1240 
1241 	if (ctlr->dma_rx)
1242 		rx_dev = ctlr->dma_rx->device->dev;
1243 	else if (ctlr->dma_map_dev)
1244 		rx_dev = ctlr->dma_map_dev;
1245 	else
1246 		rx_dev = ctlr->dev.parent;
1247 
1248 	ret = -ENOMSG;
1249 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1250 		/* The sync is done before each transfer. */
1251 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1252 
1253 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
1254 			continue;
1255 
1256 		if (xfer->tx_buf != NULL) {
1257 			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1258 						(void *)xfer->tx_buf,
1259 						xfer->len, DMA_TO_DEVICE,
1260 						attrs);
1261 			if (ret != 0)
1262 				return ret;
1263 
1264 			xfer->tx_sg_mapped = true;
1265 		}
1266 
1267 		if (xfer->rx_buf != NULL) {
1268 			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1269 						xfer->rx_buf, xfer->len,
1270 						DMA_FROM_DEVICE, attrs);
1271 			if (ret != 0) {
1272 				spi_unmap_buf_attrs(ctlr, tx_dev,
1273 						&xfer->tx_sg, DMA_TO_DEVICE,
1274 						attrs);
1275 
1276 				return ret;
1277 			}
1278 
1279 			xfer->rx_sg_mapped = true;
1280 		}
1281 	}
1282 	/* No transfer has been mapped, bail out with success */
1283 	if (ret)
1284 		return 0;
1285 
1286 	ctlr->cur_rx_dma_dev = rx_dev;
1287 	ctlr->cur_tx_dma_dev = tx_dev;
1288 
1289 	return 0;
1290 }
1291 
1292 static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
1293 {
1294 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1295 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1296 	struct spi_transfer *xfer;
1297 
1298 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1299 		/* The sync has already been done after each transfer. */
1300 		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
1301 
1302 		if (xfer->rx_sg_mapped)
1303 			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
1304 					    DMA_FROM_DEVICE, attrs);
1305 		xfer->rx_sg_mapped = false;
1306 
1307 		if (xfer->tx_sg_mapped)
1308 			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
1309 					    DMA_TO_DEVICE, attrs);
1310 		xfer->tx_sg_mapped = false;
1311 	}
1312 
1313 	return 0;
1314 }
1315 
1316 static void spi_dma_sync_for_device(struct spi_controller *ctlr,
1317 				    struct spi_transfer *xfer)
1318 {
1319 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1320 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1321 
1322 	if (xfer->tx_sg_mapped)
1323 		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1324 	if (xfer->rx_sg_mapped)
1325 		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1326 }
1327 
1328 static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
1329 				 struct spi_transfer *xfer)
1330 {
1331 	struct device *rx_dev = ctlr->cur_rx_dma_dev;
1332 	struct device *tx_dev = ctlr->cur_tx_dma_dev;
1333 
1334 	if (xfer->rx_sg_mapped)
1335 		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
1336 	if (xfer->tx_sg_mapped)
1337 		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
1338 }
1339 #else /* !CONFIG_HAS_DMA */
1340 static inline int __spi_map_msg(struct spi_controller *ctlr,
1341 				struct spi_message *msg)
1342 {
1343 	return 0;
1344 }
1345 
1346 static inline int __spi_unmap_msg(struct spi_controller *ctlr,
1347 				  struct spi_message *msg)
1348 {
1349 	return 0;
1350 }
1351 
1352 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
1353 				    struct spi_transfer *xfer)
1354 {
1355 }
1356 
1357 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
1358 				 struct spi_transfer *xfer)
1359 {
1360 }
1361 #endif /* !CONFIG_HAS_DMA */
1362 
1363 static inline int spi_unmap_msg(struct spi_controller *ctlr,
1364 				struct spi_message *msg)
1365 {
1366 	struct spi_transfer *xfer;
1367 
1368 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1369 		/*
1370 		 * Restore the original NULL value of tx_buf or rx_buf if they
1371 		 * were pointed at the dummy buffers by spi_map_msg().
1372 		 */
1373 		if (xfer->tx_buf == ctlr->dummy_tx)
1374 			xfer->tx_buf = NULL;
1375 		if (xfer->rx_buf == ctlr->dummy_rx)
1376 			xfer->rx_buf = NULL;
1377 	}
1378 
1379 	return __spi_unmap_msg(ctlr, msg);
1380 }
1381 
1382 static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
1383 {
1384 	struct spi_transfer *xfer;
1385 	void *tmp;
1386 	unsigned int max_tx, max_rx;
1387 
1388 	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1389 		&& !(msg->spi->mode & SPI_3WIRE)) {
1390 		max_tx = 0;
1391 		max_rx = 0;
1392 
1393 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1394 			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
1395 			    !xfer->tx_buf)
1396 				max_tx = max(xfer->len, max_tx);
1397 			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
1398 			    !xfer->rx_buf)
1399 				max_rx = max(xfer->len, max_rx);
1400 		}
1401 
1402 		if (max_tx) {
1403 			tmp = krealloc(ctlr->dummy_tx, max_tx,
1404 				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
1405 			if (!tmp)
1406 				return -ENOMEM;
1407 			ctlr->dummy_tx = tmp;
1408 		}
1409 
1410 		if (max_rx) {
1411 			tmp = krealloc(ctlr->dummy_rx, max_rx,
1412 				       GFP_KERNEL | GFP_DMA);
1413 			if (!tmp)
1414 				return -ENOMEM;
1415 			ctlr->dummy_rx = tmp;
1416 		}
1417 
1418 		if (max_tx || max_rx) {
1419 			list_for_each_entry(xfer, &msg->transfers,
1420 					    transfer_list) {
1421 				if (!xfer->len)
1422 					continue;
1423 				if (!xfer->tx_buf)
1424 					xfer->tx_buf = ctlr->dummy_tx;
1425 				if (!xfer->rx_buf)
1426 					xfer->rx_buf = ctlr->dummy_rx;
1427 			}
1428 		}
1429 	}
1430 
1431 	return __spi_map_msg(ctlr, msg);
1432 }
1433 
1434 static int spi_transfer_wait(struct spi_controller *ctlr,
1435 			     struct spi_message *msg,
1436 			     struct spi_transfer *xfer)
1437 {
1438 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1439 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1440 	u32 speed_hz = xfer->speed_hz;
1441 	unsigned long long ms;
1442 
1443 	if (spi_controller_is_target(ctlr)) {
1444 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1445 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1446 			return -EINTR;
1447 		}
1448 	} else {
1449 		if (!speed_hz)
1450 			speed_hz = 100000;
1451 
1452 		/*
1453 		 * For each byte we wait for 8 cycles of the SPI clock.
1454 		 * Since speed is defined in Hz and we want milliseconds,
1455 		 * use the respective multiplier, applied before the division,
1456 		 * otherwise we may get 0 for short transfers.
1457 		 */
1458 		ms = 8LL * MSEC_PER_SEC * xfer->len;
1459 		do_div(ms, speed_hz);
1460 
1461 		/*
1462 		 * Double it and add 200 ms of tolerance; e.g. 100 bytes at 100 kHz
1463 		 * gives 8 ms, so we wait up to 216 ms. Use the predefined maximum
1464 		 * in case of overflow.
1464 		 */
1465 		ms += ms + 200;
1466 		if (ms > UINT_MAX)
1467 			ms = UINT_MAX;
1468 
1469 		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1470 						 msecs_to_jiffies(ms));
1471 
1472 		if (ms == 0) {
1473 			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1474 			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1475 			dev_err(&msg->spi->dev,
1476 				"SPI transfer timed out\n");
1477 			return -ETIMEDOUT;
1478 		}
1479 
1480 		if (xfer->error & SPI_TRANS_FAIL_IO)
1481 			return -EIO;
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static void _spi_transfer_delay_ns(u32 ns)
1488 {
1489 	if (!ns)
1490 		return;
1491 	if (ns <= NSEC_PER_USEC) {
1492 		ndelay(ns);
1493 	} else {
1494 		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
1495 
1496 		if (us <= 10)
1497 			udelay(us);
1498 		else
1499 			usleep_range(us, us + DIV_ROUND_UP(us, 10));
1500 	}
1501 }
1502 
1503 int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
1504 {
1505 	u32 delay = _delay->value;
1506 	u32 unit = _delay->unit;
1507 	u32 hz;
1508 
1509 	if (!delay)
1510 		return 0;
1511 
1512 	switch (unit) {
1513 	case SPI_DELAY_UNIT_USECS:
1514 		delay *= NSEC_PER_USEC;
1515 		break;
1516 	case SPI_DELAY_UNIT_NSECS:
1517 		/* Nothing to do here */
1518 		break;
1519 	case SPI_DELAY_UNIT_SCK:
1520 		/* Clock cycles need to be obtained from spi_transfer */
1521 		if (!xfer)
1522 			return -EINVAL;
1523 		/*
1524 		 * If the effective speed is unknown, approximate it by
1525 		 * underestimating with half of the requested Hz.
1526 		 */
1527 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1528 		if (!hz)
1529 			return -EINVAL;
1530 
1531 		/* Convert delay to nanoseconds */
1532 		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
1533 		break;
1534 	default:
1535 		return -EINVAL;
1536 	}
1537 
1538 	return delay;
1539 }
1540 EXPORT_SYMBOL_GPL(spi_delay_to_ns);
1541 
1542 int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1543 {
1544 	int delay;
1545 
1546 	might_sleep();
1547 
1548 	if (!_delay)
1549 		return -EINVAL;
1550 
1551 	delay = spi_delay_to_ns(_delay, xfer);
1552 	if (delay < 0)
1553 		return delay;
1554 
1555 	_spi_transfer_delay_ns(delay);
1556 
1557 	return 0;
1558 }
1559 EXPORT_SYMBOL_GPL(spi_delay_exec);
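/*
 * Example (illustrative sketch): a delay expressed in SPI clock cycles
 * resolved against a transfer. With an effective speed of 1 MHz one
 * cycle is 1000 ns, so this executes a 2000 ns delay.
 *
 *	struct spi_delay d = {
 *		.value = 2,
 *		.unit = SPI_DELAY_UNIT_SCK,
 *	};
 *	int ret = spi_delay_exec(&d, xfer);	// needs xfer for SCK units
 */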
1560 
1561 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1562 					  struct spi_transfer *xfer)
1563 {
1564 	u32 default_delay_ns = 10 * NSEC_PER_USEC;
1565 	u32 delay = xfer->cs_change_delay.value;
1566 	u32 unit = xfer->cs_change_delay.unit;
1567 	int ret;
1568 
1569 	/* Return early on "fast" mode - for everything but USECS */
1570 	if (!delay) {
1571 		if (unit == SPI_DELAY_UNIT_USECS)
1572 			_spi_transfer_delay_ns(default_delay_ns);
1573 		return;
1574 	}
1575 
1576 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1577 	if (ret) {
1578 		dev_err_once(&msg->spi->dev,
1579 			     "Use of unsupported delay unit %i, using default of %luus\n",
1580 			     unit, default_delay_ns / NSEC_PER_USEC);
1581 		_spi_transfer_delay_ns(default_delay_ns);
1582 	}
1583 }
1584 
1585 void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1586 						  struct spi_transfer *xfer)
1587 {
1588 	_spi_transfer_cs_change_delay(msg, xfer);
1589 }
1590 EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1591 
1592 /*
1593  * spi_transfer_one_message - Default implementation of transfer_one_message()
1594  *
1595  * This is a standard implementation of transfer_one_message() for
1596  * drivers which implement a transfer_one() operation.  It provides
1597  * standard handling of delays and chip select management.
1598  */
1599 static int spi_transfer_one_message(struct spi_controller *ctlr,
1600 				    struct spi_message *msg)
1601 {
1602 	struct spi_transfer *xfer;
1603 	bool keep_cs = false;
1604 	int ret = 0;
1605 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1606 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1607 
1608 	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1609 	spi_set_cs(msg->spi, !xfer->cs_off, false);
1610 
1611 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1612 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1613 
1614 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1615 		trace_spi_transfer_start(msg, xfer);
1616 
1617 		spi_statistics_add_transfer_stats(statm, xfer, msg);
1618 		spi_statistics_add_transfer_stats(stats, xfer, msg);
1619 
1620 		if (!ctlr->ptp_sts_supported) {
1621 			xfer->ptp_sts_word_pre = 0;
1622 			ptp_read_system_prets(xfer->ptp_sts);
1623 		}
1624 
1625 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1626 			reinit_completion(&ctlr->xfer_completion);
1627 
1628 fallback_pio:
1629 			spi_dma_sync_for_device(ctlr, xfer);
1630 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1631 			if (ret < 0) {
1632 				spi_dma_sync_for_cpu(ctlr, xfer);
1633 
1634 				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1635 				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1636 					__spi_unmap_msg(ctlr, msg);
1637 					ctlr->fallback = true;
1638 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1639 					goto fallback_pio;
1640 				}
1641 
1642 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1643 							       errors);
1644 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1645 							       errors);
1646 				dev_err(&msg->spi->dev,
1647 					"SPI transfer failed: %d\n", ret);
1648 				goto out;
1649 			}
1650 
1651 			if (ret > 0) {
1652 				ret = spi_transfer_wait(ctlr, msg, xfer);
1653 				if (ret < 0)
1654 					msg->status = ret;
1655 			}
1656 
1657 			spi_dma_sync_for_cpu(ctlr, xfer);
1658 		} else {
1659 			if (xfer->len)
1660 				dev_err(&msg->spi->dev,
1661 					"Bufferless transfer has length %u\n",
1662 					xfer->len);
1663 		}
1664 
1665 		if (!ctlr->ptp_sts_supported) {
1666 			ptp_read_system_postts(xfer->ptp_sts);
1667 			xfer->ptp_sts_word_post = xfer->len;
1668 		}
1669 
1670 		trace_spi_transfer_stop(msg, xfer);
1671 
1672 		if (msg->status != -EINPROGRESS)
1673 			goto out;
1674 
1675 		spi_transfer_delay_exec(xfer);
1676 
1677 		if (xfer->cs_change) {
1678 			if (list_is_last(&xfer->transfer_list,
1679 					 &msg->transfers)) {
1680 				keep_cs = true;
1681 			} else {
1682 				if (!xfer->cs_off)
1683 					spi_set_cs(msg->spi, false, false);
1684 				_spi_transfer_cs_change_delay(msg, xfer);
1685 				if (!list_next_entry(xfer, transfer_list)->cs_off)
1686 					spi_set_cs(msg->spi, true, false);
1687 			}
1688 		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1689 			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1690 			spi_set_cs(msg->spi, xfer->cs_off, false);
1691 		}
1692 
1693 		msg->actual_length += xfer->len;
1694 	}
1695 
1696 out:
1697 	if (ret != 0 || !keep_cs)
1698 		spi_set_cs(msg->spi, false, false);
1699 
1700 	if (msg->status == -EINPROGRESS)
1701 		msg->status = ret;
1702 
1703 	if (msg->status && ctlr->handle_err)
1704 		ctlr->handle_err(ctlr, msg);
1705 
1706 	spi_finalize_current_message(ctlr);
1707 
1708 	return ret;
1709 }
1710 
1711 /**
1712  * spi_finalize_current_transfer - report completion of a transfer
1713  * @ctlr: the controller reporting completion
1714  *
1715  * Called by SPI drivers using the core transfer_one_message()
1716  * implementation to notify it that the current interrupt driven
1717  * transfer has finished and the next one may be scheduled.
1718  */
1719 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1720 {
1721 	complete(&ctlr->xfer_completion);
1722 }
1723 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
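/*
 * Example (illustrative sketch): the contract between transfer_one()
 * and spi_finalize_current_transfer(). A driver that starts an
 * interrupt driven transfer returns a positive value, and its IRQ
 * handler later reports completion; the "foo" names are hypothetical.
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		foo_start_dma(xfer);	// hypothetical helper
 *		return 1;		// tell the core to wait
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct spi_controller *ctlr = data;
 *
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */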
1724 
1725 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1726 {
1727 	if (ctlr->auto_runtime_pm) {
1728 		pm_runtime_mark_last_busy(ctlr->dev.parent);
1729 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1730 	}
1731 }
1732 
1733 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1734 		struct spi_message *msg, bool was_busy)
1735 {
1736 	struct spi_transfer *xfer;
1737 	int ret;
1738 
1739 	if (!was_busy && ctlr->auto_runtime_pm) {
1740 		ret = pm_runtime_get_sync(ctlr->dev.parent);
1741 		if (ret < 0) {
1742 			pm_runtime_put_noidle(ctlr->dev.parent);
1743 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
1744 				ret);
1745 
1746 			msg->status = ret;
1747 			spi_finalize_current_message(ctlr);
1748 
1749 			return ret;
1750 		}
1751 	}
1752 
1753 	if (!was_busy)
1754 		trace_spi_controller_busy(ctlr);
1755 
1756 	if (!was_busy && ctlr->prepare_transfer_hardware) {
1757 		ret = ctlr->prepare_transfer_hardware(ctlr);
1758 		if (ret) {
1759 			dev_err(&ctlr->dev,
1760 				"failed to prepare transfer hardware: %d\n",
1761 				ret);
1762 
1763 			if (ctlr->auto_runtime_pm)
1764 				pm_runtime_put(ctlr->dev.parent);
1765 
1766 			msg->status = ret;
1767 			spi_finalize_current_message(ctlr);
1768 
1769 			return ret;
1770 		}
1771 	}
1772 
1773 	trace_spi_message_start(msg);
1774 
1775 	if (ctlr->prepare_message) {
1776 		ret = ctlr->prepare_message(ctlr, msg);
1777 		if (ret) {
1778 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1779 				ret);
1780 			msg->status = ret;
1781 			spi_finalize_current_message(ctlr);
1782 			return ret;
1783 		}
1784 		msg->prepared = true;
1785 	}
1786 
1787 	ret = spi_map_msg(ctlr, msg);
1788 	if (ret) {
1789 		msg->status = ret;
1790 		spi_finalize_current_message(ctlr);
1791 		return ret;
1792 	}
1793 
1794 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1795 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1796 			xfer->ptp_sts_word_pre = 0;
1797 			ptp_read_system_prets(xfer->ptp_sts);
1798 		}
1799 	}
1800 
1801 	/*
1802 	 * A driver's implementation of transfer_one_message() must arrange for
1803 	 * spi_finalize_current_message() to get called. Most drivers will do
1804 	 * this in the calling context, but some don't. For those cases, a
1805 	 * completion is used to guarantee that this function does not return
1806 	 * until spi_finalize_current_message() is done accessing
1807 	 * ctlr->cur_msg.
1808 	 * Use of the following two flags enables us to opportunistically skip
1809 	 * the use of the completion, since its use involves expensive spin locks.
1810 	 * In case of a race with the context that calls
1811 	 * spi_finalize_current_message(), the completion will always be used,
1812 	 * due to strict ordering of these flags using barriers.
1813 	 */
1814 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1815 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1816 	reinit_completion(&ctlr->cur_msg_completion);
1817 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1818 
1819 	ret = ctlr->transfer_one_message(ctlr, msg);
1820 	if (ret) {
1821 		dev_err(&ctlr->dev,
1822 			"failed to transfer one message from queue\n");
1823 		return ret;
1824 	}
1825 
1826 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1827 	smp_mb(); /* See spi_finalize_current_message()... */
1828 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1829 		wait_for_completion(&ctlr->cur_msg_completion);
1830 
1831 	return 0;
1832 }
1833 
1834 /**
1835  * __spi_pump_messages - function which processes SPI message queue
1836  * @ctlr: controller to process queue for
1837  * @in_kthread: true if we are in the context of the message pump thread
1838  *
1839  * This function checks if there is any SPI message in the queue that
1840  * needs processing and, if so, calls out to the driver to initialize
1841  * hardware and transfer each message.
1842  *
1843  * Note that it is called both from the kthread itself and also from
1844  * inside spi_sync(); the queue extraction handling at the top of the
1845  * function should deal with this safely.
1846  */
1847 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1848 {
1849 	struct spi_message *msg;
1850 	bool was_busy = false;
1851 	unsigned long flags;
1852 	int ret;
1853 
1854 	/* Take the I/O mutex */
1855 	mutex_lock(&ctlr->io_mutex);
1856 
1857 	/* Lock queue */
1858 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1859 
1860 	/* Make sure we are not already running a message */
1861 	if (ctlr->cur_msg)
1862 		goto out_unlock;
1863 
1864 	/* Check if the queue is idle */
1865 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1866 		if (!ctlr->busy)
1867 			goto out_unlock;
1868 
1869 		/* Defer any non-atomic teardown to the thread */
1870 		if (!in_kthread) {
1871 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1872 			    !ctlr->unprepare_transfer_hardware) {
1873 				spi_idle_runtime_pm(ctlr);
1874 				ctlr->busy = false;
1875 				ctlr->queue_empty = true;
1876 				trace_spi_controller_idle(ctlr);
1877 			} else {
1878 				kthread_queue_work(ctlr->kworker,
1879 						   &ctlr->pump_messages);
1880 			}
1881 			goto out_unlock;
1882 		}
1883 
1884 		ctlr->busy = false;
1885 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1886 
1887 		kfree(ctlr->dummy_rx);
1888 		ctlr->dummy_rx = NULL;
1889 		kfree(ctlr->dummy_tx);
1890 		ctlr->dummy_tx = NULL;
1891 		if (ctlr->unprepare_transfer_hardware &&
1892 		    ctlr->unprepare_transfer_hardware(ctlr))
1893 			dev_err(&ctlr->dev,
1894 				"failed to unprepare transfer hardware\n");
1895 		spi_idle_runtime_pm(ctlr);
1896 		trace_spi_controller_idle(ctlr);
1897 
1898 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1899 		ctlr->queue_empty = true;
1900 		goto out_unlock;
1901 	}
1902 
1903 	/* Extract head of queue */
1904 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1905 	ctlr->cur_msg = msg;
1906 
1907 	list_del_init(&msg->queue);
1908 	if (ctlr->busy)
1909 		was_busy = true;
1910 	else
1911 		ctlr->busy = true;
1912 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1913 
1914 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1915 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1916 
1917 	ctlr->cur_msg = NULL;
1918 	ctlr->fallback = false;
1919 
1920 	mutex_unlock(&ctlr->io_mutex);
1921 
1922 	/* Prod the scheduler in case transfer_one() was busy waiting */
1923 	if (!ret)
1924 		cond_resched();
1925 	return;
1926 
1927 out_unlock:
1928 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1929 	mutex_unlock(&ctlr->io_mutex);
1930 }
1931 
1932 /**
1933  * spi_pump_messages - kthread work function which processes spi message queue
1934  * @work: pointer to kthread work struct contained in the controller struct
1935  */
1936 static void spi_pump_messages(struct kthread_work *work)
1937 {
1938 	struct spi_controller *ctlr =
1939 		container_of(work, struct spi_controller, pump_messages);
1940 
1941 	__spi_pump_messages(ctlr, true);
1942 }
1943 
1944 /**
1945  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1946  * @ctlr: Pointer to the spi_controller structure of the driver
1947  * @xfer: Pointer to the transfer being timestamped
1948  * @progress: How many words (not bytes) have been transferred so far
1949  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1950  *	      transfer, for less jitter in time measurement. Only compatible
1951  *	      with PIO drivers. If true, the caller must follow up with
1952  *	      spi_take_timestamp_post(), otherwise the system will crash.
1953  *	      WARNING: for fully predictable results, the CPU frequency must
1954  *	      also be under control (governor).
1955  *
1956  * This is a helper for drivers to collect the beginning of the TX timestamp
1957  * for the requested byte from the SPI transfer. The frequency with which this
1958  * function must be called (once per word, once for the whole transfer, once
1959  * per batch of words, etc.) is arbitrary as long as the @tx buffer offset is
1960  * greater than or equal to the requested byte at the time of the call. The
1961  * timestamp is only taken once, at the first such call. It is assumed that
1962  * the driver advances its @tx buffer pointer monotonically.
1963  */
1964 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1965 			    struct spi_transfer *xfer,
1966 			    size_t progress, bool irqs_off)
1967 {
1968 	if (!xfer->ptp_sts)
1969 		return;
1970 
1971 	if (xfer->timestamped)
1972 		return;
1973 
1974 	if (progress > xfer->ptp_sts_word_pre)
1975 		return;
1976 
1977 	/* Capture the resolution of the timestamp */
1978 	xfer->ptp_sts_word_pre = progress;
1979 
1980 	if (irqs_off) {
1981 		local_irq_save(ctlr->irq_flags);
1982 		preempt_disable();
1983 	}
1984 
1985 	ptp_read_system_prets(xfer->ptp_sts);
1986 }
1987 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1988 
1989 /**
1990  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1991  * @ctlr: Pointer to the spi_controller structure of the driver
1992  * @xfer: Pointer to the transfer being timestamped
1993  * @progress: How many words (not bytes) have been transferred so far
1994  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1995  *
1996  * This is a helper for drivers to collect the end of the TX timestamp for
1997  * the requested byte from the SPI transfer. Can be called with an arbitrary
1998  * frequency: only the first call where @tx exceeds or is equal to the
1999  * requested word will be timestamped.
2000  */
2001 void spi_take_timestamp_post(struct spi_controller *ctlr,
2002 			     struct spi_transfer *xfer,
2003 			     size_t progress, bool irqs_off)
2004 {
2005 	if (!xfer->ptp_sts)
2006 		return;
2007 
2008 	if (xfer->timestamped)
2009 		return;
2010 
2011 	if (progress < xfer->ptp_sts_word_post)
2012 		return;
2013 
2014 	ptp_read_system_postts(xfer->ptp_sts);
2015 
2016 	if (irqs_off) {
2017 		local_irq_restore(ctlr->irq_flags);
2018 		preempt_enable();
2019 	}
2020 
2021 	/* Capture the resolution of the timestamp */
2022 	xfer->ptp_sts_word_post = progress;
2023 
2024 	xfer->timestamped = 1;
2025 }
2026 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
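
/*
 * Illustrative sketch (assumption, not from this file): a PIO driver
 * timestamping a transfer of 16-bit words word by word. Note that @progress
 * counts words, not bytes; priv->regs and the FOO_* register/bit names are
 * hypothetical.
 *
 *	const u16 *tx = xfer->tx_buf;
 *
 *	for (i = 0; i < xfer->len / 2; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		writel(tx[i], priv->regs + FOO_TX);
 *		while (!(readl(priv->regs + FOO_STAT) & FOO_TX_EMPTY))
 *			cpu_relax();
 *		spi_take_timestamp_post(ctlr, xfer, i, false);
 *	}
 */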
2027 
2028 /**
2029  * spi_set_thread_rt - set the controller to pump at realtime priority
2030  * @ctlr: controller to boost priority of
2031  *
2032  * This can be called because the controller requested realtime priority
2033  * (by setting the ->rt value before calling spi_register_controller()) or
2034  * because a device on the bus said that its transfers needed realtime
2035  * priority.
2036  *
2037  * NOTE: at the moment if any device on a bus says it needs realtime then
2038  * the thread will be at realtime priority for all transfers on that
2039  * controller.  If this eventually becomes a problem we may see if we can
2040  * find a way to boost the priority only temporarily during relevant
2041  * transfers.
2042  */
2043 static void spi_set_thread_rt(struct spi_controller *ctlr)
2044 {
2045 	dev_info(&ctlr->dev,
2046 		"will run message pump with realtime priority\n");
2047 	sched_set_fifo(ctlr->kworker->task);
2048 }
2049 
2050 static int spi_init_queue(struct spi_controller *ctlr)
2051 {
2052 	ctlr->running = false;
2053 	ctlr->busy = false;
2054 	ctlr->queue_empty = true;
2055 
2056 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
2057 	if (IS_ERR(ctlr->kworker)) {
2058 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2059 		return PTR_ERR(ctlr->kworker);
2060 	}
2061 
2062 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2063 
2064 	/*
2065 	 * Controller config will indicate if this controller should run the
2066 	 * message pump with high (realtime) priority to reduce the transfer
2067 	 * latency on the bus by minimising the delay between a transfer
2068 	 * request and the scheduling of the message pump thread. Without this
2069 	 * setting the message pump thread will remain at default priority.
2070 	 */
2071 	if (ctlr->rt)
2072 		spi_set_thread_rt(ctlr);
2073 
2074 	return 0;
2075 }
2076 
2077 /**
2078  * spi_get_next_queued_message() - called by driver to check for queued
2079  * messages
2080  * @ctlr: the controller to check for queued messages
2081  *
2082  * If there are more messages in the queue, the next message is returned from
2083  * this call.
2084  *
2085  * Return: the next message in the queue, else NULL if the queue is empty.
2086  */
2087 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2088 {
2089 	struct spi_message *next;
2090 	unsigned long flags;
2091 
2092 	/* Get a pointer to the next message, if any */
2093 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2094 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2095 					queue);
2096 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2097 
2098 	return next;
2099 }
2100 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
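
/*
 * Illustrative sketch (assumption, not from this file): a driver may peek at
 * the queue from its completion path to decide whether to keep the hardware
 * prepared; foo_relax_hardware() is a hypothetical helper.
 *
 *	if (!spi_get_next_queued_message(ctlr))
 *		foo_relax_hardware(spi_controller_get_devdata(ctlr));
 */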
2101 
2102 /*
2103  * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2104  *                            and spi_maybe_unoptimize_message()
2105  * @msg: the message to unoptimize
2106  *
2107  * Peripheral drivers should use spi_unoptimize_message() and callers inside
2108  * core should use spi_maybe_unoptimize_message() rather than calling this
2109  * function directly.
2110  *
2111  * It is not valid to call this on a message that is not currently optimized.
2112  */
2113 static void __spi_unoptimize_message(struct spi_message *msg)
2114 {
2115 	struct spi_controller *ctlr = msg->spi->controller;
2116 
2117 	if (ctlr->unoptimize_message)
2118 		ctlr->unoptimize_message(msg);
2119 
2120 	spi_res_release(ctlr, msg);
2121 
2122 	msg->optimized = false;
2123 	msg->opt_state = NULL;
2124 }
2125 
2126 /*
2127  * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2128  * @msg: the message to unoptimize
2129  *
2130  * This function is used to unoptimize a message if and only if it was
2131  * optimized by the core (via spi_maybe_optimize_message()).
2132  */
2133 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2134 {
2135 	if (!msg->pre_optimized && msg->optimized &&
2136 	    !msg->spi->controller->defer_optimize_message)
2137 		__spi_unoptimize_message(msg);
2138 }
2139 
2140 /**
2141  * spi_finalize_current_message() - the current message is complete
2142  * @ctlr: the controller to return the message to
2143  *
2144  * Called by the driver to notify the core that the message at the front of the
2145  * queue is complete and can be removed from the queue.
2146  */
2147 void spi_finalize_current_message(struct spi_controller *ctlr)
2148 {
2149 	struct spi_transfer *xfer;
2150 	struct spi_message *mesg;
2151 	int ret;
2152 
2153 	mesg = ctlr->cur_msg;
2154 
2155 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2156 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2157 			ptp_read_system_postts(xfer->ptp_sts);
2158 			xfer->ptp_sts_word_post = xfer->len;
2159 		}
2160 	}
2161 
2162 	if (unlikely(ctlr->ptp_sts_supported))
2163 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2164 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2165 
2166 	spi_unmap_msg(ctlr, mesg);
2167 
2168 	if (mesg->prepared && ctlr->unprepare_message) {
2169 		ret = ctlr->unprepare_message(ctlr, mesg);
2170 		if (ret) {
2171 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2172 				ret);
2173 		}
2174 	}
2175 
2176 	mesg->prepared = false;
2177 
2178 	spi_maybe_unoptimize_message(mesg);
2179 
2180 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2181 	smp_mb(); /* See __spi_pump_transfer_message()... */
2182 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2183 		complete(&ctlr->cur_msg_completion);
2184 
2185 	trace_spi_message_done(mesg);
2186 
2187 	mesg->state = NULL;
2188 	if (mesg->complete)
2189 		mesg->complete(mesg->context);
2190 }
2191 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
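
/*
 * Illustrative sketch (assumption, not from this file): a driver providing
 * its own transfer_one_message() must arrange for this finalization itself;
 * foo_do_transfer() is a hypothetical helper performing one spi_transfer.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		struct spi_transfer *xfer;
 *		int ret = 0;
 *
 *		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
 *			ret = foo_do_transfer(ctlr, msg->spi, xfer);
 *			if (ret)
 *				break;
 *			msg->actual_length += xfer->len;
 *		}
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */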
2192 
2193 static int spi_start_queue(struct spi_controller *ctlr)
2194 {
2195 	unsigned long flags;
2196 
2197 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2198 
2199 	if (ctlr->running || ctlr->busy) {
2200 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2201 		return -EBUSY;
2202 	}
2203 
2204 	ctlr->running = true;
2205 	ctlr->cur_msg = NULL;
2206 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2207 
2208 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2209 
2210 	return 0;
2211 }
2212 
2213 static int spi_stop_queue(struct spi_controller *ctlr)
2214 {
2215 	unsigned int limit = 500;
2216 	unsigned long flags;
2217 
2218 	/*
2219 	 * This is a bit lame, but is optimized for the common execution path.
2220 	 * A wait_queue on the ctlr->busy could be used, but then the common
2221 	 * execution path (pump_messages) would be required to call wake_up or
2222 	 * friends on every SPI message. Do this instead.
2223 	 */
2224 	do {
2225 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2226 		if (list_empty(&ctlr->queue) && !ctlr->busy) {
2227 			ctlr->running = false;
2228 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2229 			return 0;
2230 		}
2231 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2232 		usleep_range(10000, 11000);
2233 	} while (--limit);
2234 
2235 	return -EBUSY;
2236 }
2237 
2238 static int spi_destroy_queue(struct spi_controller *ctlr)
2239 {
2240 	int ret;
2241 
2242 	ret = spi_stop_queue(ctlr);
2243 
2244 	/*
2245 	 * kthread_flush_worker will block until all work is done.
2246 	 * If the reason that stop_queue timed out is that the work will never
2247 	 * finish, then it does no good to flush or stop the worker thread,
2248 	 * so return anyway.
2249 	 */
2250 	if (ret) {
2251 		dev_err(&ctlr->dev, "problem destroying queue\n");
2252 		return ret;
2253 	}
2254 
2255 	kthread_destroy_worker(ctlr->kworker);
2256 
2257 	return 0;
2258 }
2259 
2260 static int __spi_queued_transfer(struct spi_device *spi,
2261 				 struct spi_message *msg,
2262 				 bool need_pump)
2263 {
2264 	struct spi_controller *ctlr = spi->controller;
2265 	unsigned long flags;
2266 
2267 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2268 
2269 	if (!ctlr->running) {
2270 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2271 		return -ESHUTDOWN;
2272 	}
2273 	msg->actual_length = 0;
2274 	msg->status = -EINPROGRESS;
2275 
2276 	list_add_tail(&msg->queue, &ctlr->queue);
2277 	ctlr->queue_empty = false;
2278 	if (!ctlr->busy && need_pump)
2279 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2280 
2281 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2282 	return 0;
2283 }
2284 
2285 /**
2286  * spi_queued_transfer - transfer function for queued transfers
2287  * @spi: SPI device which is requesting transfer
2288  * @msg: SPI message to be handled; it is queued onto the driver queue
2289  *
2290  * Return: zero on success, else a negative error code.
2291  */
2292 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2293 {
2294 	return __spi_queued_transfer(spi, msg, true);
2295 }
2296 
2297 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2298 {
2299 	int ret;
2300 
2301 	ctlr->transfer = spi_queued_transfer;
2302 	if (!ctlr->transfer_one_message)
2303 		ctlr->transfer_one_message = spi_transfer_one_message;
2304 
2305 	/* Initialize and start queue */
2306 	ret = spi_init_queue(ctlr);
2307 	if (ret) {
2308 		dev_err(&ctlr->dev, "problem initializing queue\n");
2309 		goto err_init_queue;
2310 	}
2311 	ctlr->queued = true;
2312 	ret = spi_start_queue(ctlr);
2313 	if (ret) {
2314 		dev_err(&ctlr->dev, "problem starting queue\n");
2315 		goto err_start_queue;
2316 	}
2317 
2318 	return 0;
2319 
2320 err_start_queue:
2321 	spi_destroy_queue(ctlr);
2322 err_init_queue:
2323 	return ret;
2324 }
2325 
2326 /**
2327  * spi_flush_queue - Send all pending messages in the queue from the caller's
2328  *		     context
2329  * @ctlr: controller to process queue for
2330  *
2331  * This should be used when one wants to ensure all pending messages have been
2332  * sent before doing something. Is used by the spi-mem code to make sure SPI
2333  * memory operations do not preempt regular SPI transfers that have been queued
2334  * before the spi-mem operation.
2335  */
2336 void spi_flush_queue(struct spi_controller *ctlr)
2337 {
2338 	if (ctlr->transfer == spi_queued_transfer)
2339 		__spi_pump_messages(ctlr, false);
2340 }
2341 
2342 /*-------------------------------------------------------------------------*/
2343 
2344 #if defined(CONFIG_OF)
2345 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2346 				     struct spi_delay *delay, const char *prop)
2347 {
2348 	u32 value;
2349 
2350 	if (!of_property_read_u32(nc, prop, &value)) {
2351 		if (value > U16_MAX) {
2352 			delay->value = DIV_ROUND_UP(value, 1000);
2353 			delay->unit = SPI_DELAY_UNIT_USECS;
2354 		} else {
2355 			delay->value = value;
2356 			delay->unit = SPI_DELAY_UNIT_NSECS;
2357 		}
2358 	}
2359 }
2360 
2361 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2362 			   struct device_node *nc)
2363 {
2364 	u32 value, cs[SPI_CS_CNT_MAX];
2365 	int rc, idx;
2366 
2367 	/* Mode (clock phase/polarity/etc.) */
2368 	if (of_property_read_bool(nc, "spi-cpha"))
2369 		spi->mode |= SPI_CPHA;
2370 	if (of_property_read_bool(nc, "spi-cpol"))
2371 		spi->mode |= SPI_CPOL;
2372 	if (of_property_read_bool(nc, "spi-3wire"))
2373 		spi->mode |= SPI_3WIRE;
2374 	if (of_property_read_bool(nc, "spi-lsb-first"))
2375 		spi->mode |= SPI_LSB_FIRST;
2376 	if (of_property_read_bool(nc, "spi-cs-high"))
2377 		spi->mode |= SPI_CS_HIGH;
2378 
2379 	/* Device DUAL/QUAD mode */
2380 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2381 		switch (value) {
2382 		case 0:
2383 			spi->mode |= SPI_NO_TX;
2384 			break;
2385 		case 1:
2386 			break;
2387 		case 2:
2388 			spi->mode |= SPI_TX_DUAL;
2389 			break;
2390 		case 4:
2391 			spi->mode |= SPI_TX_QUAD;
2392 			break;
2393 		case 8:
2394 			spi->mode |= SPI_TX_OCTAL;
2395 			break;
2396 		default:
2397 			dev_warn(&ctlr->dev,
2398 				"spi-tx-bus-width %d not supported\n",
2399 				value);
2400 			break;
2401 		}
2402 	}
2403 
2404 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2405 		switch (value) {
2406 		case 0:
2407 			spi->mode |= SPI_NO_RX;
2408 			break;
2409 		case 1:
2410 			break;
2411 		case 2:
2412 			spi->mode |= SPI_RX_DUAL;
2413 			break;
2414 		case 4:
2415 			spi->mode |= SPI_RX_QUAD;
2416 			break;
2417 		case 8:
2418 			spi->mode |= SPI_RX_OCTAL;
2419 			break;
2420 		default:
2421 			dev_warn(&ctlr->dev,
2422 				"spi-rx-bus-width %d not supported\n",
2423 				value);
2424 			break;
2425 		}
2426 	}
2427 
2428 	if (spi_controller_is_target(ctlr)) {
2429 		if (!of_node_name_eq(nc, "slave")) {
2430 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2431 				nc);
2432 			return -EINVAL;
2433 		}
2434 		return 0;
2435 	}
2436 
2437 	if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2438 		dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2439 		return -EINVAL;
2440 	}
2441 
2442 	spi_set_all_cs_unused(spi);
2443 
2444 	/* Device address */
2445 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2446 						 SPI_CS_CNT_MAX);
2447 	if (rc < 0) {
2448 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2449 			nc, rc);
2450 		return rc;
2451 	}
2452 	if (rc > ctlr->num_chipselect) {
2453 		dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2454 			nc, rc);
2455 		return rc;
2456 	}
2457 	if ((of_property_read_bool(nc, "parallel-memories")) &&
2458 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2459 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2460 		return -EINVAL;
2461 	}
2462 	for (idx = 0; idx < rc; idx++)
2463 		spi_set_chipselect(spi, idx, cs[idx]);
2464 
2465 	/*
2466 	 * By default spi->chip_select[0] will hold the physical CS number,
2467 	 * so set bit 0 in spi->cs_index_mask.
2468 	 */
2469 	spi->cs_index_mask = BIT(0);
2470 
2471 	/* Device speed */
2472 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2473 		spi->max_speed_hz = value;
2474 
2475 	/* Device CS delays */
2476 	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2477 	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2478 	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2479 
2480 	return 0;
2481 }
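
/*
 * Illustrative device tree fragment (assumption, shown only to document the
 * properties parsed above); the node name and values are examples:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		spi-tx-bus-width = <4>;
 *		spi-rx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <50>;
 *	};
 */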
2482 
2483 static struct spi_device *
2484 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2485 {
2486 	struct spi_device *spi;
2487 	int rc;
2488 
2489 	/* Alloc an spi_device */
2490 	spi = spi_alloc_device(ctlr);
2491 	if (!spi) {
2492 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2493 		rc = -ENOMEM;
2494 		goto err_out;
2495 	}
2496 
2497 	/* Select device driver */
2498 	rc = of_alias_from_compatible(nc, spi->modalias,
2499 				      sizeof(spi->modalias));
2500 	if (rc < 0) {
2501 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2502 		goto err_out;
2503 	}
2504 
2505 	rc = of_spi_parse_dt(ctlr, spi, nc);
2506 	if (rc)
2507 		goto err_out;
2508 
2509 	/* Store a pointer to the node in the device structure */
2510 	of_node_get(nc);
2511 
2512 	device_set_node(&spi->dev, of_fwnode_handle(nc));
2513 
2514 	/* Register the new device */
2515 	rc = spi_add_device(spi);
2516 	if (rc) {
2517 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2518 		goto err_of_node_put;
2519 	}
2520 
2521 	return spi;
2522 
2523 err_of_node_put:
2524 	of_node_put(nc);
2525 err_out:
2526 	spi_dev_put(spi);
2527 	return ERR_PTR(rc);
2528 }
2529 
2530 /**
2531  * of_register_spi_devices() - Register child devices onto the SPI bus
2532  * @ctlr:	Pointer to spi_controller device
2533  *
2534  * Registers an spi_device for each child node of the controller node that
2535  * represents a valid SPI slave.
2536  */
2537 static void of_register_spi_devices(struct spi_controller *ctlr)
2538 {
2539 	struct spi_device *spi;
2540 	struct device_node *nc;
2541 
2542 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2543 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2544 			continue;
2545 		spi = of_register_spi_device(ctlr, nc);
2546 		if (IS_ERR(spi)) {
2547 			dev_warn(&ctlr->dev,
2548 				 "Failed to create SPI device for %pOF\n", nc);
2549 			of_node_clear_flag(nc, OF_POPULATED);
2550 		}
2551 	}
2552 }
2553 #else
2554 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2555 #endif
2556 
2557 /**
2558  * spi_new_ancillary_device() - Register ancillary SPI device
2559  * @spi:         Pointer to the main SPI device registering the ancillary device
2560  * @chip_select: Chip Select of the ancillary device
2561  *
2562  * Register an ancillary SPI device; for example, some chips have a chip-select
2563  * for normal device usage and another one for setup/firmware upload.
2564  *
2565  * This may only be called from the main SPI device's probe routine.
2566  *
2567  * Return: a pointer to the new device, or ERR_PTR() on error
2568  */
2569 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2570 					     u8 chip_select)
2571 {
2572 	struct spi_controller *ctlr = spi->controller;
2573 	struct spi_device *ancillary;
2574 	int rc;
2575 
2576 	/* Alloc an spi_device */
2577 	ancillary = spi_alloc_device(ctlr);
2578 	if (!ancillary) {
2579 		rc = -ENOMEM;
2580 		goto err_out;
2581 	}
2582 
2583 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2584 
2585 	/* Use provided chip-select for ancillary device */
2586 	spi_set_all_cs_unused(ancillary);
2587 	spi_set_chipselect(ancillary, 0, chip_select);
2588 
2589 	/* Take over SPI mode/speed from SPI main device */
2590 	ancillary->max_speed_hz = spi->max_speed_hz;
2591 	ancillary->mode = spi->mode;
2592 	/*
2593 	 * By default spi->chip_select[0] will hold the physical CS number,
2594 	 * so set bit 0 in spi->cs_index_mask.
2595 	 */
2596 	ancillary->cs_index_mask = BIT(0);
2597 
2598 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2599 
2600 	/* Register the new device */
2601 	rc = __spi_add_device(ancillary);
2602 	if (rc) {
2603 		dev_err(&spi->dev, "failed to register ancillary device\n");
2604 		goto err_out;
2605 	}
2606 
2607 	return ancillary;
2608 
2609 err_out:
2610 	spi_dev_put(ancillary);
2611 	return ERR_PTR(rc);
2612 }
2613 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
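
/*
 * Illustrative sketch (assumption, not from this file): from the main
 * device's probe routine, registering a second chip select for firmware
 * upload; the foo_* names and the chip-select number are hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *fw_dev;
 *
 *		fw_dev = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(fw_dev))
 *			return PTR_ERR(fw_dev);
 *		...
 *	}
 */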
2614 
2615 #ifdef CONFIG_ACPI
2616 struct acpi_spi_lookup {
2617 	struct spi_controller	*ctlr;
2618 	u32			max_speed_hz;
2619 	u32			mode;
2620 	int			irq;
2621 	u8			bits_per_word;
2622 	u8			chip_select;
2623 	int			n;
2624 	int			index;
2625 };
2626 
2627 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2628 {
2629 	struct acpi_resource_spi_serialbus *sb;
2630 	int *count = data;
2631 
2632 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2633 		return 1;
2634 
2635 	sb = &ares->data.spi_serial_bus;
2636 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2637 		return 1;
2638 
2639 	*count = *count + 1;
2640 
2641 	return 1;
2642 }
2643 
2644 /**
2645  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2646  * @adev:	ACPI device
2647  *
2648  * Return: the number of SpiSerialBus resources in the ACPI device's
2649  * resource list, or a negative error code.
2650  */
2651 int acpi_spi_count_resources(struct acpi_device *adev)
2652 {
2653 	LIST_HEAD(r);
2654 	int count = 0;
2655 	int ret;
2656 
2657 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2658 	if (ret < 0)
2659 		return ret;
2660 
2661 	acpi_dev_free_resource_list(&r);
2662 
2663 	return count;
2664 }
2665 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2666 
2667 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2668 					    struct acpi_spi_lookup *lookup)
2669 {
2670 	const union acpi_object *obj;
2671 
2672 	if (!x86_apple_machine)
2673 		return;
2674 
2675 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2676 	    && obj->buffer.length >= 4)
2677 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2678 
2679 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2680 	    && obj->buffer.length == 8)
2681 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2682 
2683 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2684 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2685 		lookup->mode |= SPI_LSB_FIRST;
2686 
2687 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2688 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2689 		lookup->mode |= SPI_CPOL;
2690 
2691 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2692 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2693 		lookup->mode |= SPI_CPHA;
2694 }
2695 
2696 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2697 {
2698 	struct acpi_spi_lookup *lookup = data;
2699 	struct spi_controller *ctlr = lookup->ctlr;
2700 
2701 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2702 		struct acpi_resource_spi_serialbus *sb;
2703 		acpi_handle parent_handle;
2704 		acpi_status status;
2705 
2706 		sb = &ares->data.spi_serial_bus;
2707 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2708 
2709 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2710 				return 1;
2711 
2712 			status = acpi_get_handle(NULL,
2713 						 sb->resource_source.string_ptr,
2714 						 &parent_handle);
2715 
2716 			if (ACPI_FAILURE(status))
2717 				return -ENODEV;
2718 
2719 			if (ctlr) {
2720 				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2721 					return -ENODEV;
2722 			} else {
2723 				struct acpi_device *adev;
2724 
2725 				adev = acpi_fetch_acpi_dev(parent_handle);
2726 				if (!adev)
2727 					return -ENODEV;
2728 
2729 				ctlr = acpi_spi_find_controller_by_adev(adev);
2730 				if (!ctlr)
2731 					return -EPROBE_DEFER;
2732 
2733 				lookup->ctlr = ctlr;
2734 			}
2735 
2736 			/*
2737 			 * ACPI DeviceSelection numbering is handled by the
2738 			 * host controller driver in Windows and can vary
2739 			 * from driver to driver. In Linux we always expect
2740 			 * 0 .. max - 1 so we need to ask the driver to
2741 			 * translate between the two schemes.
2742 			 */
2743 			if (ctlr->fw_translate_cs) {
2744 				int cs = ctlr->fw_translate_cs(ctlr,
2745 						sb->device_selection);
2746 				if (cs < 0)
2747 					return cs;
2748 				lookup->chip_select = cs;
2749 			} else {
2750 				lookup->chip_select = sb->device_selection;
2751 			}
2752 
2753 			lookup->max_speed_hz = sb->connection_speed;
2754 			lookup->bits_per_word = sb->data_bit_length;
2755 
2756 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2757 				lookup->mode |= SPI_CPHA;
2758 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2759 				lookup->mode |= SPI_CPOL;
2760 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2761 				lookup->mode |= SPI_CS_HIGH;
2762 		}
2763 	} else if (lookup->irq < 0) {
2764 		struct resource r;
2765 
2766 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2767 			lookup->irq = r.start;
2768 	}
2769 
2770 	/* Always tell the ACPI core to skip this resource */
2771 	return 1;
2772 }
2773 
2774 /**
2775  * acpi_spi_device_alloc - Allocate an SPI device, and fill it in with ACPI information
2776  * @ctlr: controller to which the spi device belongs
2777  * @adev: ACPI Device for the spi device
2778  * @index: Index of the spi resource inside the ACPI Node
2779  *
2780  * This should be used to allocate a new SPI device from an ACPI Device node.
2781  * The caller is responsible for calling spi_add_device to register the SPI device.
2782  *
2783  * If ctlr is set to NULL, the Controller for the SPI device will be looked up
2784  * using the resource.
2785  * If index is set to -1, index is not used.
2786  * Note: If index is -1, ctlr must be set.
2787  *
2788  * Return: a pointer to the new device, or ERR_PTR on error.
2789  */
2790 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2791 					 struct acpi_device *adev,
2792 					 int index)
2793 {
2794 	acpi_handle parent_handle = NULL;
2795 	struct list_head resource_list;
2796 	struct acpi_spi_lookup lookup = {};
2797 	struct spi_device *spi;
2798 	int ret;
2799 
2800 	if (!ctlr && index == -1)
2801 		return ERR_PTR(-EINVAL);
2802 
2803 	lookup.ctlr		= ctlr;
2804 	lookup.irq		= -1;
2805 	lookup.index		= index;
2806 	lookup.n		= 0;
2807 
2808 	INIT_LIST_HEAD(&resource_list);
2809 	ret = acpi_dev_get_resources(adev, &resource_list,
2810 				     acpi_spi_add_resource, &lookup);
2811 	acpi_dev_free_resource_list(&resource_list);
2812 
2813 	if (ret < 0)
2814 		/* Found SPI in _CRS but it points to another controller */
2815 		return ERR_PTR(ret);
2816 
2817 	if (!lookup.max_speed_hz &&
2818 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2819 	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2820 		/* Apple does not use _CRS but nested devices for SPI slaves */
2821 		acpi_spi_parse_apple_properties(adev, &lookup);
2822 	}
2823 
2824 	if (!lookup.max_speed_hz)
2825 		return ERR_PTR(-ENODEV);
2826 
2827 	spi = spi_alloc_device(lookup.ctlr);
2828 	if (!spi) {
2829 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2830 			dev_name(&adev->dev));
2831 		return ERR_PTR(-ENOMEM);
2832 	}
2833 
2834 	spi_set_all_cs_unused(spi);
2835 	spi_set_chipselect(spi, 0, lookup.chip_select);
2836 
2837 	ACPI_COMPANION_SET(&spi->dev, adev);
2838 	spi->max_speed_hz	= lookup.max_speed_hz;
2839 	spi->mode		|= lookup.mode;
2840 	spi->irq		= lookup.irq;
2841 	spi->bits_per_word	= lookup.bits_per_word;
2842 	/*
2843 	 * By default spi->chip_select[0] will hold the physical CS number,
2844 	 * so set bit 0 in spi->cs_index_mask.
2845 	 */
2846 	spi->cs_index_mask	= BIT(0);
2847 
2848 	return spi;
2849 }
2850 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
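
/*
 * Illustrative sketch (assumption, not from this file): allocating the device
 * described by the first SpiSerialBus resource of @adev and registering it;
 * error handling is abbreviated.
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 0);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);
 */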
2851 
2852 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2853 					    struct acpi_device *adev)
2854 {
2855 	struct spi_device *spi;
2856 
2857 	if (acpi_bus_get_status(adev) || !adev->status.present ||
2858 	    acpi_device_enumerated(adev))
2859 		return AE_OK;
2860 
2861 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
2862 	if (IS_ERR(spi)) {
2863 		if (PTR_ERR(spi) == -ENOMEM)
2864 			return AE_NO_MEMORY;
2865 		else
2866 			return AE_OK;
2867 	}
2868 
2869 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2870 			  sizeof(spi->modalias));
2871 
2872 	if (spi->irq < 0)
2873 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
2874 
2875 	acpi_device_set_enumerated(adev);
2876 
2877 	adev->power.flags.ignore_parent = true;
2878 	if (spi_add_device(spi)) {
2879 		adev->power.flags.ignore_parent = false;
2880 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2881 			dev_name(&adev->dev));
2882 		spi_dev_put(spi);
2883 	}
2884 
2885 	return AE_OK;
2886 }
2887 
2888 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2889 				       void *data, void **return_value)
2890 {
2891 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2892 	struct spi_controller *ctlr = data;
2893 
2894 	if (!adev)
2895 		return AE_OK;
2896 
2897 	return acpi_register_spi_device(ctlr, adev);
2898 }
2899 
2900 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
2901 
2902 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2903 {
2904 	acpi_status status;
2905 	acpi_handle handle;
2906 
2907 	handle = ACPI_HANDLE(ctlr->dev.parent);
2908 	if (!handle)
2909 		return;
2910 
2911 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2912 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2913 				     acpi_spi_add_device, NULL, ctlr, NULL);
2914 	if (ACPI_FAILURE(status))
2915 		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
2916 }
2917 #else
2918 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2919 #endif /* CONFIG_ACPI */
2920 
2921 static void spi_controller_release(struct device *dev)
2922 {
2923 	struct spi_controller *ctlr;
2924 
2925 	ctlr = container_of(dev, struct spi_controller, dev);
2926 	kfree(ctlr);
2927 }
2928 
2929 static struct class spi_master_class = {
2930 	.name		= "spi_master",
2931 	.dev_release	= spi_controller_release,
2932 	.dev_groups	= spi_master_groups,
2933 };
2934 
2935 #ifdef CONFIG_SPI_SLAVE
2936 /**
2937  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
2938  *		     controller
2939  * @spi: device used for the current transfer
2940  */
2941 int spi_slave_abort(struct spi_device *spi)
2942 {
2943 	struct spi_controller *ctlr = spi->controller;
2944 
2945 	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
2946 		return ctlr->slave_abort(ctlr);
2947 
2948 	return -ENOTSUPP;
2949 }
2950 EXPORT_SYMBOL_GPL(spi_slave_abort);
2951 
2952 int spi_target_abort(struct spi_device *spi)
2953 {
2954 	struct spi_controller *ctlr = spi->controller;
2955 
2956 	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2957 		return ctlr->target_abort(ctlr);
2958 
2959 	return -ENOTSUPP;
2960 }
2961 EXPORT_SYMBOL_GPL(spi_target_abort);
2962 
2963 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2964 			  char *buf)
2965 {
2966 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2967 						   dev);
2968 	struct device *child;
2969 
2970 	child = device_find_any_child(&ctlr->dev);
2971 	return sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2972 }
2973 
2974 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2975 			   const char *buf, size_t count)
2976 {
2977 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2978 						   dev);
2979 	struct spi_device *spi;
2980 	struct device *child;
2981 	char name[32];
2982 	int rc;
2983 
2984 	rc = sscanf(buf, "%31s", name);
2985 	if (rc != 1 || !name[0])
2986 		return -EINVAL;
2987 
2988 	child = device_find_any_child(&ctlr->dev);
2989 	if (child) {
2990 		/* Remove registered slave */
2991 		device_unregister(child);
2992 		put_device(child);
2993 	}
2994 
2995 	if (strcmp(name, "(null)")) {
2996 		/* Register new slave */
2997 		spi = spi_alloc_device(ctlr);
2998 		if (!spi)
2999 			return -ENOMEM;
3000 
3001 		strscpy(spi->modalias, name, sizeof(spi->modalias));
3002 
3003 		rc = spi_add_device(spi);
3004 		if (rc) {
3005 			spi_dev_put(spi);
3006 			return rc;
3007 		}
3008 	}
3009 
3010 	return count;
3011 }
3012 
3013 static DEVICE_ATTR_RW(slave);
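
/*
 * From userspace, a slave handler is bound and unbound through this
 * attribute, e.g. (illustrative; "spidev" is one possible handler name):
 *
 *	echo spidev > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */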
3014 
3015 static struct attribute *spi_slave_attrs[] = {
3016 	&dev_attr_slave.attr,
3017 	NULL,
3018 };
3019 
3020 static const struct attribute_group spi_slave_group = {
3021 	.attrs = spi_slave_attrs,
3022 };
3023 
3024 static const struct attribute_group *spi_slave_groups[] = {
3025 	&spi_controller_statistics_group,
3026 	&spi_slave_group,
3027 	NULL,
3028 };
3029 
3030 static struct class spi_slave_class = {
3031 	.name		= "spi_slave",
3032 	.dev_release	= spi_controller_release,
3033 	.dev_groups	= spi_slave_groups,
3034 };
3035 #else
3036 extern struct class spi_slave_class;	/* dummy */
3037 #endif
3038 
3039 /**
3040  * __spi_alloc_controller - allocate an SPI master or slave controller
3041  * @dev: physical device of the controller, possibly on the platform_bus
3042  * @size: how much zeroed driver-private data to allocate; the pointer to this
3043  *	memory is in the driver_data field of the returned device, accessible
3044  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
3045  *	drivers granting DMA access to portions of their private data need to
3046  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
3047  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
3048  *	slave (true) controller
3049  * Context: can sleep
3050  *
3051  * This call is used only by SPI controller drivers, which are the
3052  * only ones directly touching chip registers.  It's how they allocate
3053  * an spi_controller structure, prior to calling spi_register_controller().
3054  *
3055  * This must be called from context that can sleep.
3056  *
3057  * The caller is responsible for assigning the bus number and initializing the
3058  * controller's methods before calling spi_register_controller(); and (after
3059  * errors adding the device) calling spi_controller_put() to prevent a memory
3060  * leak.
3061  *
3062  * Return: the SPI controller structure on success, else NULL.
3063  */
3064 struct spi_controller *__spi_alloc_controller(struct device *dev,
3065 					      unsigned int size, bool slave)
3066 {
3067 	struct spi_controller	*ctlr;
3068 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3069 
3070 	if (!dev)
3071 		return NULL;
3072 
3073 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3074 	if (!ctlr)
3075 		return NULL;
3076 
3077 	device_initialize(&ctlr->dev);
3078 	INIT_LIST_HEAD(&ctlr->queue);
3079 	spin_lock_init(&ctlr->queue_lock);
3080 	spin_lock_init(&ctlr->bus_lock_spinlock);
3081 	mutex_init(&ctlr->bus_lock_mutex);
3082 	mutex_init(&ctlr->io_mutex);
3083 	mutex_init(&ctlr->add_lock);
3084 	ctlr->bus_num = -1;
3085 	ctlr->num_chipselect = 1;
3086 	ctlr->slave = slave;
3087 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
3088 		ctlr->dev.class = &spi_slave_class;
3089 	else
3090 		ctlr->dev.class = &spi_master_class;
3091 	ctlr->dev.parent = dev;
3092 	pm_suspend_ignore_children(&ctlr->dev, true);
3093 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3094 
3095 	return ctlr;
3096 }
3097 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
3098 
3099 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3100 {
3101 	spi_controller_put(*(struct spi_controller **)ctlr);
3102 }
3103 
3104 /**
3105  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3106  * @dev: physical device of SPI controller
3107  * @size: how much zeroed driver-private data to allocate
3108  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
3109  * Context: can sleep
3110  *
3111  * Allocate an SPI controller and automatically release a reference on it
3112  * when @dev is unbound from its driver.  Drivers are thus relieved from
3113  * having to call spi_controller_put().
3114  *
3115  * The arguments to this function are identical to __spi_alloc_controller().
3116  *
3117  * Return: the SPI controller structure on success, else NULL.
3118  */
3119 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3120 						   unsigned int size,
3121 						   bool slave)
3122 {
3123 	struct spi_controller **ptr, *ctlr;
3124 
3125 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3126 			   GFP_KERNEL);
3127 	if (!ptr)
3128 		return NULL;
3129 
3130 	ctlr = __spi_alloc_controller(dev, size, slave);
3131 	if (ctlr) {
3132 		ctlr->devm_allocated = true;
3133 		*ptr = ctlr;
3134 		devres_add(dev, ptr);
3135 	} else {
3136 		devres_free(ptr);
3137 	}
3138 
3139 	return ctlr;
3140 }
3141 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
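
/*
 * Illustrative sketch (assumption, not from this file): a platform driver
 * allocating a host controller together with its private data through the
 * devm_spi_alloc_master() wrapper from <linux/spi/spi.h>; struct foo_priv is
 * hypothetical.
 *
 *	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */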
3142 
3143 /**
3144  * spi_get_gpio_descs() - grab chip select GPIOs for the master
3145  * @ctlr: The SPI master to grab GPIO descriptors for
3146  */
3147 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3148 {
3149 	int nb, i;
3150 	struct gpio_desc **cs;
3151 	struct device *dev = &ctlr->dev;
3152 	unsigned long native_cs_mask = 0;
3153 	unsigned int num_cs_gpios = 0;
3154 
3155 	nb = gpiod_count(dev, "cs");
3156 	if (nb < 0) {
3157 		/* No GPIOs at all is fine, else return the error */
3158 		if (nb == -ENOENT)
3159 			return 0;
3160 		return nb;
3161 	}
3162 
3163 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3164 
3165 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3166 			  GFP_KERNEL);
3167 	if (!cs)
3168 		return -ENOMEM;
3169 	ctlr->cs_gpiods = cs;
3170 
3171 	for (i = 0; i < nb; i++) {
3172 		/*
3173 		 * Most chipselects are active low, the inverted
3174 		 * semantics are handled by special quirks in gpiolib,
3175 		 * so initializing them GPIOD_OUT_LOW here means
3176 		 * "unasserted", in most cases this will drive the physical
3177 		 * line high.
3178 		 */
3179 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3180 						      GPIOD_OUT_LOW);
3181 		if (IS_ERR(cs[i]))
3182 			return PTR_ERR(cs[i]);
3183 
3184 		if (cs[i]) {
3185 			/*
3186 			 * If we find a CS GPIO, name it after the device and
3187 			 * chip select line.
3188 			 */
3189 			char *gpioname;
3190 
3191 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3192 						  dev_name(dev), i);
3193 			if (!gpioname)
3194 				return -ENOMEM;
3195 			gpiod_set_consumer_name(cs[i], gpioname);
3196 			num_cs_gpios++;
3197 			continue;
3198 		}
3199 
3200 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3201 			dev_err(dev, "Invalid native chip select %d\n", i);
3202 			return -EINVAL;
3203 		}
3204 		native_cs_mask |= BIT(i);
3205 	}
3206 
3207 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3208 
3209 	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3210 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3211 		dev_err(dev, "No unused native chip select available\n");
3212 		return -EINVAL;
3213 	}
3214 
3215 	return 0;
3216 }
3217 
3218 static int spi_controller_check_ops(struct spi_controller *ctlr)
3219 {
3220 	/*
3221 	 * The controller may implement only the high-level SPI-memory-like
3222 	 * operations if it does not support regular SPI transfers, and this is
3223 	 * a valid use case.
3224 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3225 	 * one of the ->transfer_xxx() methods be implemented.
3226 	 */
3227 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3228 		if (!ctlr->transfer && !ctlr->transfer_one &&
3229 		   !ctlr->transfer_one_message) {
3230 			return -EINVAL;
3231 		}
3232 	}
3233 
3234 	return 0;
3235 }
3236 
3237 /* Allocate dynamic bus number using Linux idr */
3238 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3239 {
3240 	int id;
3241 
3242 	mutex_lock(&board_lock);
3243 	id = idr_alloc(&spi_master_idr, ctlr, start, end, GFP_KERNEL);
3244 	mutex_unlock(&board_lock);
3245 	if (WARN(id < 0, "couldn't get idr"))
3246 		return id == -ENOSPC ? -EBUSY : id;
3247 	ctlr->bus_num = id;
3248 	return 0;
3249 }
3250 
3251 /**
3252  * spi_register_controller - register SPI master or slave controller
3253  * @ctlr: initialized master, originally from spi_alloc_master() or
3254  *	spi_alloc_slave()
3255  * Context: can sleep
3256  *
3257  * SPI controllers connect to their drivers using some non-SPI bus,
3258  * such as the platform bus.  The final stage of probe() in that code
3259  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3260  *
3261  * SPI controllers use board-specific (often SoC-specific) bus numbers,
3262  * and board-specific addressing for SPI devices combines those numbers
3263  * with chip select numbers.  Since SPI does not directly support dynamic
3264  * device identification, boards need configuration tables telling which
3265  * chip is at which address.
3266  *
3267  * This must be called from context that can sleep.  It returns zero on
3268  * success, else a negative error code (dropping the controller's refcount).
3269  * After a successful return, the caller is responsible for calling
3270  * spi_unregister_controller().
3271  *
3272  * Return: zero on success, else a negative error code.
3273  */
3274 int spi_register_controller(struct spi_controller *ctlr)
3275 {
3276 	struct device		*dev = ctlr->dev.parent;
3277 	struct boardinfo	*bi;
3278 	int			first_dynamic;
3279 	int			status;
3280 	int			idx;
3281 
3282 	if (!dev)
3283 		return -ENODEV;
3284 
3285 	/*
3286 	 * Make sure all necessary hooks are implemented before registering
3287 	 * the SPI controller.
3288 	 */
3289 	status = spi_controller_check_ops(ctlr);
3290 	if (status)
3291 		return status;
3292 
3293 	if (ctlr->bus_num < 0)
3294 		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3295 	if (ctlr->bus_num >= 0) {
3296 		/* Devices with a fixed bus number must claim exactly that number */
3297 		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3298 		if (status)
3299 			return status;
3300 	}
3301 	if (ctlr->bus_num < 0) {
3302 		first_dynamic = of_alias_get_highest_id("spi");
3303 		if (first_dynamic < 0)
3304 			first_dynamic = 0;
3305 		else
3306 			first_dynamic++;
3307 
3308 		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3309 		if (status)
3310 			return status;
3311 	}
3312 	ctlr->bus_lock_flag = 0;
3313 	init_completion(&ctlr->xfer_completion);
3314 	init_completion(&ctlr->cur_msg_completion);
3315 	if (!ctlr->max_dma_len)
3316 		ctlr->max_dma_len = INT_MAX;
3317 
3318 	/*
3319 	 * Register the device, then userspace will see it.
3320 	 * Registration fails if the bus ID is in use.
3321 	 */
3322 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3323 
3324 	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3325 		status = spi_get_gpio_descs(ctlr);
3326 		if (status)
3327 			goto free_bus_id;
3328 		/*
3329 		 * A controller using GPIO descriptors always
3330 		 * supports SPI_CS_HIGH if need be.
3331 		 */
3332 		ctlr->mode_bits |= SPI_CS_HIGH;
3333 	}
3334 
3335 	/*
3336 	 * Even if it's just one always-selected device, there must
3337 	 * be at least one chipselect.
3338 	 */
3339 	if (!ctlr->num_chipselect) {
3340 		status = -EINVAL;
3341 		goto free_bus_id;
3342 	}
3343 
3344 	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
3345 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3346 		ctlr->last_cs[idx] = SPI_INVALID_CS;
3347 
3348 	status = device_add(&ctlr->dev);
3349 	if (status < 0)
3350 		goto free_bus_id;
3351 	dev_dbg(dev, "registered %s %s\n",
3352 			spi_controller_is_target(ctlr) ? "target" : "host",
3353 			dev_name(&ctlr->dev));
3354 
3355 	/*
3356 	 * If we're using a queued driver, start the queue. Note that we don't
3357 	 * need the queueing logic if the driver is only supporting high-level
3358 	 * memory operations.
3359 	 */
3360 	if (ctlr->transfer) {
3361 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3362 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3363 		status = spi_controller_initialize_queue(ctlr);
3364 		if (status) {
3365 			device_del(&ctlr->dev);
3366 			goto free_bus_id;
3367 		}
3368 	}
3369 	/* Add statistics */
3370 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3371 	if (!ctlr->pcpu_statistics) {
3372 		dev_err(dev, "Error allocating per-cpu statistics\n");
3373 		status = -ENOMEM;
3374 		goto destroy_queue;
3375 	}
3376 
3377 	mutex_lock(&board_lock);
3378 	list_add_tail(&ctlr->list, &spi_controller_list);
3379 	list_for_each_entry(bi, &board_list, list)
3380 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3381 	mutex_unlock(&board_lock);
3382 
3383 	/* Register devices from the device tree and ACPI */
3384 	of_register_spi_devices(ctlr);
3385 	acpi_register_spi_devices(ctlr);
3386 	return status;
3387 
3388 destroy_queue:
3389 	spi_destroy_queue(ctlr);
3390 free_bus_id:
3391 	mutex_lock(&board_lock);
3392 	idr_remove(&spi_master_idr, ctlr->bus_num);
3393 	mutex_unlock(&board_lock);
3394 	return status;
3395 }
3396 EXPORT_SYMBOL_GPL(spi_register_controller);
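
/*
 * Illustrative sketch (assumption, not from this file): the minimal fields a
 * host driver might set before registering. Setting bus_num to -1 requests a
 * dynamic bus number; foo_transfer_one() and the mode bits are examples only.
 *
 *	ctlr->bus_num = -1;
 *	ctlr->num_chipselect = 4;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->dev.of_node = pdev->dev.of_node;
 *	status = spi_register_controller(ctlr);
 *	if (status)
 *		spi_controller_put(ctlr);
 */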
3397 
3398 static void devm_spi_unregister(struct device *dev, void *res)
3399 {
3400 	spi_unregister_controller(*(struct spi_controller **)res);
3401 }
3402 
3403 /**
3404  * devm_spi_register_controller - register managed SPI master or slave
3405  *	controller
3406  * @dev:    device managing SPI controller
3407  * @ctlr: initialized controller, originally from spi_alloc_master() or
3408  *	spi_alloc_slave()
3409  * Context: can sleep
3410  *
3411  * Register an SPI controller as with spi_register_controller(), which will
3412  * automatically be unregistered and freed when @dev is unbound.
3413  *
3414  * Return: zero on success, else a negative error code.
3415  */
3416 int devm_spi_register_controller(struct device *dev,
3417 				 struct spi_controller *ctlr)
3418 {
3419 	struct spi_controller **ptr;
3420 	int ret;
3421 
3422 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3423 	if (!ptr)
3424 		return -ENOMEM;
3425 
3426 	ret = spi_register_controller(ctlr);
3427 	if (!ret) {
3428 		*ptr = ctlr;
3429 		devres_add(dev, ptr);
3430 	} else {
3431 		devres_free(ptr);
3432 	}
3433 
3434 	return ret;
3435 }
3436 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
3437 
3438 static int __unregister(struct device *dev, void *null)
3439 {
3440 	spi_unregister_device(to_spi_device(dev));
3441 	return 0;
3442 }
3443 
3444 /**
3445  * spi_unregister_controller - unregister SPI master or slave controller
3446  * @ctlr: the controller being unregistered
3447  * Context: can sleep
3448  *
3449  * This call is used only by SPI controller drivers, which are the
3450  * only ones directly touching chip registers.
3451  *
3452  * This must be called from context that can sleep.
3453  *
3454  * Note that this function also drops a reference to the controller.
3455  */
3456 void spi_unregister_controller(struct spi_controller *ctlr)
3457 {
3458 	struct spi_controller *found;
3459 	int id = ctlr->bus_num;
3460 
3461 	/* Prevent addition of new devices, unregister existing ones */
3462 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3463 		mutex_lock(&ctlr->add_lock);
3464 
3465 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3466 
3467 	/* First make sure that this controller was ever added */
3468 	mutex_lock(&board_lock);
3469 	found = idr_find(&spi_master_idr, id);
3470 	mutex_unlock(&board_lock);
3471 	if (ctlr->queued) {
3472 		if (spi_destroy_queue(ctlr))
3473 			dev_err(&ctlr->dev, "queue remove failed\n");
3474 	}
3475 	mutex_lock(&board_lock);
3476 	list_del(&ctlr->list);
3477 	mutex_unlock(&board_lock);
3478 
3479 	device_del(&ctlr->dev);
3480 
3481 	/* Free bus id */
3482 	mutex_lock(&board_lock);
3483 	if (found == ctlr)
3484 		idr_remove(&spi_master_idr, id);
3485 	mutex_unlock(&board_lock);
3486 
3487 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3488 		mutex_unlock(&ctlr->add_lock);
3489 
3490 	/*
3491 	 * Release the last reference on the controller if its driver
3492 	 * has not yet been converted to devm_spi_alloc_master/slave().
3493 	 */
3494 	if (!ctlr->devm_allocated)
3495 		put_device(&ctlr->dev);
3496 }
3497 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3498 
3499 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3500 {
3501 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3502 }
3503 
3504 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3505 {
3506 	mutex_lock(&ctlr->bus_lock_mutex);
3507 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3508 	mutex_unlock(&ctlr->bus_lock_mutex);
3509 }
3510 
3511 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3512 {
3513 	mutex_lock(&ctlr->bus_lock_mutex);
3514 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3515 	mutex_unlock(&ctlr->bus_lock_mutex);
3516 }
3517 
3518 int spi_controller_suspend(struct spi_controller *ctlr)
3519 {
3520 	int ret = 0;
3521 
3522 	/* Basically no-ops for non-queued controllers */
3523 	if (ctlr->queued) {
3524 		ret = spi_stop_queue(ctlr);
3525 		if (ret)
3526 			dev_err(&ctlr->dev, "queue stop failed\n");
3527 	}
3528 
3529 	__spi_mark_suspended(ctlr);
3530 	return ret;
3531 }
3532 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3533 
3534 int spi_controller_resume(struct spi_controller *ctlr)
3535 {
3536 	int ret = 0;
3537 
3538 	__spi_mark_resumed(ctlr);
3539 
3540 	if (ctlr->queued) {
3541 		ret = spi_start_queue(ctlr);
3542 		if (ret)
3543 			dev_err(&ctlr->dev, "queue restart failed\n");
3544 	}
3545 	return ret;
3546 }
3547 EXPORT_SYMBOL_GPL(spi_controller_resume);
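
/*
 * Illustrative sketch (assumption, not from this file): pairing these helpers
 * with a driver's own clock handling in its system sleep callbacks; struct
 * foo_priv and priv->clk are hypothetical, and the driver is assumed to have
 * stored the controller as its drvdata.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct spi_controller *ctlr = dev_get_drvdata(dev);
 *		struct foo_priv *priv = spi_controller_get_devdata(ctlr);
 *		int ret;
 *
 *		ret = spi_controller_suspend(ctlr);
 *		if (!ret)
 *			clk_disable_unprepare(priv->clk);
 *		return ret;
 *	}
 */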
3548 
3549 /*-------------------------------------------------------------------------*/
3550 
3551 /* Core methods for spi_message alterations */
3552 
3553 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3554 					    struct spi_message *msg,
3555 					    void *res)
3556 {
3557 	struct spi_replaced_transfers *rxfer = res;
3558 	size_t i;
3559 
3560 	/* Call extra callback if requested */
3561 	if (rxfer->release)
3562 		rxfer->release(ctlr, msg, res);
3563 
3564 	/* Insert replaced transfers back into the message */
3565 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3566 
3567 	/* Remove the formerly inserted entries */
3568 	for (i = 0; i < rxfer->inserted; i++)
3569 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3570 }
3571 
3572 /**
3573  * spi_replace_transfers - replace transfers with several transfers
3574  *                         and register change with spi_message.resources
3575  * @msg:           the spi_message we work upon
3576  * @xfer_first:    the first spi_transfer we want to replace
3577  * @remove:        number of transfers to remove
3578  * @insert:        the number of transfers we want to insert instead
3579  * @release:       extra release code necessary in some circumstances
3580  * @extradatasize: extra data to allocate (with alignment guarantees
3581  *                 of struct @spi_transfer)
3582  * @gfp:           gfp flags
3583  *
3584  * Returns: pointer to @spi_replaced_transfers,
3585  *          PTR_ERR(...) in case of errors.
3586  */
3587 static struct spi_replaced_transfers *spi_replace_transfers(
3588 	struct spi_message *msg,
3589 	struct spi_transfer *xfer_first,
3590 	size_t remove,
3591 	size_t insert,
3592 	spi_replaced_release_t release,
3593 	size_t extradatasize,
3594 	gfp_t gfp)
3595 {
3596 	struct spi_replaced_transfers *rxfer;
3597 	struct spi_transfer *xfer;
3598 	size_t i;
3599 
3600 	/* Allocate the structure using spi_res */
3601 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3602 			      struct_size(rxfer, inserted_transfers, insert)
3603 			      + extradatasize,
3604 			      gfp);
3605 	if (!rxfer)
3606 		return ERR_PTR(-ENOMEM);
3607 
3608 	/* The release code to invoke before running the generic release */
3609 	rxfer->release = release;
3610 
3611 	/* Assign extradata */
3612 	if (extradatasize)
3613 		rxfer->extradata =
3614 			&rxfer->inserted_transfers[insert];
3615 
3616 	/* Init the replaced_transfers list */
3617 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3618 
3619 	/*
3620 	 * Assign the list_entry after which we should reinsert
3621 	 * the @replaced_transfers - it may be spi_message.transfers!
3622 	 */
3623 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3624 
3625 	/* Remove the requested number of transfers */
3626 	for (i = 0; i < remove; i++) {
3627 		/*
3628 		 * If the entry after replaced_after is msg->transfers,
3629 		 * then we have been requested to remove more transfers
3630 		 * than are in the list.
3631 		 */
3632 		if (rxfer->replaced_after->next == &msg->transfers) {
3633 			dev_err(&msg->spi->dev,
3634 				"requested to remove more spi_transfers than are available\n");
3635 			/* Insert replaced transfers back into the message */
3636 			list_splice(&rxfer->replaced_transfers,
3637 				    rxfer->replaced_after);
3638 
3639 			/* Free the spi_replaced_transfers structure... */
3640 			spi_res_free(rxfer);
3641 
3642 			/* ...and return with an error */
3643 			return ERR_PTR(-EINVAL);
3644 		}
3645 
3646 		/*
3647 		 * Remove the entry after replaced_after from list of
3648 		 * transfers and add it to list of replaced_transfers.
3649 		 */
3650 		list_move_tail(rxfer->replaced_after->next,
3651 			       &rxfer->replaced_transfers);
3652 	}
3653 
3654 	/*
3655 	 * Create copies of the given xfer with identical settings,
3656 	 * based on the first transfer to be removed.
3657 	 */
3658 	for (i = 0; i < insert; i++) {
3659 		/* We need to run in reverse order */
3660 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3661 
3662 		/* Copy all spi_transfer data */
3663 		memcpy(xfer, xfer_first, sizeof(*xfer));
3664 
3665 		/* Add to list */
3666 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3667 
3668 		/* Clear cs_change and delay for all but the last */
3669 		if (i) {
3670 			xfer->cs_change = false;
3671 			xfer->delay.value = 0;
3672 		}
3673 	}
3674 
3675 	/* Set up inserted... */
3676 	rxfer->inserted = insert;
3677 
3678 	/* ...and register it with spi_res/spi_message */
3679 	spi_res_add(msg, rxfer);
3680 
3681 	return rxfer;
3682 }
3683 
3684 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3685 					struct spi_message *msg,
3686 					struct spi_transfer **xferp,
3687 					size_t maxsize)
3688 {
3689 	struct spi_transfer *xfer = *xferp, *xfers;
3690 	struct spi_replaced_transfers *srt;
3691 	size_t offset;
3692 	size_t count, i;
3693 
3694 	/* Calculate how many we have to replace */
3695 	count = DIV_ROUND_UP(xfer->len, maxsize);
3696 
3697 	/* Create replacement */
3698 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3699 	if (IS_ERR(srt))
3700 		return PTR_ERR(srt);
3701 	xfers = srt->inserted_transfers;
3702 
3703 	/*
3704 	 * Now handle each of those newly inserted spi_transfers.
3705 	 * Note that the replacement spi_transfers are all preset
3706 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3707 	 * are all identical (as are most other fields);
3708 	 * we just have to fix up len and the buffer pointers.
3709 	 */
3710 
3711 	/*
3712 	 * The first transfer just needs the length modified, so we
3713 	 * run it outside the loop.
3714 	 */
3715 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3716 
3717 	/* All the others need rx_buf/tx_buf also set */
3718 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3719 		/* Update rx_buf, tx_buf and DMA */
3720 		if (xfers[i].rx_buf)
3721 			xfers[i].rx_buf += offset;
3722 		if (xfers[i].tx_buf)
3723 			xfers[i].tx_buf += offset;
3724 
3725 		/* Update length */
3726 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3727 	}
3728 
3729 	/*
3730 	 * We set up xferp to the last entry we have inserted,
3731 	 * so that we skip those already split transfers.
3732 	 */
3733 	*xferp = &xfers[count - 1];
3734 
3735 	/* Increment statistics counters */
3736 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3737 				       transfers_split_maxsize);
3738 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3739 				       transfers_split_maxsize);
3740 
3741 	return 0;
3742 }
3743 
3744 /**
3745  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3746  *                               when an individual transfer exceeds a
3747  *                               certain size
3748  * @ctlr:    the @spi_controller for this transfer
3749  * @msg:     the @spi_message to transform
3750  * @maxsize: the maximum length, in bytes, of each resulting transfer
3751  *
3752  * This function allocates resources that are automatically freed during the
3753  * spi message unoptimize phase, so it should only be called from
3754  * optimize_message callbacks.
3755  *
3756  * Return: status of transformation
3757  */
3758 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3759 				struct spi_message *msg,
3760 				size_t maxsize)
3761 {
3762 	struct spi_transfer *xfer;
3763 	int ret;
3764 
3765 	/*
3766 	 * Iterate over the transfer_list,
3767 	 * but note that xfer is advanced to the last transfer inserted
3768 	 * to avoid checking sizes again unnecessarily (also xfer does
3769 	 * potentially belong to a different list by the time the
3770 	 * replacement has happened).
3771 	 */
3772 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3773 		if (xfer->len > maxsize) {
3774 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3775 							   maxsize);
3776 			if (ret)
3777 				return ret;
3778 		}
3779 	}
3780 
3781 	return 0;
3782 }
3783 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
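
/*
 * Illustrative sketch (hypothetical "foo" controller driver): a controller
 * whose DMA engine handles at most 64 KiB per descriptor could split
 * oversized transfers from its optimize_message() callback:
 *
 *	static int foo_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(msg->spi->controller,
 *						   msg, SZ_64K);
 *	}
 */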
3784 
3785 
3786 /**
3787  * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3788  *                                when an individual transfer exceeds a
3789  *                                certain number of SPI words
3790  * @ctlr:     the @spi_controller for this transfer
3791  * @msg:      the @spi_message to transform
3792  * @maxwords: the number of words to limit each transfer to
3793  *
3794  * This function allocates resources that are automatically freed during the
3795  * spi message unoptimize phase, so it should only be called from
3796  * optimize_message callbacks.
3797  *
3798  * Return: status of transformation
3799  */
3800 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3801 				 struct spi_message *msg,
3802 				 size_t maxwords)
3803 {
3804 	struct spi_transfer *xfer;
3805 
3806 	/*
3807 	 * Iterate over the transfer_list,
3808 	 * but note that xfer is advanced to the last transfer inserted
3809 	 * to avoid checking sizes again unnecessarily (also xfer does
3810 	 * potentially belong to a different list by the time the
3811 	 * replacement has happened).
3812 	 */
3813 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3814 		size_t maxsize;
3815 		int ret;
3816 
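		/*
		 * In memory each word occupies BITS_TO_BYTES(bits_per_word)
		 * bytes, rounded up to a power of two, hence the byte limit
		 * derived from @maxwords below.
		 */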
3817 		maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
3818 		if (xfer->len > maxsize) {
3819 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3820 							   maxsize);
3821 			if (ret)
3822 				return ret;
3823 		}
3824 	}
3825 
3826 	return 0;
3827 }
3828 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
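
/*
 * Illustrative sketch (hypothetical "foo" controller driver): a controller
 * whose FIFO must hold a whole transfer, at most 256 words deep, could use:
 *
 *	static int foo_optimize_message(struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxwords(msg->spi->controller,
 *						    msg, 256);
 *	}
 *
 * For a transfer using 12 bits_per_word, BITS_TO_BYTES(12) is 2 and
 * roundup_pow_of_two(2) is 2, so each resulting transfer is capped at
 * 256 * 2 = 512 bytes.
 */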
3829 
3830 /*-------------------------------------------------------------------------*/
3831 
3832 /*
3833  * Core methods for SPI controller protocol drivers. Some of the
3834  * other core methods are currently defined as inline functions.
3835  */
3836 
3837 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3838 					u8 bits_per_word)
3839 {
3840 	if (ctlr->bits_per_word_mask) {
3841 		/* Only 32 bits fit in the mask */
3842 		if (bits_per_word > 32)
3843 			return -EINVAL;
3844 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3845 			return -EINVAL;
3846 	}
3847 
3848 	return 0;
3849 }
3850 
3851 /**
3852  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3853  * @spi: the device that requires specific CS timing configuration
3854  *
3855  * Return: zero on success, else a negative error code.
3856  */
3857 static int spi_set_cs_timing(struct spi_device *spi)
3858 {
3859 	struct device *parent = spi->controller->dev.parent;
3860 	int status = 0;
3861 
3862 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3863 		if (spi->controller->auto_runtime_pm) {
3864 			status = pm_runtime_get_sync(parent);
3865 			if (status < 0) {
3866 				pm_runtime_put_noidle(parent);
3867 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3868 					status);
3869 				return status;
3870 			}
3871 
3872 			status = spi->controller->set_cs_timing(spi);
3873 			pm_runtime_mark_last_busy(parent);
3874 			pm_runtime_put_autosuspend(parent);
3875 		} else {
3876 			status = spi->controller->set_cs_timing(spi);
3877 		}
3878 	}
3879 	return status;
3880 }
3881 
3882 /**
3883  * spi_setup - setup SPI mode and clock rate
3884  * @spi: the device whose settings are being modified
3885  * Context: can sleep, and no requests are queued to the device
3886  *
3887  * SPI protocol drivers may need to update the transfer mode if the
3888  * device doesn't work with its default.  They may likewise need
3889  * to update clock rates or word sizes from initial values.  This function
3890  * changes those settings, and must be called from a context that can sleep.
3891  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3892  * effect the next time the device is selected and data is transferred to
3893  * or from it.  When this function returns, the SPI device is deselected.
3894  *
3895  * Note that this call will fail if the protocol driver specifies an option
3896  * that the underlying controller or its driver does not support.  For
3897  * example, not all hardware supports wire transfers using nine bit words,
3898  * LSB-first wire encoding, or active-high chipselects.
3899  *
3900  * Return: zero on success, else a negative error code.
3901  */
3902 int spi_setup(struct spi_device *spi)
3903 {
3904 	unsigned	bad_bits, ugly_bits;
3905 	int		status;
3906 
3907 	/*
3908 	 * Check mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3909 	 * from being set at the same time.
3910 	 */
3911 	if ((hweight_long(spi->mode &
3912 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3913 	    (hweight_long(spi->mode &
3914 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3915 		dev_err(&spi->dev,
3916 		"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3917 		return -EINVAL;
3918 	}
3919 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3920 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3921 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3922 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3923 		return -EINVAL;
3924 	/* Check against conflicting MOSI idle configuration */
3925 	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3926 		dev_err(&spi->dev,
3927 			"setup: MOSI configured to idle low and high at the same time.\n");
3928 		return -EINVAL;
3929 	}
3930 	/*
3931 	 * Help drivers fail *cleanly* when they need options
3932 	 * that aren't supported with their current controller.
3933 	 * SPI_CS_WORD has a fallback software implementation,
3934 	 * so it is ignored here.
3935 	 */
3936 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3937 				 SPI_NO_TX | SPI_NO_RX);
3938 	ugly_bits = bad_bits &
3939 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3940 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3941 	if (ugly_bits) {
3942 		dev_warn(&spi->dev,
3943 			 "setup: ignoring unsupported mode bits %x\n",
3944 			 ugly_bits);
3945 		spi->mode &= ~ugly_bits;
3946 		bad_bits &= ~ugly_bits;
3947 	}
3948 	if (bad_bits) {
3949 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3950 			bad_bits);
3951 		return -EINVAL;
3952 	}
3953 
3954 	if (!spi->bits_per_word) {
3955 		spi->bits_per_word = 8;
3956 	} else {
3957 		/*
3958 		 * Some controllers may not support the default 8 bits-per-word
3959 		 * so only perform the check when this is explicitly provided.
3960 		 */
3961 		status = __spi_validate_bits_per_word(spi->controller,
3962 						      spi->bits_per_word);
3963 		if (status)
3964 			return status;
3965 	}
3966 
3967 	if (spi->controller->max_speed_hz &&
3968 	    (!spi->max_speed_hz ||
3969 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3970 		spi->max_speed_hz = spi->controller->max_speed_hz;
3971 
3972 	mutex_lock(&spi->controller->io_mutex);
3973 
3974 	if (spi->controller->setup) {
3975 		status = spi->controller->setup(spi);
3976 		if (status) {
3977 			mutex_unlock(&spi->controller->io_mutex);
3978 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3979 				status);
3980 			return status;
3981 		}
3982 	}
3983 
3984 	status = spi_set_cs_timing(spi);
3985 	if (status) {
3986 		mutex_unlock(&spi->controller->io_mutex);
3987 		return status;
3988 	}
3989 
3990 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3991 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3992 		if (status < 0) {
3993 			mutex_unlock(&spi->controller->io_mutex);
3994 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3995 				status);
3996 			return status;
3997 		}
3998 
3999 		/*
4000 		 * We do not want to return a positive value from pm_runtime_get;
4001 		 * there are many instances of drivers calling spi_setup() and
4002 		 * checking for a non-zero return value instead of a negative
4003 		 * return value.
4004 		 */
4005 		status = 0;
4006 
4007 		spi_set_cs(spi, false, true);
4008 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
4009 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
4010 	} else {
4011 		spi_set_cs(spi, false, true);
4012 	}
4013 
4014 	mutex_unlock(&spi->controller->io_mutex);
4015 
4016 	if (spi->rt && !spi->controller->rt) {
4017 		spi->controller->rt = true;
4018 		spi_set_thread_rt(spi->controller);
4019 	}
4020 
4021 	trace_spi_setup(spi, status);
4022 
4023 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4024 			spi->mode & SPI_MODE_X_MASK,
4025 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4026 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4027 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4028 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
4029 			spi->bits_per_word, spi->max_speed_hz,
4030 			status);
4031 
4032 	return status;
4033 }
4034 EXPORT_SYMBOL_GPL(spi_setup);
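
/*
 * Illustrative sketch (values hypothetical): a peripheral driver usually
 * adjusts the fields it cares about and then calls spi_setup() from probe():
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_3;
 *		spi->bits_per_word = 8;
 *		spi->max_speed_hz = 1000000;
 *
 *		return spi_setup(spi);
 *	}
 */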
4035 
4036 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4037 				       struct spi_device *spi)
4038 {
4039 	int delay1, delay2;
4040 
4041 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4042 	if (delay1 < 0)
4043 		return delay1;
4044 
4045 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4046 	if (delay2 < 0)
4047 		return delay2;
4048 
4049 	if (delay1 < delay2)
4050 		memcpy(&xfer->word_delay, &spi->word_delay,
4051 		       sizeof(xfer->word_delay));
4052 
4053 	return 0;
4054 }
4055 
4056 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4057 {
4058 	struct spi_controller *ctlr = spi->controller;
4059 	struct spi_transfer *xfer;
4060 	int w_size;
4061 
4062 	if (list_empty(&message->transfers))
4063 		return -EINVAL;
4064 
4065 	message->spi = spi;
4066 
4067 	/*
4068 	 * Half-duplex links include original MicroWire, and ones with
4069 	 * only one data pin like SPI_3WIRE (which switches direction) or
4070 	 * where either MOSI or MISO is missing.  They can also be caused by
4071 	 * software limitations.
4072 	 */
4073 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4074 	    (spi->mode & SPI_3WIRE)) {
4075 		unsigned flags = ctlr->flags;
4076 
4077 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4078 			if (xfer->rx_buf && xfer->tx_buf)
4079 				return -EINVAL;
4080 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4081 				return -EINVAL;
4082 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4083 				return -EINVAL;
4084 		}
4085 	}
4086 
4087 	/*
4088 	 * Set transfer bits_per_word and max speed as spi device default if
4089 	 * it is not set for this transfer.
4090 	 * Set transfer tx_nbits and rx_nbits as single transfer default
4091 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4092 	 * Ensure transfer word_delay is at least as long as that required by
4093 	 * device itself.
4094 	 */
4095 	message->frame_length = 0;
4096 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4097 		xfer->effective_speed_hz = 0;
4098 		message->frame_length += xfer->len;
4099 		if (!xfer->bits_per_word)
4100 			xfer->bits_per_word = spi->bits_per_word;
4101 
4102 		if (!xfer->speed_hz)
4103 			xfer->speed_hz = spi->max_speed_hz;
4104 
4105 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4106 			xfer->speed_hz = ctlr->max_speed_hz;
4107 
4108 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4109 			return -EINVAL;
4110 
4111 		/*
4112 		 * SPI transfer length should be a multiple of the SPI word size,
4113 		 * where the word size is rounded up to a power-of-two in bytes.
4114 		 */
4115 		if (xfer->bits_per_word <= 8)
4116 			w_size = 1;
4117 		else if (xfer->bits_per_word <= 16)
4118 			w_size = 2;
4119 		else
4120 			w_size = 4;
4121 
4122 		/* No partial transfers accepted */
4123 		if (xfer->len % w_size)
4124 			return -EINVAL;
4125 
4126 		if (xfer->speed_hz && ctlr->min_speed_hz &&
4127 		    xfer->speed_hz < ctlr->min_speed_hz)
4128 			return -EINVAL;
4129 
4130 		if (xfer->tx_buf && !xfer->tx_nbits)
4131 			xfer->tx_nbits = SPI_NBITS_SINGLE;
4132 		if (xfer->rx_buf && !xfer->rx_nbits)
4133 			xfer->rx_nbits = SPI_NBITS_SINGLE;
4134 		/*
4135 		 * Check transfer tx/rx_nbits:
4136 		 * 1. check the value matches one of single, dual, quad and octal
4137 		 * 2. check tx/rx_nbits match the mode in spi_device
4138 		 */
4139 		if (xfer->tx_buf) {
4140 			if (spi->mode & SPI_NO_TX)
4141 				return -EINVAL;
4142 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4143 				xfer->tx_nbits != SPI_NBITS_DUAL &&
4144 				xfer->tx_nbits != SPI_NBITS_QUAD &&
4145 				xfer->tx_nbits != SPI_NBITS_OCTAL)
4146 				return -EINVAL;
4147 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4148 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
4149 				return -EINVAL;
4150 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4151 				!(spi->mode & SPI_TX_QUAD))
4152 				return -EINVAL;
4153 		}
4154 		/* Check transfer rx_nbits */
4155 		if (xfer->rx_buf) {
4156 			if (spi->mode & SPI_NO_RX)
4157 				return -EINVAL;
4158 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4159 				xfer->rx_nbits != SPI_NBITS_DUAL &&
4160 				xfer->rx_nbits != SPI_NBITS_QUAD &&
4161 				xfer->rx_nbits != SPI_NBITS_OCTAL)
4162 				return -EINVAL;
4163 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4164 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
4165 				return -EINVAL;
4166 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4167 				!(spi->mode & SPI_RX_QUAD))
4168 				return -EINVAL;
4169 		}
4170 
4171 		if (_spi_xfer_word_delay_update(xfer, spi))
4172 			return -EINVAL;
4173 	}
4174 
4175 	message->status = -EINPROGRESS;
4176 
4177 	return 0;
4178 }
4179 
4180 /*
4181  * spi_split_transfers - generic handling of transfer splitting
4182  * @msg: the message to split
4183  *
4184  * Under certain conditions, a SPI controller may not support arbitrary
4185  * transfer sizes or other features required by a peripheral. This function
4186  * will split the transfers in the message into smaller transfers that are
4187  * supported by the controller.
4188  *
4189  * Controllers with special requirements not covered here can also split
4190  * transfers in the optimize_message() callback.
4191  *
4192  * Context: can sleep
4193  * Return: zero on success, else a negative error code
4194  */
4195 static int spi_split_transfers(struct spi_message *msg)
4196 {
4197 	struct spi_controller *ctlr = msg->spi->controller;
4198 	struct spi_transfer *xfer;
4199 	int ret;
4200 
4201 	/*
4202 	 * If an SPI controller does not support toggling the CS line on each
4203 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4204 	 * for the CS line, we can emulate the CS-per-word hardware function by
4205 	 * splitting transfers into one-word transfers and ensuring that
4206 	 * cs_change is set for each transfer.
4207 	 */
4208 	if ((msg->spi->mode & SPI_CS_WORD) &&
4209 	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4210 		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4211 		if (ret)
4212 			return ret;
4213 
4214 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4215 			/* Don't change cs_change on the last entry in the list */
4216 			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4217 				break;
4218 
4219 			xfer->cs_change = 1;
4220 		}
4221 	} else {
4222 		ret = spi_split_transfers_maxsize(ctlr, msg,
4223 						  spi_max_transfer_size(msg->spi));
4224 		if (ret)
4225 			return ret;
4226 	}
4227 
4228 	return 0;
4229 }
4230 
4231 /*
4232  * __spi_optimize_message - shared implementation for spi_optimize_message()
4233  *                          and spi_maybe_optimize_message()
4234  * @spi: the device that will be used for the message
4235  * @msg: the message to optimize
4236  *
4237  * Peripheral drivers will call spi_optimize_message() and the spi core will
4238  * call spi_maybe_optimize_message() instead of calling this directly.
4239  *
4240  * It is not valid to call this on a message that has already been optimized.
4241  *
4242  * Return: zero on success, else a negative error code
4243  */
4244 static int __spi_optimize_message(struct spi_device *spi,
4245 				  struct spi_message *msg)
4246 {
4247 	struct spi_controller *ctlr = spi->controller;
4248 	int ret;
4249 
4250 	ret = __spi_validate(spi, msg);
4251 	if (ret)
4252 		return ret;
4253 
4254 	ret = spi_split_transfers(msg);
4255 	if (ret)
4256 		return ret;
4257 
4258 	if (ctlr->optimize_message) {
4259 		ret = ctlr->optimize_message(msg);
4260 		if (ret) {
4261 			spi_res_release(ctlr, msg);
4262 			return ret;
4263 		}
4264 	}
4265 
4266 	msg->optimized = true;
4267 
4268 	return 0;
4269 }
4270 
4271 /*
4272  * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4273  * @spi: the device that will be used for the message
4274  * @msg: the message to optimize
4275  * Return: zero on success, else a negative error code
4276  */
4277 static int spi_maybe_optimize_message(struct spi_device *spi,
4278 				      struct spi_message *msg)
4279 {
4280 	if (spi->controller->defer_optimize_message) {
4281 		msg->spi = spi;
4282 		return 0;
4283 	}
4284 
4285 	if (msg->pre_optimized)
4286 		return 0;
4287 
4288 	return __spi_optimize_message(spi, msg);
4289 }
4290 
4291 /**
4292  * spi_optimize_message - do any one-time validation and setup for a SPI message
4293  * @spi: the device that will be used for the message
4294  * @msg: the message to optimize
4295  *
4296  * Peripheral drivers that reuse the same message repeatedly may call this to
4297  * perform as much message prep as possible once, rather than repeating it
4298  * each time the message is transferred, improving throughput and reducing
4299  * CPU usage.
4300  *
4301  * Once a message has been optimized, it cannot be modified with the exception
4302  * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4303  * only the data in the memory it points to).
4304  *
4305  * Calls to this function must be balanced with calls to spi_unoptimize_message()
4306  * to avoid leaking resources.
4307  *
4308  * Context: can sleep
4309  * Return: zero on success, else a negative error code
4310  */
4311 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4312 {
4313 	int ret;
4314 
4315 	/*
4316 	 * Pre-optimization is not supported and optimization is deferred,
4317 	 * e.g. when using spi-mux.
4318 	 */
4319 	if (spi->controller->defer_optimize_message)
4320 		return 0;
4321 
4322 	ret = __spi_optimize_message(spi, msg);
4323 	if (ret)
4324 		return ret;
4325 
4326 	/*
4327 	 * This flag indicates that the peripheral driver called spi_optimize_message()
4328 	 * and therefore we shouldn't unoptimize message automatically when finalizing
4329 	 * the message but rather wait until spi_unoptimize_message() is called
4330 	 * by the peripheral driver.
4331 	 */
4332 	msg->pre_optimized = true;
4333 
4334 	return 0;
4335 }
4336 EXPORT_SYMBOL_GPL(spi_optimize_message);
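
/*
 * Illustrative sketch (names hypothetical): a driver streaming the same
 * message layout many times can pay the validation/splitting cost once:
 *
 *	struct spi_transfer xfer = { .rx_buf = buf, .len = len };
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_optimize_message(spi, &msg);
 *	if (ret)
 *		return ret;
 *	while (want_more_samples())
 *		spi_sync(spi, &msg);
 *	spi_unoptimize_message(&msg);
 */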
4337 
4338 /**
4339  * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4340  * @msg: the message to unoptimize
4341  *
4342  * Calls to this function must be balanced with calls to spi_optimize_message().
4343  *
4344  * Context: can sleep
4345  */
4346 void spi_unoptimize_message(struct spi_message *msg)
4347 {
4348 	if (msg->spi->controller->defer_optimize_message)
4349 		return;
4350 
4351 	__spi_unoptimize_message(msg);
4352 	msg->pre_optimized = false;
4353 }
4354 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
4355 
4356 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4357 {
4358 	struct spi_controller *ctlr = spi->controller;
4359 	struct spi_transfer *xfer;
4360 
4361 	/*
4362 	 * Some controllers do not support doing regular SPI transfers. Return
4363 	 * -ENOTSUPP when this is the case.
4364 	 */
4365 	if (!ctlr->transfer)
4366 		return -ENOTSUPP;
4367 
4368 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4369 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4370 
4371 	trace_spi_message_submit(message);
4372 
4373 	if (!ctlr->ptp_sts_supported) {
4374 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4375 			xfer->ptp_sts_word_pre = 0;
4376 			ptp_read_system_prets(xfer->ptp_sts);
4377 		}
4378 	}
4379 
4380 	return ctlr->transfer(spi, message);
4381 }
4382 
4383 static void devm_spi_unoptimize_message(void *msg)
4384 {
4385 	spi_unoptimize_message(msg);
4386 }
4387 
4388 /**
4389  * devm_spi_optimize_message - managed version of spi_optimize_message()
4390  * @dev: the device that manages @msg (usually @spi->dev)
4391  * @spi: the device that will be used for the message
4392  * @msg: the message to optimize
4393  * Return: zero on success, else a negative error code
4394  *
4395  * spi_unoptimize_message() will automatically be called when the device is
4396  * removed.
4397  */
4398 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4399 			      struct spi_message *msg)
4400 {
4401 	int ret;
4402 
4403 	ret = spi_optimize_message(spi, msg);
4404 	if (ret)
4405 		return ret;
4406 
4407 	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4408 }
4409 EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
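
/*
 * Illustrative sketch: with the managed variant the matching unoptimize is
 * tied to unbinding of @dev, so a (hypothetical) probe() can simply do:
 *
 *	ret = devm_spi_optimize_message(&spi->dev, spi, &priv->msg);
 *	if (ret)
 *		return ret;
 */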
4410 
4411 /**
4412  * spi_async - asynchronous SPI transfer
4413  * @spi: device with which data will be exchanged
4414  * @message: describes the data transfers, including completion callback
4415  * Context: any (IRQs may be blocked, etc)
4416  *
4417  * This call may be used in IRQ and other contexts which can't sleep,
4418  * as well as from task contexts which can sleep.
4419  *
4420  * The completion callback is invoked in a context which can't sleep.
4421  * Before that invocation, the value of message->status is undefined.
4422  * When the callback is issued, message->status holds either zero (to
4423  * indicate complete success) or a negative error code.  After that
4424  * callback returns, the driver which issued the transfer request may
4425  * deallocate the associated memory; it's no longer in use by any SPI
4426  * core or controller driver code.
4427  *
4428  * Note that although all messages to a spi_device are handled in
4429  * FIFO order, messages may go to different devices in other orders.
4430  * Some devices might be higher priority, or have various "hard" access
4431  * time requirements, for example.
4432  *
4433  * On detection of any fault during the transfer, processing of
4434  * the entire message is aborted, and the device is deselected.
4435  * Until returning from the associated message completion callback,
4436  * no other spi_message queued to that device will be processed.
4437  * (This rule applies equally to all the synchronous transfer calls,
4438  * which are wrappers around this core asynchronous primitive.)
4439  *
4440  * Return: zero on success, else a negative error code.
4441  */
4442 int spi_async(struct spi_device *spi, struct spi_message *message)
4443 {
4444 	struct spi_controller *ctlr = spi->controller;
4445 	int ret;
4446 	unsigned long flags;
4447 
4448 	ret = spi_maybe_optimize_message(spi, message);
4449 	if (ret)
4450 		return ret;
4451 
4452 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4453 
4454 	if (ctlr->bus_lock_flag)
4455 		ret = -EBUSY;
4456 	else
4457 		ret = __spi_async(spi, message);
4458 
4459 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4460 
4461 	return ret;
4462 }
4463 EXPORT_SYMBOL_GPL(spi_async);
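
/*
 * Illustrative sketch (all "foo" names hypothetical): submit a message
 * asynchronously and finish up in the completion callback, which runs in
 * a context that can't sleep:
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_priv *priv = context;
 *
 *		if (priv->msg.status)
 *			dev_err(&priv->spi->dev, "transfer failed\n");
 *	}
 *
 *	...
 *	priv->msg.complete = foo_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(priv->spi, &priv->msg);
 */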
4464 
4465 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4466 {
4467 	bool was_busy;
4468 	int ret;
4469 
4470 	mutex_lock(&ctlr->io_mutex);
4471 
4472 	was_busy = ctlr->busy;
4473 
4474 	ctlr->cur_msg = msg;
4475 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4476 	if (ret)
4477 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4478 	ctlr->cur_msg = NULL;
4479 	ctlr->fallback = false;
4480 
4481 	if (!was_busy) {
4482 		kfree(ctlr->dummy_rx);
4483 		ctlr->dummy_rx = NULL;
4484 		kfree(ctlr->dummy_tx);
4485 		ctlr->dummy_tx = NULL;
4486 		if (ctlr->unprepare_transfer_hardware &&
4487 		    ctlr->unprepare_transfer_hardware(ctlr))
4488 			dev_err(&ctlr->dev,
4489 				"failed to unprepare transfer hardware\n");
4490 		spi_idle_runtime_pm(ctlr);
4491 	}
4492 
4493 	mutex_unlock(&ctlr->io_mutex);
4494 }
4495 
4496 /*-------------------------------------------------------------------------*/
4497 
4498 /*
4499  * Utility methods for SPI protocol drivers, layered on
4500  * top of the core.  Some other utility methods are defined as
4501  * inline functions.
4502  */
4503 
4504 static void spi_complete(void *arg)
4505 {
4506 	complete(arg);
4507 }
4508 
4509 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4510 {
4511 	DECLARE_COMPLETION_ONSTACK(done);
4512 	unsigned long flags;
4513 	int status;
4514 	struct spi_controller *ctlr = spi->controller;
4515 
4516 	if (__spi_check_suspended(ctlr)) {
4517 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4518 		return -ESHUTDOWN;
4519 	}
4520 
4521 	status = spi_maybe_optimize_message(spi, message);
4522 	if (status)
4523 		return status;
4524 
4525 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4526 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4527 
4528 	/*
4529 	 * Checking queue_empty here only guarantees async/sync message
4530 	 * ordering when coming from the same context. It does not need to
4531 	 * guard against reentrancy from a different context. The io_mutex
4532 	 * will catch those cases.
4533 	 */
4534 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4535 		message->actual_length = 0;
4536 		message->status = -EINPROGRESS;
4537 
4538 		trace_spi_message_submit(message);
4539 
4540 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4541 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4542 
4543 		__spi_transfer_message_noqueue(ctlr, message);
4544 
4545 		return message->status;
4546 	}
4547 
4548 	/*
4549 	 * There are messages in the async queue that could have originated
4550 	 * from the same context, so we need to preserve ordering.
4551 	 * Therefore we send the message to the async queue and wait until
4552 	 * it completes.
4553 	 */
4554 	message->complete = spi_complete;
4555 	message->context = &done;
4556 
4557 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4558 	status = __spi_async(spi, message);
4559 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4560 
4561 	if (status == 0) {
4562 		wait_for_completion(&done);
4563 		status = message->status;
4564 	}
4565 	message->complete = NULL;
4566 	message->context = NULL;
4567 
4568 	return status;
4569 }
4570 
4571 /**
4572  * spi_sync - blocking/synchronous SPI data transfers
4573  * @spi: device with which data will be exchanged
4574  * @message: describes the data transfers
4575  * Context: can sleep
4576  *
4577  * This call may only be used from a context that may sleep.  The sleep
4578  * is non-interruptible, and has no timeout.  Low-overhead controller
4579  * drivers may DMA directly into and out of the message buffers.
4580  *
4581  * Note that the SPI device's chip select is active during the message,
4582  * and then is normally disabled between messages.  Drivers for some
4583  * frequently-used devices may want to minimize costs of selecting a chip,
4584  * by leaving it selected in anticipation that the next message will go
4585  * to the same chip.  (That may increase power usage.)
4586  *
4587  * Also, the caller is guaranteeing that the memory associated with the
4588  * message will not be freed before this call returns.
4589  *
4590  * Return: zero on success, else a negative error code.
4591  */
4592 int spi_sync(struct spi_device *spi, struct spi_message *message)
4593 {
4594 	int ret;
4595 
4596 	mutex_lock(&spi->controller->bus_lock_mutex);
4597 	ret = __spi_sync(spi, message);
4598 	mutex_unlock(&spi->controller->bus_lock_mutex);
4599 
4600 	return ret;
4601 }
4602 EXPORT_SYMBOL_GPL(spi_sync);
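
/*
 * Illustrative sketch (buffer names hypothetical): a command/response
 * exchange built from two transfers; the buffers must be DMA-safe if the
 * controller transfers by DMA:
 *
 *	struct spi_transfer xfers[] = {
 *		{ .tx_buf = cmd, .len = sizeof(cmd) },
 *		{ .rx_buf = resp, .len = sizeof(resp) },
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));
 *	ret = spi_sync(spi, &msg);
 */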
4603 
4604 /**
4605  * spi_sync_locked - version of spi_sync with exclusive bus usage
4606  * @spi: device with which data will be exchanged
4607  * @message: describes the data transfers
4608  * Context: can sleep
4609  *
4610  * This call may only be used from a context that may sleep.  The sleep
4611  * is non-interruptible, and has no timeout.  Low-overhead controller
4612  * drivers may DMA directly into and out of the message buffers.
4613  *
4614  * This call should be used by drivers that require exclusive access to the
4615  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4616  * be released by a spi_bus_unlock call when the exclusive access is over.
4617  *
4618  * Return: zero on success, else a negative error code.
4619  */
4620 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4621 {
4622 	return __spi_sync(spi, message);
4623 }
4624 EXPORT_SYMBOL_GPL(spi_sync_locked);
4625 
4626 /**
4627  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4628  * @ctlr: SPI bus master that should be locked for exclusive bus access
4629  * Context: can sleep
4630  *
4631  * This call may only be used from a context that may sleep.  The sleep
4632  * is non-interruptible, and has no timeout.
4633  *
4634  * This call should be used by drivers that require exclusive access to the
4635  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4636  * exclusive access is over. Data transfer must be done by spi_sync_locked
4637  * and spi_async_locked calls when the SPI bus lock is held.
4638  *
4639  * Return: always zero.
4640  */
4641 int spi_bus_lock(struct spi_controller *ctlr)
4642 {
4643 	unsigned long flags;
4644 
4645 	mutex_lock(&ctlr->bus_lock_mutex);
4646 
4647 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4648 	ctlr->bus_lock_flag = 1;
4649 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4650 
4651 	/* Mutex remains locked until spi_bus_unlock() is called */
4652 
4653 	return 0;
4654 }
4655 EXPORT_SYMBOL_GPL(spi_bus_lock);
4656 
4657 /**
4658  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4659  * @ctlr: SPI bus master that was locked for exclusive bus access
4660  * Context: can sleep
4661  *
4662  * This call may only be used from a context that may sleep.  The sleep
4663  * is non-interruptible, and has no timeout.
4664  *
4665  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4666  * call.
4667  *
4668  * Return: always zero.
4669  */
4670 int spi_bus_unlock(struct spi_controller *ctlr)
4671 {
4672 	ctlr->bus_lock_flag = 0;
4673 
4674 	mutex_unlock(&ctlr->bus_lock_mutex);
4675 
4676 	return 0;
4677 }
4678 EXPORT_SYMBOL_GPL(spi_bus_unlock);
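
/*
 * Illustrative sketch: bracketing two messages that must not be interleaved
 * with other devices' traffic on the same bus (messages hypothetical):
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &first_msg);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &second_msg);
 *	spi_bus_unlock(spi->controller);
 */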
4679 
4680 /* Portable code must never pass more than 32 bytes */
4681 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4682 
4683 static u8	*buf;
4684 
4685 /**
4686  * spi_write_then_read - SPI synchronous write followed by read
4687  * @spi: device with which data will be exchanged
4688  * @txbuf: data to be written (need not be DMA-safe)
4689  * @n_tx: size of txbuf, in bytes
4690  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4691  * @n_rx: size of rxbuf, in bytes
4692  * Context: can sleep
4693  *
4694  * This performs a half duplex MicroWire style transaction with the
4695  * device, sending txbuf and then reading rxbuf.  The return value
4696  * is zero for success, else a negative errno status code.
4697  * This call may only be used from a context that may sleep.
4698  *
4699  * Parameters to this routine are always copied using a small buffer.
4700  * Performance-sensitive or bulk transfer code should instead use
4701  * spi_{async,sync}() calls with DMA-safe buffers.
4702  *
4703  * Return: zero on success, else a negative error code.
4704  */
4705 int spi_write_then_read(struct spi_device *spi,
4706 		const void *txbuf, unsigned n_tx,
4707 		void *rxbuf, unsigned n_rx)
4708 {
4709 	static DEFINE_MUTEX(lock);
4710 
4711 	int			status;
4712 	struct spi_message	message;
4713 	struct spi_transfer	x[2];
4714 	u8			*local_buf;
4715 
4716 	/*
4717 	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
4718 	 * copying here (this is purely a convenience thing), but we can
4719 	 * keep heap costs out of the hot path unless someone else is
4720 	 * using the preallocated buffer or the transfer is too large.
4721 	 */
4722 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4723 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4724 				    GFP_KERNEL | GFP_DMA);
4725 		if (!local_buf)
4726 			return -ENOMEM;
4727 	} else {
4728 		local_buf = buf;
4729 	}
4730 
4731 	spi_message_init(&message);
4732 	memset(x, 0, sizeof(x));
4733 	if (n_tx) {
4734 		x[0].len = n_tx;
4735 		spi_message_add_tail(&x[0], &message);
4736 	}
4737 	if (n_rx) {
4738 		x[1].len = n_rx;
4739 		spi_message_add_tail(&x[1], &message);
4740 	}
4741 
4742 	memcpy(local_buf, txbuf, n_tx);
4743 	x[0].tx_buf = local_buf;
4744 	x[1].rx_buf = local_buf + n_tx;
4745 
4746 	/* Do the I/O */
4747 	status = spi_sync(spi, &message);
4748 	if (status == 0)
4749 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4750 
4751 	if (x[0].tx_buf == buf)
4752 		mutex_unlock(&lock);
4753 	else
4754 		kfree(local_buf);
4755 
4756 	return status;
4757 }
4758 EXPORT_SYMBOL_GPL(spi_write_then_read);
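
/*
 * Illustrative sketch: reading a hypothetical two-byte ID register at
 * command 0x0f; stack buffers are fine here because this helper bounces
 * through its own DMA-safe buffer:
 *
 *	u8 cmd = 0x0f;
 *	u8 id[2];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */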
4759 
4760 /*-------------------------------------------------------------------------*/
4761 
4762 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4763 /* Must call put_device() when done with the returned spi_device */
4764 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4765 {
4766 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4767 
4768 	return dev ? to_spi_device(dev) : NULL;
4769 }
4770 
4771 /* The SPI controllers are not on the spi_bus, so we find them another way */
4772 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4773 {
4774 	struct device *dev;
4775 
4776 	dev = class_find_device_by_of_node(&spi_master_class, node);
4777 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4778 		dev = class_find_device_by_of_node(&spi_slave_class, node);
4779 	if (!dev)
4780 		return NULL;
4781 
4782 	/* Reference got in class_find_device */
4783 	return container_of(dev, struct spi_controller, dev);
4784 }
4785 
4786 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4787 			 void *arg)
4788 {
4789 	struct of_reconfig_data *rd = arg;
4790 	struct spi_controller *ctlr;
4791 	struct spi_device *spi;
4792 
4793 	switch (of_reconfig_get_state_change(action, arg)) {
4794 	case OF_RECONFIG_CHANGE_ADD:
4795 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4796 		if (ctlr == NULL)
4797 			return NOTIFY_OK;	/* Not for us */
4798 
4799 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4800 			put_device(&ctlr->dev);
4801 			return NOTIFY_OK;
4802 		}
4803 
4804 		/*
4805 		 * Clear the flag before adding the device so that fw_devlink
4806 		 * doesn't skip adding consumers to this device.
4807 		 */
4808 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4809 		spi = of_register_spi_device(ctlr, rd->dn);
4810 		put_device(&ctlr->dev);
4811 
4812 		if (IS_ERR(spi)) {
4813 			pr_err("%s: failed to create for '%pOF'\n",
4814 					__func__, rd->dn);
4815 			of_node_clear_flag(rd->dn, OF_POPULATED);
4816 			return notifier_from_errno(PTR_ERR(spi));
4817 		}
4818 		break;
4819 
4820 	case OF_RECONFIG_CHANGE_REMOVE:
4821 		/* Already depopulated? */
4822 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4823 			return NOTIFY_OK;
4824 
4825 		/* Find our device by node */
4826 		spi = of_find_spi_device_by_node(rd->dn);
4827 		if (spi == NULL)
4828 			return NOTIFY_OK;	/* No? not meant for us */
4829 
4830 		/* Unregister takes one ref away */
4831 		spi_unregister_device(spi);
4832 
4833 		/* And put the reference of the find */
4834 		put_device(&spi->dev);
4835 		break;
4836 	}
4837 
4838 	return NOTIFY_OK;
4839 }
4840 
4841 static struct notifier_block spi_of_notifier = {
4842 	.notifier_call = of_spi_notify,
4843 };
4844 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4845 extern struct notifier_block spi_of_notifier;
4846 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4847 
4848 #if IS_ENABLED(CONFIG_ACPI)
4849 static int spi_acpi_controller_match(struct device *dev, const void *data)
4850 {
4851 	return ACPI_COMPANION(dev->parent) == data;
4852 }
4853 
4854 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4855 {
4856 	struct device *dev;
4857 
4858 	dev = class_find_device(&spi_master_class, NULL, adev,
4859 				spi_acpi_controller_match);
4860 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4861 		dev = class_find_device(&spi_slave_class, NULL, adev,
4862 					spi_acpi_controller_match);
4863 	if (!dev)
4864 		return NULL;
4865 
4866 	return container_of(dev, struct spi_controller, dev);
4867 }
4868 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4869 
4870 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4871 {
4872 	struct device *dev;
4873 
4874 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4875 	return to_spi_device(dev);
4876 }
4877 
4878 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4879 			   void *arg)
4880 {
4881 	struct acpi_device *adev = arg;
4882 	struct spi_controller *ctlr;
4883 	struct spi_device *spi;
4884 
4885 	switch (value) {
4886 	case ACPI_RECONFIG_DEVICE_ADD:
4887 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4888 		if (!ctlr)
4889 			break;
4890 
4891 		acpi_register_spi_device(ctlr, adev);
4892 		put_device(&ctlr->dev);
4893 		break;
4894 	case ACPI_RECONFIG_DEVICE_REMOVE:
4895 		if (!acpi_device_enumerated(adev))
4896 			break;
4897 
4898 		spi = acpi_spi_find_device_by_adev(adev);
4899 		if (!spi)
4900 			break;
4901 
4902 		spi_unregister_device(spi);
4903 		put_device(&spi->dev);
4904 		break;
4905 	}
4906 
4907 	return NOTIFY_OK;
4908 }
4909 
4910 static struct notifier_block spi_acpi_notifier = {
4911 	.notifier_call = acpi_spi_notify,
4912 };
4913 #else
4914 extern struct notifier_block spi_acpi_notifier;
4915 #endif
4916 
4917 static int __init spi_init(void)
4918 {
4919 	int	status;
4920 
4921 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4922 	if (!buf) {
4923 		status = -ENOMEM;
4924 		goto err0;
4925 	}
4926 
4927 	status = bus_register(&spi_bus_type);
4928 	if (status < 0)
4929 		goto err1;
4930 
4931 	status = class_register(&spi_master_class);
4932 	if (status < 0)
4933 		goto err2;
4934 
4935 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4936 		status = class_register(&spi_slave_class);
4937 		if (status < 0)
4938 			goto err3;
4939 	}
4940 
4941 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4942 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4943 	if (IS_ENABLED(CONFIG_ACPI))
4944 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4945 
4946 	return 0;
4947 
4948 err3:
4949 	class_unregister(&spi_master_class);
4950 err2:
4951 	bus_unregister(&spi_bus_type);
4952 err1:
4953 	kfree(buf);
4954 	buf = NULL;
4955 err0:
4956 	return status;
4957 }
4958 
4959 /*
4960  * A board_info is normally registered in arch_initcall(),
4961  * but even essential drivers wait till later.
4962  *
4963  * REVISIT only boardinfo really needs static linking. The rest (device and
4964  * driver registration) _could_ be dynamically linked (modular) ... Costs
4965  * include needing to have boardinfo data structures be much more public.
4966  */
4967 postcore_initcall(spi_init);
4968