xref: /linux/drivers/spi/spi.c (revision 833bfade96561216aa2129516a5926a0326860a2)
/*
 * SPI init/core code
 *
 * Copyright (C) 2005 David Brownell
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/clk/clk-conf.h>
#include <linux/slab.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/highmem.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	/* spi masters may cleanup for released devices */
	if (spi->master->cleanup)
		spi->master->cleanup(spi);

	spi_master_put(spi->master);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_master_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					 char *buf)			\
{									\
	struct spi_master *master = container_of(dev,			\
						 struct spi_master, dev); \
	return spi_statistics_##field##_show(&master->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_master_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_master_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(&spi->statistics, buf);	\
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = S_IRUGO },			\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
					    char *buf)			\
{									\
	unsigned long flags;						\
	ssize_t len;							\
	spin_lock_irqsave(&stat->lock, flags);				\
	len = sprintf(buf, format_string, stat->field);			\
	spin_unlock_irqrestore(&stat->lock, flags);			\
	return len;							\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field, format_string)			\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field, format_string)

SPI_STATISTICS_SHOW(messages, "%lu");
SPI_STATISTICS_SHOW(transfers, "%lu");
SPI_STATISTICS_SHOW(errors, "%lu");
SPI_STATISTICS_SHOW(timedout, "%lu");

SPI_STATISTICS_SHOW(spi_sync, "%lu");
SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
SPI_STATISTICS_SHOW(spi_async, "%lu");

SPI_STATISTICS_SHOW(bytes, "%llu");
SPI_STATISTICS_SHOW(bytes_rx, "%llu");
SPI_STATISTICS_SHOW(bytes_tx, "%llu");

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index],  "%lu")
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_master_statistics_attrs[] = {
	&dev_attr_spi_master_messages.attr,
	&dev_attr_spi_master_transfers.attr,
	&dev_attr_spi_master_errors.attr,
	&dev_attr_spi_master_timedout.attr,
	&dev_attr_spi_master_spi_sync.attr,
	&dev_attr_spi_master_spi_sync_immediate.attr,
	&dev_attr_spi_master_spi_async.attr,
	&dev_attr_spi_master_bytes.attr,
	&dev_attr_spi_master_bytes_rx.attr,
	&dev_attr_spi_master_bytes_tx.attr,
	&dev_attr_spi_master_transfer_bytes_histo0.attr,
	&dev_attr_spi_master_transfer_bytes_histo1.attr,
	&dev_attr_spi_master_transfer_bytes_histo2.attr,
	&dev_attr_spi_master_transfer_bytes_histo3.attr,
	&dev_attr_spi_master_transfer_bytes_histo4.attr,
	&dev_attr_spi_master_transfer_bytes_histo5.attr,
	&dev_attr_spi_master_transfer_bytes_histo6.attr,
	&dev_attr_spi_master_transfer_bytes_histo7.attr,
	&dev_attr_spi_master_transfer_bytes_histo8.attr,
	&dev_attr_spi_master_transfer_bytes_histo9.attr,
	&dev_attr_spi_master_transfer_bytes_histo10.attr,
	&dev_attr_spi_master_transfer_bytes_histo11.attr,
	&dev_attr_spi_master_transfer_bytes_histo12.attr,
	&dev_attr_spi_master_transfer_bytes_histo13.attr,
	&dev_attr_spi_master_transfer_bytes_histo14.attr,
	&dev_attr_spi_master_transfer_bytes_histo15.attr,
	&dev_attr_spi_master_transfer_bytes_histo16.attr,
	&dev_attr_spi_master_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_master_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_master_statistics_attrs,
};

static const struct attribute_group *spi_master_groups[] = {
	&spi_master_statistics_group,
	NULL,
};

void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_master *master)
{
	unsigned long flags;
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;

	if (l2len < 0)
		l2len = 0;

	spin_lock_irqsave(&stats->lock, flags);

	stats->transfers++;
	stats->transfer_bytes_histo[l2len]++;

	stats->bytes += xfer->len;
	if ((xfer->tx_buf) &&
	    (xfer->tx_buf != master->dummy_tx))
		stats->bytes_tx += xfer->len;
	if ((xfer->rx_buf) &&
	    (xfer->rx_buf != master->dummy_rx))
		stats->bytes_rx += xfer->len;

	spin_unlock_irqrestore(&stats->lock, flags);
}
EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);

/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */

static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
						const struct spi_device *sdev)
{
	while (id->name[0]) {
		if (!strcmp(sdev->modalias, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

static int spi_match_device(struct device *dev, struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
	return 0;
}

struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
};
EXPORT_SYMBOL_GPL(spi_bus_type);


static int spi_drv_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (dev->of_node) {
		spi->irq = of_irq_get(dev->of_node, 0);
		if (spi->irq == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		if (spi->irq < 0)
			spi->irq = 0;
	}

	ret = dev_pm_domain_attach(dev, true);
	if (ret != -EPROBE_DEFER) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static int spi_drv_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	int ret;

	ret = sdrv->remove(to_spi_device(dev));
	dev_pm_domain_detach(dev, true);

	return ret;
}

static void spi_drv_shutdown(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	sdrv->shutdown(to_spi_device(dev));
}

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;
	if (sdrv->probe)
		sdrv->driver.probe = spi_drv_probe;
	if (sdrv->remove)
		sdrv->driver.remove = spi_drv_remove;
	if (sdrv->shutdown)
		sdrv->driver.shutdown = spi_drv_shutdown;
	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
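
/*
 * Illustrative sketch (not part of upstream spi.c): how a protocol driver
 * typically uses the registration API above.  The "foo" names are
 * hypothetical placeholders.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		spi->mode = SPI_MODE_0;
 *		spi->bits_per_word = 8;
 *		return spi_setup(spi);
 *	}
 *
 *	static int foo_remove(struct spi_device *spi)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_driver foo_driver = {
 *		.driver	= { .name = "foo" },
 *		.probe	= foo_probe,
 *		.remove	= foo_remove,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * module_spi_driver() expands to spi_register_driver()/spi_unregister_driver()
 * calls, and spi_register_driver() is a macro that passes THIS_MODULE as the
 * owner to __spi_register_driver() above.
 */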

/*-------------------------------------------------------------------------*/

/* SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI master drivers.
 * Device registration normally goes into something like
 * arch/.../mach.../board-YYY.c, together with other read-only (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_master_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_master list, and their matching process.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @master: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI master.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_master *master)
{
	struct spi_device	*spi;

	if (!spi_master_get(master))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_master_put(master);
		return NULL;
	}

	spi->master = master;
	spi->dev.parent = &master->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->cs_gpio = -ENOENT;

	spin_lock_init(&spi->statistics.lock);

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);

	if (adev) {
		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
		     spi->chip_select);
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;

	if (spi->master == new_spi->master &&
	    spi->chip_select == new_spi->chip_select)
		return -EBUSY;
	return 0;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the spi bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	static DEFINE_MUTEX(spi_add_lock);
	struct spi_master *master = spi->master;
	struct device *dev = master->dev.parent;
	int status;

	/* Chipselects are numbered 0..max; validate. */
	if (spi->chip_select >= master->num_chipselect) {
		dev_err(dev, "cs%d >= max %d\n",
			spi->chip_select,
			master->num_chipselect);
		return -EINVAL;
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/* We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.  Lock against concurrent add() calls.
	 */
	mutex_lock(&spi_add_lock);

	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status) {
		dev_err(dev, "chipselect %d already in use\n",
				spi->chip_select);
		goto done;
	}

	if (master->cs_gpios)
		spi->cs_gpio = master->cs_gpios[spi->chip_select];

	/* Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		goto done;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0)
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
	else
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));

done:
	mutex_unlock(&spi_add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);
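
/*
 * Illustrative sketch (not part of upstream spi.c): how a caller combines
 * spi_alloc_device() and spi_add_device(), as described in the kerneldoc
 * above.  "my_master" and the chip parameters are hypothetical.
 *
 *	struct spi_device *spi;
 *
 *	spi = spi_alloc_device(my_master);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	spi->mode = SPI_MODE_3;
 *	strlcpy(spi->modalias, "my-chip", sizeof(spi->modalias));
 *
 *	if (spi_add_device(spi)) {
 *		spi_dev_put(spi);	(discard without adding)
 *		return -ENODEV;
 *	}
 */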

/**
 * spi_new_device - instantiate one new SPI device
 * @master: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_master *master,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/* NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(master);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	proxy->chip_select = chip->chip_select;
	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;

	status = spi_add_device(proxy);
	if (status < 0) {
		spi_dev_put(proxy);
		return NULL;
	}

	return proxy;
}
EXPORT_SYMBOL_GPL(spi_new_device);
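
/*
 * Illustrative sketch (not part of upstream spi.c): an out-of-band adapter
 * driver (the USB/parport case mentioned above) instantiating one child
 * from a locally built description.  "adapter_master" and the values are
 * hypothetical.
 *
 *	struct spi_board_info chip = {
 *		.modalias	= "my-flash",
 *		.max_speed_hz	= 12000000,
 *		.mode		= SPI_MODE_0,
 *		.chip_select	= 0,
 *	};
 *	struct spi_device *child = spi_new_device(adapter_master, &chip);
 *
 *	if (!child)
 *		dev_warn(&adapter_master->dev, "child not created\n");
 */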

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_master().
 */
void spi_unregister_device(struct spi_device *spi)
{
	if (!spi)
		return;

	if (spi->dev.of_node)
		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
	if (ACPI_COMPANION(&spi->dev))
		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
	device_unregister(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_master_to_boardinfo(struct spi_master *master,
				struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (master->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(master, bi);
	if (!dev)
		dev_err(master->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return -EINVAL;

	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_master *master;

		memcpy(&bi->board_info, info, sizeof(*info));
		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(master, &spi_master_list, list)
			spi_match_master_to_boardinfo(master, &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}
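
/*
 * Illustrative sketch (not part of upstream spi.c): typical board-file use of
 * spi_register_board_info() from an arch_initcall, per the kerneldoc above.
 * The names and numbers are hypothetical.
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "my-codec",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 10000000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	static int __init board_init(void)
 *	{
 *		return spi_register_board_info(board_spi_devices,
 *					       ARRAY_SIZE(board_spi_devices));
 *	}
 *	arch_initcall(board_init);
 */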
6928ae12a0dSDavid Brownell 
6938ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
6948ae12a0dSDavid Brownell 
695b158935fSMark Brown static void spi_set_cs(struct spi_device *spi, bool enable)
696b158935fSMark Brown {
697b158935fSMark Brown 	if (spi->mode & SPI_CS_HIGH)
698b158935fSMark Brown 		enable = !enable;
699b158935fSMark Brown 
7008eee6b9dSThor Thayer 	if (gpio_is_valid(spi->cs_gpio)) {
701b158935fSMark Brown 		gpio_set_value(spi->cs_gpio, !enable);
7028eee6b9dSThor Thayer 		/* Some SPI masters need both GPIO CS & slave_select */
7038eee6b9dSThor Thayer 		if ((spi->master->flags & SPI_MASTER_GPIO_SS) &&
7048eee6b9dSThor Thayer 		    spi->master->set_cs)
705b158935fSMark Brown 			spi->master->set_cs(spi, !enable);
7068eee6b9dSThor Thayer 	} else if (spi->master->set_cs) {
7078eee6b9dSThor Thayer 		spi->master->set_cs(spi, !enable);
7088eee6b9dSThor Thayer 	}
709b158935fSMark Brown }
710b158935fSMark Brown 
7112de440f5SGeert Uytterhoeven #ifdef CONFIG_HAS_DMA
7126ad45a27SMark Brown static int spi_map_buf(struct spi_master *master, struct device *dev,
7136ad45a27SMark Brown 		       struct sg_table *sgt, void *buf, size_t len,
7146ad45a27SMark Brown 		       enum dma_data_direction dir)
7156ad45a27SMark Brown {
7166ad45a27SMark Brown 	const bool vmalloced_buf = is_vmalloc_addr(buf);
717df88e91bSAndy Shevchenko 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
718b1b8153cSVignesh R #ifdef CONFIG_HIGHMEM
719b1b8153cSVignesh R 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
720b1b8153cSVignesh R 				(unsigned long)buf < (PKMAP_BASE +
721b1b8153cSVignesh R 					(LAST_PKMAP * PAGE_SIZE)));
722b1b8153cSVignesh R #else
723b1b8153cSVignesh R 	const bool kmap_buf = false;
724b1b8153cSVignesh R #endif
72565598c13SAndrew Gabbasov 	int desc_len;
72665598c13SAndrew Gabbasov 	int sgs;
7276ad45a27SMark Brown 	struct page *vm_page;
7288dd4a016SJuan Gutierrez 	struct scatterlist *sg;
7296ad45a27SMark Brown 	void *sg_buf;
7306ad45a27SMark Brown 	size_t min;
7316ad45a27SMark Brown 	int i, ret;
7326ad45a27SMark Brown 
733b1b8153cSVignesh R 	if (vmalloced_buf || kmap_buf) {
734df88e91bSAndy Shevchenko 		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
73565598c13SAndrew Gabbasov 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
7360569a88fSVignesh R 	} else if (virt_addr_valid(buf)) {
737df88e91bSAndy Shevchenko 		desc_len = min_t(int, max_seg_size, master->max_dma_len);
73865598c13SAndrew Gabbasov 		sgs = DIV_ROUND_UP(len, desc_len);
7390569a88fSVignesh R 	} else {
7400569a88fSVignesh R 		return -EINVAL;
74165598c13SAndrew Gabbasov 	}
74265598c13SAndrew Gabbasov 
7436ad45a27SMark Brown 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
7446ad45a27SMark Brown 	if (ret != 0)
7456ad45a27SMark Brown 		return ret;
7466ad45a27SMark Brown 
7478dd4a016SJuan Gutierrez 	sg = &sgt->sgl[0];
7486ad45a27SMark Brown 	for (i = 0; i < sgs; i++) {
7496ad45a27SMark Brown 
750b1b8153cSVignesh R 		if (vmalloced_buf || kmap_buf) {
75165598c13SAndrew Gabbasov 			min = min_t(size_t,
75265598c13SAndrew Gabbasov 				    len, desc_len - offset_in_page(buf));
753b1b8153cSVignesh R 			if (vmalloced_buf)
7546ad45a27SMark Brown 				vm_page = vmalloc_to_page(buf);
755b1b8153cSVignesh R 			else
756b1b8153cSVignesh R 				vm_page = kmap_to_page(buf);
7576ad45a27SMark Brown 			if (!vm_page) {
7586ad45a27SMark Brown 				sg_free_table(sgt);
7596ad45a27SMark Brown 				return -ENOMEM;
7606ad45a27SMark Brown 			}
7618dd4a016SJuan Gutierrez 			sg_set_page(sg, vm_page,
762c1aefbddSCharles Keepax 				    min, offset_in_page(buf));
7636ad45a27SMark Brown 		} else {
76465598c13SAndrew Gabbasov 			min = min_t(size_t, len, desc_len);
7656ad45a27SMark Brown 			sg_buf = buf;
7668dd4a016SJuan Gutierrez 			sg_set_buf(sg, sg_buf, min);
7676ad45a27SMark Brown 		}
7686ad45a27SMark Brown 
7696ad45a27SMark Brown 		buf += min;
7706ad45a27SMark Brown 		len -= min;
7718dd4a016SJuan Gutierrez 		sg = sg_next(sg);
7726ad45a27SMark Brown 	}
7736ad45a27SMark Brown 
7746ad45a27SMark Brown 	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
77589e4b66aSGeert Uytterhoeven 	if (!ret)
77689e4b66aSGeert Uytterhoeven 		ret = -ENOMEM;
7776ad45a27SMark Brown 	if (ret < 0) {
7786ad45a27SMark Brown 		sg_free_table(sgt);
7796ad45a27SMark Brown 		return ret;
7806ad45a27SMark Brown 	}
7816ad45a27SMark Brown 
7826ad45a27SMark Brown 	sgt->nents = ret;
7836ad45a27SMark Brown 
7846ad45a27SMark Brown 	return 0;
7856ad45a27SMark Brown }
7866ad45a27SMark Brown 
7876ad45a27SMark Brown static void spi_unmap_buf(struct spi_master *master, struct device *dev,
7886ad45a27SMark Brown 			  struct sg_table *sgt, enum dma_data_direction dir)
7896ad45a27SMark Brown {
7906ad45a27SMark Brown 	if (sgt->orig_nents) {
7916ad45a27SMark Brown 		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
7926ad45a27SMark Brown 		sg_free_table(sgt);
7936ad45a27SMark Brown 	}
7946ad45a27SMark Brown }
7956ad45a27SMark Brown 
7962de440f5SGeert Uytterhoeven static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
79799adef31SMark Brown {
79899adef31SMark Brown 	struct device *tx_dev, *rx_dev;
79999adef31SMark Brown 	struct spi_transfer *xfer;
8006ad45a27SMark Brown 	int ret;
8013a2eba9bSMark Brown 
8026ad45a27SMark Brown 	if (!master->can_dma)
80399adef31SMark Brown 		return 0;
80499adef31SMark Brown 
805c37f45b5SLeilk Liu 	if (master->dma_tx)
8063fc25421SGeert Uytterhoeven 		tx_dev = master->dma_tx->device->dev;
807c37f45b5SLeilk Liu 	else
808c37f45b5SLeilk Liu 		tx_dev = &master->dev;
809c37f45b5SLeilk Liu 
810c37f45b5SLeilk Liu 	if (master->dma_rx)
8113fc25421SGeert Uytterhoeven 		rx_dev = master->dma_rx->device->dev;
812c37f45b5SLeilk Liu 	else
813c37f45b5SLeilk Liu 		rx_dev = &master->dev;
81499adef31SMark Brown 
81599adef31SMark Brown 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
81699adef31SMark Brown 		if (!master->can_dma(master, msg->spi, xfer))
81799adef31SMark Brown 			continue;
81899adef31SMark Brown 
81999adef31SMark Brown 		if (xfer->tx_buf != NULL) {
8206ad45a27SMark Brown 			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
8216ad45a27SMark Brown 					  (void *)xfer->tx_buf, xfer->len,
82299adef31SMark Brown 					  DMA_TO_DEVICE);
8236ad45a27SMark Brown 			if (ret != 0)
8246ad45a27SMark Brown 				return ret;
82599adef31SMark Brown 		}
82699adef31SMark Brown 
82799adef31SMark Brown 		if (xfer->rx_buf != NULL) {
8286ad45a27SMark Brown 			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
82999adef31SMark Brown 					  xfer->rx_buf, xfer->len,
83099adef31SMark Brown 					  DMA_FROM_DEVICE);
8316ad45a27SMark Brown 			if (ret != 0) {
8326ad45a27SMark Brown 				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
8336ad45a27SMark Brown 					      DMA_TO_DEVICE);
8346ad45a27SMark Brown 				return ret;
83599adef31SMark Brown 			}
83699adef31SMark Brown 		}
83799adef31SMark Brown 	}
83899adef31SMark Brown 
83999adef31SMark Brown 	master->cur_msg_mapped = true;
84099adef31SMark Brown 
84199adef31SMark Brown 	return 0;
84299adef31SMark Brown }
84399adef31SMark Brown 
8444b786458SMartin Sperl static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
84599adef31SMark Brown {
84699adef31SMark Brown 	struct spi_transfer *xfer;
84799adef31SMark Brown 	struct device *tx_dev, *rx_dev;
84899adef31SMark Brown 
8496ad45a27SMark Brown 	if (!master->cur_msg_mapped || !master->can_dma)
85099adef31SMark Brown 		return 0;
85199adef31SMark Brown 
852c37f45b5SLeilk Liu 	if (master->dma_tx)
8533fc25421SGeert Uytterhoeven 		tx_dev = master->dma_tx->device->dev;
854c37f45b5SLeilk Liu 	else
855c37f45b5SLeilk Liu 		tx_dev = &master->dev;
856c37f45b5SLeilk Liu 
857c37f45b5SLeilk Liu 	if (master->dma_rx)
8583fc25421SGeert Uytterhoeven 		rx_dev = master->dma_rx->device->dev;
859c37f45b5SLeilk Liu 	else
860c37f45b5SLeilk Liu 		rx_dev = &master->dev;
86199adef31SMark Brown 
86299adef31SMark Brown 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
86399adef31SMark Brown 		if (!master->can_dma(master, msg->spi, xfer))
86499adef31SMark Brown 			continue;
86599adef31SMark Brown 
8666ad45a27SMark Brown 		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
8676ad45a27SMark Brown 		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
86899adef31SMark Brown 	}
86999adef31SMark Brown 
87099adef31SMark Brown 	return 0;
87199adef31SMark Brown }
8722de440f5SGeert Uytterhoeven #else /* !CONFIG_HAS_DMA */
873f4502dd1SVignesh R static inline int spi_map_buf(struct spi_master *master,
874f4502dd1SVignesh R 			      struct device *dev, struct sg_table *sgt,
875f4502dd1SVignesh R 			      void *buf, size_t len,
876f4502dd1SVignesh R 			      enum dma_data_direction dir)
877f4502dd1SVignesh R {
878f4502dd1SVignesh R 	return -EINVAL;
879f4502dd1SVignesh R }
880f4502dd1SVignesh R 
881f4502dd1SVignesh R static inline void spi_unmap_buf(struct spi_master *master,
882f4502dd1SVignesh R 				 struct device *dev, struct sg_table *sgt,
883f4502dd1SVignesh R 				 enum dma_data_direction dir)
884f4502dd1SVignesh R {
885f4502dd1SVignesh R }
886f4502dd1SVignesh R 
8872de440f5SGeert Uytterhoeven static inline int __spi_map_msg(struct spi_master *master,
8882de440f5SGeert Uytterhoeven 				struct spi_message *msg)
8892de440f5SGeert Uytterhoeven {
8902de440f5SGeert Uytterhoeven 	return 0;
8912de440f5SGeert Uytterhoeven }
8922de440f5SGeert Uytterhoeven 
8934b786458SMartin Sperl static inline int __spi_unmap_msg(struct spi_master *master,
8942de440f5SGeert Uytterhoeven 				  struct spi_message *msg)
8952de440f5SGeert Uytterhoeven {
8962de440f5SGeert Uytterhoeven 	return 0;
8972de440f5SGeert Uytterhoeven }
8982de440f5SGeert Uytterhoeven #endif /* !CONFIG_HAS_DMA */
8992de440f5SGeert Uytterhoeven 
9004b786458SMartin Sperl static inline int spi_unmap_msg(struct spi_master *master,
9014b786458SMartin Sperl 				struct spi_message *msg)
9024b786458SMartin Sperl {
9034b786458SMartin Sperl 	struct spi_transfer *xfer;
9044b786458SMartin Sperl 
9054b786458SMartin Sperl 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
9064b786458SMartin Sperl 		/*
9074b786458SMartin Sperl 		 * Restore the original value of tx_buf or rx_buf if they are
9084b786458SMartin Sperl 		 * NULL.
9094b786458SMartin Sperl 		 */
9104b786458SMartin Sperl 		if (xfer->tx_buf == master->dummy_tx)
9114b786458SMartin Sperl 			xfer->tx_buf = NULL;
9124b786458SMartin Sperl 		if (xfer->rx_buf == master->dummy_rx)
9134b786458SMartin Sperl 			xfer->rx_buf = NULL;
9144b786458SMartin Sperl 	}
9154b786458SMartin Sperl 
9164b786458SMartin Sperl 	return __spi_unmap_msg(master, msg);
9174b786458SMartin Sperl }
9184b786458SMartin Sperl 
9192de440f5SGeert Uytterhoeven static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
9202de440f5SGeert Uytterhoeven {
9212de440f5SGeert Uytterhoeven 	struct spi_transfer *xfer;
9222de440f5SGeert Uytterhoeven 	void *tmp;
9232de440f5SGeert Uytterhoeven 	unsigned int max_tx, max_rx;
9242de440f5SGeert Uytterhoeven 
9252de440f5SGeert Uytterhoeven 	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
9262de440f5SGeert Uytterhoeven 		max_tx = 0;
9272de440f5SGeert Uytterhoeven 		max_rx = 0;
9282de440f5SGeert Uytterhoeven 
9292de440f5SGeert Uytterhoeven 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
9302de440f5SGeert Uytterhoeven 			if ((master->flags & SPI_MASTER_MUST_TX) &&
9312de440f5SGeert Uytterhoeven 			    !xfer->tx_buf)
9322de440f5SGeert Uytterhoeven 				max_tx = max(xfer->len, max_tx);
9332de440f5SGeert Uytterhoeven 			if ((master->flags & SPI_MASTER_MUST_RX) &&
9342de440f5SGeert Uytterhoeven 			    !xfer->rx_buf)
9352de440f5SGeert Uytterhoeven 				max_rx = max(xfer->len, max_rx);
9362de440f5SGeert Uytterhoeven 		}
9372de440f5SGeert Uytterhoeven 
9382de440f5SGeert Uytterhoeven 		if (max_tx) {
9392de440f5SGeert Uytterhoeven 			tmp = krealloc(master->dummy_tx, max_tx,
9402de440f5SGeert Uytterhoeven 				       GFP_KERNEL | GFP_DMA);
9412de440f5SGeert Uytterhoeven 			if (!tmp)
9422de440f5SGeert Uytterhoeven 				return -ENOMEM;
9432de440f5SGeert Uytterhoeven 			master->dummy_tx = tmp;
9442de440f5SGeert Uytterhoeven 			memset(tmp, 0, max_tx);
9452de440f5SGeert Uytterhoeven 		}
9462de440f5SGeert Uytterhoeven 
9472de440f5SGeert Uytterhoeven 		if (max_rx) {
9482de440f5SGeert Uytterhoeven 			tmp = krealloc(master->dummy_rx, max_rx,
9492de440f5SGeert Uytterhoeven 				       GFP_KERNEL | GFP_DMA);
9502de440f5SGeert Uytterhoeven 			if (!tmp)
9512de440f5SGeert Uytterhoeven 				return -ENOMEM;
9522de440f5SGeert Uytterhoeven 			master->dummy_rx = tmp;
9532de440f5SGeert Uytterhoeven 		}
9542de440f5SGeert Uytterhoeven 
9552de440f5SGeert Uytterhoeven 		if (max_tx || max_rx) {
9562de440f5SGeert Uytterhoeven 			list_for_each_entry(xfer, &msg->transfers,
9572de440f5SGeert Uytterhoeven 					    transfer_list) {
9582de440f5SGeert Uytterhoeven 				if (!xfer->tx_buf)
9592de440f5SGeert Uytterhoeven 					xfer->tx_buf = master->dummy_tx;
9602de440f5SGeert Uytterhoeven 				if (!xfer->rx_buf)
9612de440f5SGeert Uytterhoeven 					xfer->rx_buf = master->dummy_rx;
9622de440f5SGeert Uytterhoeven 			}
9632de440f5SGeert Uytterhoeven 		}
9642de440f5SGeert Uytterhoeven 	}
9652de440f5SGeert Uytterhoeven 
9662de440f5SGeert Uytterhoeven 	return __spi_map_msg(master, msg);
9672de440f5SGeert Uytterhoeven }
96899adef31SMark Brown 
969b158935fSMark Brown /*
970b158935fSMark Brown  * spi_transfer_one_message - Default implementation of transfer_one_message()
971b158935fSMark Brown  *
972b158935fSMark Brown  * This is a standard implementation of transfer_one_message() for
9738ba811a7SMoritz Fischer  * drivers which implement a transfer_one() operation.  It provides
974b158935fSMark Brown  * standard handling of delays and chip select management.
975b158935fSMark Brown  */
976b158935fSMark Brown static int spi_transfer_one_message(struct spi_master *master,
977b158935fSMark Brown 				    struct spi_message *msg)
978b158935fSMark Brown {
979b158935fSMark Brown 	struct spi_transfer *xfer;
980b158935fSMark Brown 	bool keep_cs = false;
981b158935fSMark Brown 	int ret = 0;
982d0716ddeSSien Wu 	unsigned long long ms = 1;
983eca2ebc7SMartin Sperl 	struct spi_statistics *statm = &master->statistics;
984eca2ebc7SMartin Sperl 	struct spi_statistics *stats = &msg->spi->statistics;
985b158935fSMark Brown 
986b158935fSMark Brown 	spi_set_cs(msg->spi, true);
987b158935fSMark Brown 
988eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
989eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
990eca2ebc7SMartin Sperl 
991b158935fSMark Brown 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
992b158935fSMark Brown 		trace_spi_transfer_start(msg, xfer);
993b158935fSMark Brown 
994eca2ebc7SMartin Sperl 		spi_statistics_add_transfer_stats(statm, xfer, master);
995eca2ebc7SMartin Sperl 		spi_statistics_add_transfer_stats(stats, xfer, master);
996eca2ebc7SMartin Sperl 
99738ec10f6SMark Brown 		if (xfer->tx_buf || xfer->rx_buf) {
99816735d02SWolfram Sang 			reinit_completion(&master->xfer_completion);
999b158935fSMark Brown 
1000b158935fSMark Brown 			ret = master->transfer_one(master, msg->spi, xfer);
1001b158935fSMark Brown 			if (ret < 0) {
1002eca2ebc7SMartin Sperl 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1003eca2ebc7SMartin Sperl 							       errors);
1004eca2ebc7SMartin Sperl 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1005eca2ebc7SMartin Sperl 							       errors);
1006b158935fSMark Brown 				dev_err(&msg->spi->dev,
1007b158935fSMark Brown 					"SPI transfer failed: %d\n", ret);
1008b158935fSMark Brown 				goto out;
1009b158935fSMark Brown 			}
1010b158935fSMark Brown 
101113a42798SAxel Lin 			if (ret > 0) {
101213a42798SAxel Lin 				ret = 0;
1013d0716ddeSSien Wu 				ms = 8LL * 1000LL * xfer->len;
1014d0716ddeSSien Wu 				do_div(ms, xfer->speed_hz);
1015*833bfadeSHauke Mehrtens 				ms += ms + 200; /* some tolerance */
101616a0ce4eSMark Brown 
1017d0716ddeSSien Wu 				if (ms > UINT_MAX)
1018d0716ddeSSien Wu 					ms = UINT_MAX;
1019d0716ddeSSien Wu 
102016a0ce4eSMark Brown 				ms = wait_for_completion_timeout(&master->xfer_completion,
102116a0ce4eSMark Brown 								 msecs_to_jiffies(ms));
102216a0ce4eSMark Brown 			}
102316a0ce4eSMark Brown 
102416a0ce4eSMark Brown 			if (ms == 0) {
1025eca2ebc7SMartin Sperl 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1026eca2ebc7SMartin Sperl 							       timedout);
1027eca2ebc7SMartin Sperl 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1028eca2ebc7SMartin Sperl 							       timedout);
102938ec10f6SMark Brown 				dev_err(&msg->spi->dev,
103038ec10f6SMark Brown 					"SPI transfer timed out\n");
103116a0ce4eSMark Brown 				msg->status = -ETIMEDOUT;
103213a42798SAxel Lin 			}
103338ec10f6SMark Brown 		} else {
103438ec10f6SMark Brown 			if (xfer->len)
103538ec10f6SMark Brown 				dev_err(&msg->spi->dev,
103638ec10f6SMark Brown 					"Bufferless transfer has length %u\n",
103738ec10f6SMark Brown 					xfer->len);
103838ec10f6SMark Brown 		}
1039b158935fSMark Brown 
1040b158935fSMark Brown 		trace_spi_transfer_stop(msg, xfer);
1041b158935fSMark Brown 
1042b158935fSMark Brown 		if (msg->status != -EINPROGRESS)
1043b158935fSMark Brown 			goto out;
1044b158935fSMark Brown 
10458244bd3aSDaniel Kurtz 		if (xfer->delay_usecs) {
10468244bd3aSDaniel Kurtz 			u16 us = xfer->delay_usecs;
10478244bd3aSDaniel Kurtz 
10488244bd3aSDaniel Kurtz 			if (us <= 10)
10498244bd3aSDaniel Kurtz 				udelay(us);
10508244bd3aSDaniel Kurtz 			else
10518244bd3aSDaniel Kurtz 				usleep_range(us, us + DIV_ROUND_UP(us, 10));
10528244bd3aSDaniel Kurtz 		}
1053b158935fSMark Brown 
1054b158935fSMark Brown 		if (xfer->cs_change) {
1055b158935fSMark Brown 			if (list_is_last(&xfer->transfer_list,
1056b158935fSMark Brown 					 &msg->transfers)) {
1057b158935fSMark Brown 				keep_cs = true;
1058b158935fSMark Brown 			} else {
10590b73aa63SMark Brown 				spi_set_cs(msg->spi, false);
10600b73aa63SMark Brown 				udelay(10);
10610b73aa63SMark Brown 				spi_set_cs(msg->spi, true);
1062b158935fSMark Brown 			}
1063b158935fSMark Brown 		}
1064b158935fSMark Brown 
1065b158935fSMark Brown 		msg->actual_length += xfer->len;
1066b158935fSMark Brown 	}
1067b158935fSMark Brown 
1068b158935fSMark Brown out:
1069b158935fSMark Brown 	if (ret != 0 || !keep_cs)
1070b158935fSMark Brown 		spi_set_cs(msg->spi, false);
1071b158935fSMark Brown 
1072b158935fSMark Brown 	if (msg->status == -EINPROGRESS)
1073b158935fSMark Brown 		msg->status = ret;
1074b158935fSMark Brown 
1075ff61eb42SGeert Uytterhoeven 	if (msg->status && master->handle_err)
1076b716c4ffSAndy Shevchenko 		master->handle_err(master, msg);
1077b716c4ffSAndy Shevchenko 
1078d780c371SMartin Sperl 	spi_res_release(master, msg);
1079d780c371SMartin Sperl 
1080b158935fSMark Brown 	spi_finalize_current_message(master);
1081b158935fSMark Brown 
1082b158935fSMark Brown 	return ret;
1083b158935fSMark Brown }
1084b158935fSMark Brown 
1085b158935fSMark Brown /**
1086b158935fSMark Brown  * spi_finalize_current_transfer - report completion of a transfer
10872c675689SThierry Reding  * @master: the master reporting completion
1088b158935fSMark Brown  *
1089b158935fSMark Brown  * Called by SPI drivers using the core transfer_one_message()
1090b158935fSMark Brown  * implementation to notify it that the current interrupt-driven
10919e8f4882SGeert Uytterhoeven  * transfer has finished and the next one may be scheduled.
1092b158935fSMark Brown  */
1093b158935fSMark Brown void spi_finalize_current_transfer(struct spi_master *master)
1094b158935fSMark Brown {
1095b158935fSMark Brown 	complete(&master->xfer_completion);
1096b158935fSMark Brown }
1097b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
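
/*
 * Illustrative sketch (not from this file): a controller driver that
 * implements ->transfer_one() and finishes transfers from its interrupt
 * handler would signal completion roughly like this.  The foo_* names and
 * the "done" status check are hypothetical.
 */
#if 0
static irqreturn_t foo_spi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct foo_spi *hw = spi_master_get_devdata(master);

	if (!foo_spi_xfer_done(hw))		/* hypothetical status check */
		return IRQ_NONE;

	foo_spi_ack_irq(hw);			/* hypothetical interrupt ack */
	spi_finalize_current_transfer(master);	/* wakes spi_transfer_one_message() */
	return IRQ_HANDLED;
}
#endif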
1098b158935fSMark Brown 
1099ffbbdd21SLinus Walleij /**
1100fc9e0f71SMark Brown  * __spi_pump_messages - function which processes spi message queue
1101fc9e0f71SMark Brown  * @master: master to process queue for
1102fc9e0f71SMark Brown  * @in_kthread: true if we are in the context of the message pump thread
1103ffbbdd21SLinus Walleij  *
1104ffbbdd21SLinus Walleij  * This function checks if there is any spi message in the queue that
1105ffbbdd21SLinus Walleij  * needs processing and, if so, calls out to the driver to initialize hardware
1106ffbbdd21SLinus Walleij  * and transfer each message.
1107ffbbdd21SLinus Walleij  *
11080461a414SMark Brown  * Note that it is called both from the kthread itself and also from
11090461a414SMark Brown  * inside spi_sync(); the queue extraction handling at the top of the
11100461a414SMark Brown  * function should deal with this safely.
1111ffbbdd21SLinus Walleij  */
1112ef4d96ecSMark Brown static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1113ffbbdd21SLinus Walleij {
1114ffbbdd21SLinus Walleij 	unsigned long flags;
1115ffbbdd21SLinus Walleij 	bool was_busy = false;
1116ffbbdd21SLinus Walleij 	int ret;
1117ffbbdd21SLinus Walleij 
1118983aee5dSMark Brown 	/* Lock queue */
1119ffbbdd21SLinus Walleij 	spin_lock_irqsave(&master->queue_lock, flags);
1120983aee5dSMark Brown 
1121983aee5dSMark Brown 	/* Make sure we are not already running a message */
1122983aee5dSMark Brown 	if (master->cur_msg) {
1123983aee5dSMark Brown 		spin_unlock_irqrestore(&master->queue_lock, flags);
1124983aee5dSMark Brown 		return;
1125983aee5dSMark Brown 	}
1126983aee5dSMark Brown 
11270461a414SMark Brown 	/* If another context is idling the device then defer */
11280461a414SMark Brown 	if (master->idling) {
11293989144fSPetr Mladek 		kthread_queue_work(&master->kworker, &master->pump_messages);
11300461a414SMark Brown 		spin_unlock_irqrestore(&master->queue_lock, flags);
11310461a414SMark Brown 		return;
11320461a414SMark Brown 	}
11330461a414SMark Brown 
1134983aee5dSMark Brown 	/* Check if the queue is idle */
1135ffbbdd21SLinus Walleij 	if (list_empty(&master->queue) || !master->running) {
1136b0b36b86SBryan Freed 		if (!master->busy) {
11379af4acc0SDan Carpenter 			spin_unlock_irqrestore(&master->queue_lock, flags);
1138ffbbdd21SLinus Walleij 			return;
1139ffbbdd21SLinus Walleij 		}
1140fc9e0f71SMark Brown 
1141fc9e0f71SMark Brown 		/* Only do teardown in the thread */
1142fc9e0f71SMark Brown 		if (!in_kthread) {
11433989144fSPetr Mladek 			kthread_queue_work(&master->kworker,
1144fc9e0f71SMark Brown 					   &master->pump_messages);
1145ffbbdd21SLinus Walleij 			spin_unlock_irqrestore(&master->queue_lock, flags);
1146fc9e0f71SMark Brown 			return;
1147fc9e0f71SMark Brown 		}
1148fc9e0f71SMark Brown 
1149ffbbdd21SLinus Walleij 		master->busy = false;
11500461a414SMark Brown 		master->idling = true;
1151ffbbdd21SLinus Walleij 		spin_unlock_irqrestore(&master->queue_lock, flags);
11520461a414SMark Brown 
11533a2eba9bSMark Brown 		kfree(master->dummy_rx);
11543a2eba9bSMark Brown 		master->dummy_rx = NULL;
11553a2eba9bSMark Brown 		kfree(master->dummy_tx);
11563a2eba9bSMark Brown 		master->dummy_tx = NULL;
1157b0b36b86SBryan Freed 		if (master->unprepare_transfer_hardware &&
1158b0b36b86SBryan Freed 		    master->unprepare_transfer_hardware(master))
1159b0b36b86SBryan Freed 			dev_err(&master->dev,
1160b0b36b86SBryan Freed 				"failed to unprepare transfer hardware\n");
116149834de2SMark Brown 		if (master->auto_runtime_pm) {
116249834de2SMark Brown 			pm_runtime_mark_last_busy(master->dev.parent);
116349834de2SMark Brown 			pm_runtime_put_autosuspend(master->dev.parent);
116449834de2SMark Brown 		}
116556ec1978SMark Brown 		trace_spi_master_idle(master);
1166ffbbdd21SLinus Walleij 
11670461a414SMark Brown 		spin_lock_irqsave(&master->queue_lock, flags);
11680461a414SMark Brown 		master->idling = false;
1169ffbbdd21SLinus Walleij 		spin_unlock_irqrestore(&master->queue_lock, flags);
1170ffbbdd21SLinus Walleij 		return;
1171ffbbdd21SLinus Walleij 	}
1172ffbbdd21SLinus Walleij 
1173ffbbdd21SLinus Walleij 	/* Extract head of queue */
1174ffbbdd21SLinus Walleij 	master->cur_msg =
1175a89e2d27SAxel Lin 		list_first_entry(&master->queue, struct spi_message, queue);
1176ffbbdd21SLinus Walleij 
1177ffbbdd21SLinus Walleij 	list_del_init(&master->cur_msg->queue);
1178ffbbdd21SLinus Walleij 	if (master->busy)
1179ffbbdd21SLinus Walleij 		was_busy = true;
1180ffbbdd21SLinus Walleij 	else
1181ffbbdd21SLinus Walleij 		master->busy = true;
1182ffbbdd21SLinus Walleij 	spin_unlock_irqrestore(&master->queue_lock, flags);
1183ffbbdd21SLinus Walleij 
1184ef4d96ecSMark Brown 	mutex_lock(&master->io_mutex);
1185ef4d96ecSMark Brown 
118649834de2SMark Brown 	if (!was_busy && master->auto_runtime_pm) {
118749834de2SMark Brown 		ret = pm_runtime_get_sync(master->dev.parent);
118849834de2SMark Brown 		if (ret < 0) {
118949834de2SMark Brown 			dev_err(&master->dev, "Failed to power device: %d\n",
119049834de2SMark Brown 				ret);
1191764f2166SMark Brown 			mutex_unlock(&master->io_mutex);
119249834de2SMark Brown 			return;
119349834de2SMark Brown 		}
119449834de2SMark Brown 	}
119549834de2SMark Brown 
119656ec1978SMark Brown 	if (!was_busy)
119756ec1978SMark Brown 		trace_spi_master_busy(master);
119856ec1978SMark Brown 
11997dfd2bd7SShubhrajyoti D 	if (!was_busy && master->prepare_transfer_hardware) {
1200ffbbdd21SLinus Walleij 		ret = master->prepare_transfer_hardware(master);
1201ffbbdd21SLinus Walleij 		if (ret) {
1202ffbbdd21SLinus Walleij 			dev_err(&master->dev,
1203ffbbdd21SLinus Walleij 				"failed to prepare transfer hardware\n");
120449834de2SMark Brown 
120549834de2SMark Brown 			if (master->auto_runtime_pm)
120649834de2SMark Brown 				pm_runtime_put(master->dev.parent);
1207764f2166SMark Brown 			mutex_unlock(&master->io_mutex);
1208ffbbdd21SLinus Walleij 			return;
1209ffbbdd21SLinus Walleij 		}
1210ffbbdd21SLinus Walleij 	}
1211ffbbdd21SLinus Walleij 
121256ec1978SMark Brown 	trace_spi_message_start(master->cur_msg);
121356ec1978SMark Brown 
12142841a5fcSMark Brown 	if (master->prepare_message) {
12152841a5fcSMark Brown 		ret = master->prepare_message(master, master->cur_msg);
12162841a5fcSMark Brown 		if (ret) {
12172841a5fcSMark Brown 			dev_err(&master->dev,
12182841a5fcSMark Brown 				"failed to prepare message: %d\n", ret);
12192841a5fcSMark Brown 			master->cur_msg->status = ret;
12202841a5fcSMark Brown 			spi_finalize_current_message(master);
122149023d2eSJon Hunter 			goto out;
12222841a5fcSMark Brown 		}
12232841a5fcSMark Brown 		master->cur_msg_prepared = true;
12242841a5fcSMark Brown 	}
12252841a5fcSMark Brown 
122699adef31SMark Brown 	ret = spi_map_msg(master, master->cur_msg);
122799adef31SMark Brown 	if (ret) {
122899adef31SMark Brown 		master->cur_msg->status = ret;
122999adef31SMark Brown 		spi_finalize_current_message(master);
123049023d2eSJon Hunter 		goto out;
123199adef31SMark Brown 	}
123299adef31SMark Brown 
1233ffbbdd21SLinus Walleij 	ret = master->transfer_one_message(master, master->cur_msg);
1234ffbbdd21SLinus Walleij 	if (ret) {
1235ffbbdd21SLinus Walleij 		dev_err(&master->dev,
12361f802f82SGeert Uytterhoeven 			"failed to transfer one message from queue\n");
123749023d2eSJon Hunter 		goto out;
1238ffbbdd21SLinus Walleij 	}
123949023d2eSJon Hunter 
124049023d2eSJon Hunter out:
1241ef4d96ecSMark Brown 	mutex_unlock(&master->io_mutex);
124262826970SMark Brown 
124362826970SMark Brown 	/* Prod the scheduler in case transfer_one() was busy waiting */
124449023d2eSJon Hunter 	if (!ret)
124562826970SMark Brown 		cond_resched();
1246ffbbdd21SLinus Walleij }
1247ffbbdd21SLinus Walleij 
1248fc9e0f71SMark Brown /**
1249fc9e0f71SMark Brown  * spi_pump_messages - kthread work function which processes spi message queue
1250fc9e0f71SMark Brown  * @work: pointer to kthread work struct contained in the master struct
1251fc9e0f71SMark Brown  */
1252fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work)
1253fc9e0f71SMark Brown {
1254fc9e0f71SMark Brown 	struct spi_master *master =
1255fc9e0f71SMark Brown 		container_of(work, struct spi_master, pump_messages);
1256fc9e0f71SMark Brown 
1257ef4d96ecSMark Brown 	__spi_pump_messages(master, true);
1258fc9e0f71SMark Brown }
1259fc9e0f71SMark Brown 
1260ffbbdd21SLinus Walleij static int spi_init_queue(struct spi_master *master)
1261ffbbdd21SLinus Walleij {
1262ffbbdd21SLinus Walleij 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
1263ffbbdd21SLinus Walleij 
1264ffbbdd21SLinus Walleij 	master->running = false;
1265ffbbdd21SLinus Walleij 	master->busy = false;
1266ffbbdd21SLinus Walleij 
12673989144fSPetr Mladek 	kthread_init_worker(&master->kworker);
1268ffbbdd21SLinus Walleij 	master->kworker_task = kthread_run(kthread_worker_fn,
1269f170168bSKees Cook 					   &master->kworker, "%s",
1270ffbbdd21SLinus Walleij 					   dev_name(&master->dev));
1271ffbbdd21SLinus Walleij 	if (IS_ERR(master->kworker_task)) {
1272ffbbdd21SLinus Walleij 		dev_err(&master->dev, "failed to create message pump task\n");
127398a8f5a0SJarkko Nikula 		return PTR_ERR(master->kworker_task);
1274ffbbdd21SLinus Walleij 	}
12753989144fSPetr Mladek 	kthread_init_work(&master->pump_messages, spi_pump_messages);
1276ffbbdd21SLinus Walleij 
1277ffbbdd21SLinus Walleij 	/*
1278ffbbdd21SLinus Walleij 	 * Master config will indicate if this controller should run the
1279ffbbdd21SLinus Walleij 	 * message pump with high (realtime) priority to reduce the transfer
1280ffbbdd21SLinus Walleij 	 * latency on the bus by minimising the delay between a transfer
1281ffbbdd21SLinus Walleij 	 * request and the scheduling of the message pump thread. Without this
1282ffbbdd21SLinus Walleij 	 * setting the message pump thread will remain at default priority.
1283ffbbdd21SLinus Walleij 	 */
1284ffbbdd21SLinus Walleij 	if (master->rt) {
1285ffbbdd21SLinus Walleij 		dev_info(&master->dev,
1286ffbbdd21SLinus Walleij 			"will run message pump with realtime priority\n");
1287ffbbdd21SLinus Walleij 		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
1288ffbbdd21SLinus Walleij 	}
1289ffbbdd21SLinus Walleij 
1290ffbbdd21SLinus Walleij 	return 0;
1291ffbbdd21SLinus Walleij }
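
/*
 * Illustrative sketch: a controller driver opts in to the realtime message
 * pump configured above by setting the rt flag on the (not yet registered)
 * master; the surrounding context is hypothetical.
 */
#if 0
	/* in a controller driver's probe(), before spi_register_master(): */
	master->rt = true;	/* message pump thread gets SCHED_FIFO priority */
#endif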
1292ffbbdd21SLinus Walleij 
1293ffbbdd21SLinus Walleij /**
1294ffbbdd21SLinus Walleij  * spi_get_next_queued_message() - called by driver to check for queued
1295ffbbdd21SLinus Walleij  * messages
1296ffbbdd21SLinus Walleij  * @master: the master to check for queued messages
1297ffbbdd21SLinus Walleij  *
1298ffbbdd21SLinus Walleij  * If there are more messages in the queue, the next message is returned from
1299ffbbdd21SLinus Walleij  * this call.
130097d56dc6SJavier Martinez Canillas  *
130197d56dc6SJavier Martinez Canillas  * Return: the next message in the queue, else NULL if the queue is empty.
1302ffbbdd21SLinus Walleij  */
1303ffbbdd21SLinus Walleij struct spi_message *spi_get_next_queued_message(struct spi_master *master)
1304ffbbdd21SLinus Walleij {
1305ffbbdd21SLinus Walleij 	struct spi_message *next;
1306ffbbdd21SLinus Walleij 	unsigned long flags;
1307ffbbdd21SLinus Walleij 
1308ffbbdd21SLinus Walleij 	/* get a pointer to the next message, if any */
1309ffbbdd21SLinus Walleij 	spin_lock_irqsave(&master->queue_lock, flags);
13101cfd97f9SAxel Lin 	next = list_first_entry_or_null(&master->queue, struct spi_message,
13111cfd97f9SAxel Lin 					queue);
1312ffbbdd21SLinus Walleij 	spin_unlock_irqrestore(&master->queue_lock, flags);
1313ffbbdd21SLinus Walleij 
1314ffbbdd21SLinus Walleij 	return next;
1315ffbbdd21SLinus Walleij }
1316ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
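
/*
 * Illustrative sketch: a driver can peek at the queue, e.g. to decide whether
 * to keep its clocks running between messages.  The foo_* calls are
 * hypothetical.
 */
#if 0
	if (spi_get_next_queued_message(master))
		foo_spi_keep_powered(hw);	/* more messages pending */
	else
		foo_spi_relax(hw);
#endif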
1317ffbbdd21SLinus Walleij 
1318ffbbdd21SLinus Walleij /**
1319ffbbdd21SLinus Walleij  * spi_finalize_current_message() - the current message is complete
1320ffbbdd21SLinus Walleij  * @master: the master to return the message to
1321ffbbdd21SLinus Walleij  *
1322ffbbdd21SLinus Walleij  * Called by the driver to notify the core that the message in the front of the
1323ffbbdd21SLinus Walleij  * queue is complete and can be removed from the queue.
1324ffbbdd21SLinus Walleij  */
1325ffbbdd21SLinus Walleij void spi_finalize_current_message(struct spi_master *master)
1326ffbbdd21SLinus Walleij {
1327ffbbdd21SLinus Walleij 	struct spi_message *mesg;
1328ffbbdd21SLinus Walleij 	unsigned long flags;
13292841a5fcSMark Brown 	int ret;
1330ffbbdd21SLinus Walleij 
1331ffbbdd21SLinus Walleij 	spin_lock_irqsave(&master->queue_lock, flags);
1332ffbbdd21SLinus Walleij 	mesg = master->cur_msg;
1333ffbbdd21SLinus Walleij 	spin_unlock_irqrestore(&master->queue_lock, flags);
1334ffbbdd21SLinus Walleij 
133599adef31SMark Brown 	spi_unmap_msg(master, mesg);
133699adef31SMark Brown 
13372841a5fcSMark Brown 	if (master->cur_msg_prepared && master->unprepare_message) {
13382841a5fcSMark Brown 		ret = master->unprepare_message(master, mesg);
13392841a5fcSMark Brown 		if (ret) {
13402841a5fcSMark Brown 			dev_err(&master->dev,
13412841a5fcSMark Brown 				"failed to unprepare message: %d\n", ret);
13422841a5fcSMark Brown 		}
13432841a5fcSMark Brown 	}
1344391949b6SUwe Kleine-König 
13458e76ef88SMartin Sperl 	spin_lock_irqsave(&master->queue_lock, flags);
13468e76ef88SMartin Sperl 	master->cur_msg = NULL;
13472841a5fcSMark Brown 	master->cur_msg_prepared = false;
13483989144fSPetr Mladek 	kthread_queue_work(&master->kworker, &master->pump_messages);
13498e76ef88SMartin Sperl 	spin_unlock_irqrestore(&master->queue_lock, flags);
13508e76ef88SMartin Sperl 
13518e76ef88SMartin Sperl 	trace_spi_message_done(mesg);
13522841a5fcSMark Brown 
1353ffbbdd21SLinus Walleij 	mesg->state = NULL;
1354ffbbdd21SLinus Walleij 	if (mesg->complete)
1355ffbbdd21SLinus Walleij 		mesg->complete(mesg->context);
1356ffbbdd21SLinus Walleij }
1357ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message);
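
/*
 * Illustrative sketch: a driver that supplies its own ->transfer_one_message()
 * (rather than ->transfer_one()) must finalize each message itself, as the
 * generic spi_transfer_one_message() above does.  foo_spi_do_transfer() is a
 * hypothetical helper that executes a single transfer.
 */
#if 0
static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *msg)
{
	struct foo_spi *hw = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = foo_spi_do_transfer(hw, msg->spi, xfer);
		if (ret)
			break;
		msg->actual_length += xfer->len;
	}

	msg->status = ret;
	spi_finalize_current_message(master);
	return ret;
}
#endif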
1358ffbbdd21SLinus Walleij 
1359ffbbdd21SLinus Walleij static int spi_start_queue(struct spi_master *master)
1360ffbbdd21SLinus Walleij {
1361ffbbdd21SLinus Walleij 	unsigned long flags;
1362ffbbdd21SLinus Walleij 
1363ffbbdd21SLinus Walleij 	spin_lock_irqsave(&master->queue_lock, flags);
1364ffbbdd21SLinus Walleij 
1365ffbbdd21SLinus Walleij 	if (master->running || master->busy) {
1366ffbbdd21SLinus Walleij 		spin_unlock_irqrestore(&master->queue_lock, flags);
1367ffbbdd21SLinus Walleij 		return -EBUSY;
1368ffbbdd21SLinus Walleij 	}
1369ffbbdd21SLinus Walleij 
1370ffbbdd21SLinus Walleij 	master->running = true;
1371ffbbdd21SLinus Walleij 	master->cur_msg = NULL;
1372ffbbdd21SLinus Walleij 	spin_unlock_irqrestore(&master->queue_lock, flags);
1373ffbbdd21SLinus Walleij 
13743989144fSPetr Mladek 	kthread_queue_work(&master->kworker, &master->pump_messages);
1375ffbbdd21SLinus Walleij 
1376ffbbdd21SLinus Walleij 	return 0;
1377ffbbdd21SLinus Walleij }
1378ffbbdd21SLinus Walleij 
1379ffbbdd21SLinus Walleij static int spi_stop_queue(struct spi_master *master)
1380ffbbdd21SLinus Walleij {
1381ffbbdd21SLinus Walleij 	unsigned long flags;
1382ffbbdd21SLinus Walleij 	unsigned limit = 500;
1383ffbbdd21SLinus Walleij 	int ret = 0;
1384ffbbdd21SLinus Walleij 
1385ffbbdd21SLinus Walleij 	spin_lock_irqsave(&master->queue_lock, flags);
1386ffbbdd21SLinus Walleij 
1387ffbbdd21SLinus Walleij 	/*
1388ffbbdd21SLinus Walleij 	 * This is a bit lame, but is optimized for the common execution path.
1389ffbbdd21SLinus Walleij 	 * A wait_queue on the master->busy could be used, but then the common
1390ffbbdd21SLinus Walleij 	 * execution path (pump_messages) would be required to call wake_up or
1391ffbbdd21SLinus Walleij 	 * friends on every SPI message. Do this instead.
1392ffbbdd21SLinus Walleij 	 */
1393ffbbdd21SLinus Walleij 	while ((!list_empty(&master->queue) || master->busy) && limit--) {
1394ffbbdd21SLinus Walleij 		spin_unlock_irqrestore(&master->queue_lock, flags);
1395f97b26b0SAxel Lin 		usleep_range(10000, 11000);
1396ffbbdd21SLinus Walleij 		spin_lock_irqsave(&master->queue_lock, flags);
1397ffbbdd21SLinus Walleij 	}
1398ffbbdd21SLinus Walleij 
1399ffbbdd21SLinus Walleij 	if (!list_empty(&master->queue) || master->busy)
1400ffbbdd21SLinus Walleij 		ret = -EBUSY;
1401ffbbdd21SLinus Walleij 	else
1402ffbbdd21SLinus Walleij 		master->running = false;
1403ffbbdd21SLinus Walleij 
1404ffbbdd21SLinus Walleij 	spin_unlock_irqrestore(&master->queue_lock, flags);
1405ffbbdd21SLinus Walleij 
1406ffbbdd21SLinus Walleij 	if (ret) {
1407ffbbdd21SLinus Walleij 		dev_warn(&master->dev,
1408ffbbdd21SLinus Walleij 			 "could not stop message queue\n");
1409ffbbdd21SLinus Walleij 		return ret;
1410ffbbdd21SLinus Walleij 	}
1411ffbbdd21SLinus Walleij 	return ret;
1412ffbbdd21SLinus Walleij }
1413ffbbdd21SLinus Walleij 
1414ffbbdd21SLinus Walleij static int spi_destroy_queue(struct spi_master *master)
1415ffbbdd21SLinus Walleij {
1416ffbbdd21SLinus Walleij 	int ret;
1417ffbbdd21SLinus Walleij 
1418ffbbdd21SLinus Walleij 	ret = spi_stop_queue(master);
1419ffbbdd21SLinus Walleij 
1420ffbbdd21SLinus Walleij 	/*
14213989144fSPetr Mladek 	 * kthread_flush_worker will block until all work is done.
1422ffbbdd21SLinus Walleij 	 * If the reason that stop_queue timed out is that the work will never
1423ffbbdd21SLinus Walleij 	 * finish, then calling flush/stop on the thread does no good, so
1424ffbbdd21SLinus Walleij 	 * return anyway.
1425ffbbdd21SLinus Walleij 	 */
1426ffbbdd21SLinus Walleij 	if (ret) {
1427ffbbdd21SLinus Walleij 		dev_err(&master->dev, "problem destroying queue\n");
1428ffbbdd21SLinus Walleij 		return ret;
1429ffbbdd21SLinus Walleij 	}
1430ffbbdd21SLinus Walleij 
14313989144fSPetr Mladek 	kthread_flush_worker(&master->kworker);
1432ffbbdd21SLinus Walleij 	kthread_stop(master->kworker_task);
1433ffbbdd21SLinus Walleij 
1434ffbbdd21SLinus Walleij 	return 0;
1435ffbbdd21SLinus Walleij }
1436ffbbdd21SLinus Walleij 
14370461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi,
14380461a414SMark Brown 				 struct spi_message *msg,
14390461a414SMark Brown 				 bool need_pump)
1440ffbbdd21SLinus Walleij {
1441ffbbdd21SLinus Walleij 	struct spi_master *master = spi->master;
1442ffbbdd21SLinus Walleij 	unsigned long flags;
1443ffbbdd21SLinus Walleij 
1444ffbbdd21SLinus Walleij 	spin_lock_irqsave(&master->queue_lock, flags);
1445ffbbdd21SLinus Walleij 
1446ffbbdd21SLinus Walleij 	if (!master->running) {
1447ffbbdd21SLinus Walleij 		spin_unlock_irqrestore(&master->queue_lock, flags);
1448ffbbdd21SLinus Walleij 		return -ESHUTDOWN;
1449ffbbdd21SLinus Walleij 	}
1450ffbbdd21SLinus Walleij 	msg->actual_length = 0;
1451ffbbdd21SLinus Walleij 	msg->status = -EINPROGRESS;
1452ffbbdd21SLinus Walleij 
1453ffbbdd21SLinus Walleij 	list_add_tail(&msg->queue, &master->queue);
14540461a414SMark Brown 	if (!master->busy && need_pump)
14553989144fSPetr Mladek 		kthread_queue_work(&master->kworker, &master->pump_messages);
1456ffbbdd21SLinus Walleij 
1457ffbbdd21SLinus Walleij 	spin_unlock_irqrestore(&master->queue_lock, flags);
1458ffbbdd21SLinus Walleij 	return 0;
1459ffbbdd21SLinus Walleij }
1460ffbbdd21SLinus Walleij 
14610461a414SMark Brown /**
14620461a414SMark Brown  * spi_queued_transfer - transfer function for queued transfers
14630461a414SMark Brown  * @spi: spi device which is requesting transfer
14640461a414SMark Brown  * @msg: spi message which is to be handled and queued to the driver queue
146597d56dc6SJavier Martinez Canillas  *
146697d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
14670461a414SMark Brown  */
14680461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
14690461a414SMark Brown {
14700461a414SMark Brown 	return __spi_queued_transfer(spi, msg, true);
14710461a414SMark Brown }
14720461a414SMark Brown 
1473ffbbdd21SLinus Walleij static int spi_master_initialize_queue(struct spi_master *master)
1474ffbbdd21SLinus Walleij {
1475ffbbdd21SLinus Walleij 	int ret;
1476ffbbdd21SLinus Walleij 
1477ffbbdd21SLinus Walleij 	master->transfer = spi_queued_transfer;
1478b158935fSMark Brown 	if (!master->transfer_one_message)
1479b158935fSMark Brown 		master->transfer_one_message = spi_transfer_one_message;
1480ffbbdd21SLinus Walleij 
1481ffbbdd21SLinus Walleij 	/* Initialize and start queue */
1482ffbbdd21SLinus Walleij 	ret = spi_init_queue(master);
1483ffbbdd21SLinus Walleij 	if (ret) {
1484ffbbdd21SLinus Walleij 		dev_err(&master->dev, "problem initializing queue\n");
1485ffbbdd21SLinus Walleij 		goto err_init_queue;
1486ffbbdd21SLinus Walleij 	}
1487c3676d5cSMark Brown 	master->queued = true;
1488ffbbdd21SLinus Walleij 	ret = spi_start_queue(master);
1489ffbbdd21SLinus Walleij 	if (ret) {
1490ffbbdd21SLinus Walleij 		dev_err(&master->dev, "problem starting queue\n");
1491ffbbdd21SLinus Walleij 		goto err_start_queue;
1492ffbbdd21SLinus Walleij 	}
1493ffbbdd21SLinus Walleij 
1494ffbbdd21SLinus Walleij 	return 0;
1495ffbbdd21SLinus Walleij 
1496ffbbdd21SLinus Walleij err_start_queue:
1497ffbbdd21SLinus Walleij 	spi_destroy_queue(master);
1498c3676d5cSMark Brown err_init_queue:
1499ffbbdd21SLinus Walleij 	return ret;
1500ffbbdd21SLinus Walleij }
1501ffbbdd21SLinus Walleij 
1502ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/
1503ffbbdd21SLinus Walleij 
15047cb94361SAndreas Larsson #if defined(CONFIG_OF)
1505aff5e3f8SPantelis Antoniou static struct spi_device *
1506aff5e3f8SPantelis Antoniou of_register_spi_device(struct spi_master *master, struct device_node *nc)
1507d57a4282SGrant Likely {
1508d57a4282SGrant Likely 	struct spi_device *spi;
1509d57a4282SGrant Likely 	int rc;
151089da4293STrent Piepho 	u32 value;
1511d57a4282SGrant Likely 
1512d57a4282SGrant Likely 	/* Alloc an spi_device */
1513d57a4282SGrant Likely 	spi = spi_alloc_device(master);
1514d57a4282SGrant Likely 	if (!spi) {
1515d57a4282SGrant Likely 		dev_err(&master->dev, "spi_device alloc error for %s\n",
1516d57a4282SGrant Likely 			nc->full_name);
1517aff5e3f8SPantelis Antoniou 		rc = -ENOMEM;
1518aff5e3f8SPantelis Antoniou 		goto err_out;
1519d57a4282SGrant Likely 	}
1520d57a4282SGrant Likely 
1521d57a4282SGrant Likely 	/* Select device driver */
1522aff5e3f8SPantelis Antoniou 	rc = of_modalias_node(nc, spi->modalias,
1523aff5e3f8SPantelis Antoniou 				sizeof(spi->modalias));
1524aff5e3f8SPantelis Antoniou 	if (rc < 0) {
1525d57a4282SGrant Likely 		dev_err(&master->dev, "cannot find modalias for %s\n",
1526d57a4282SGrant Likely 			nc->full_name);
1527aff5e3f8SPantelis Antoniou 		goto err_out;
1528d57a4282SGrant Likely 	}
1529d57a4282SGrant Likely 
1530d57a4282SGrant Likely 	/* Device address */
153189da4293STrent Piepho 	rc = of_property_read_u32(nc, "reg", &value);
153289da4293STrent Piepho 	if (rc) {
153389da4293STrent Piepho 		dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
153489da4293STrent Piepho 			nc->full_name, rc);
1535aff5e3f8SPantelis Antoniou 		goto err_out;
1536d57a4282SGrant Likely 	}
153789da4293STrent Piepho 	spi->chip_select = value;
1538d57a4282SGrant Likely 
1539d57a4282SGrant Likely 	/* Mode (clock phase/polarity/etc.) */
1540d57a4282SGrant Likely 	if (of_find_property(nc, "spi-cpha", NULL))
1541d57a4282SGrant Likely 		spi->mode |= SPI_CPHA;
1542d57a4282SGrant Likely 	if (of_find_property(nc, "spi-cpol", NULL))
1543d57a4282SGrant Likely 		spi->mode |= SPI_CPOL;
1544d57a4282SGrant Likely 	if (of_find_property(nc, "spi-cs-high", NULL))
1545d57a4282SGrant Likely 		spi->mode |= SPI_CS_HIGH;
1546c20151dfSLars-Peter Clausen 	if (of_find_property(nc, "spi-3wire", NULL))
1547c20151dfSLars-Peter Clausen 		spi->mode |= SPI_3WIRE;
1548cd6339e6SZhao Qiang 	if (of_find_property(nc, "spi-lsb-first", NULL))
1549cd6339e6SZhao Qiang 		spi->mode |= SPI_LSB_FIRST;
1550d57a4282SGrant Likely 
1551f477b7fbSwangyuhang 	/* Device DUAL/QUAD mode */
155289da4293STrent Piepho 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
155389da4293STrent Piepho 		switch (value) {
155489da4293STrent Piepho 		case 1:
1555f477b7fbSwangyuhang 			break;
155689da4293STrent Piepho 		case 2:
1557f477b7fbSwangyuhang 			spi->mode |= SPI_TX_DUAL;
1558f477b7fbSwangyuhang 			break;
155989da4293STrent Piepho 		case 4:
1560f477b7fbSwangyuhang 			spi->mode |= SPI_TX_QUAD;
1561f477b7fbSwangyuhang 			break;
1562f477b7fbSwangyuhang 		default:
156380874d8cSGeert Uytterhoeven 			dev_warn(&master->dev,
1564a110f93dSwangyuhang 				"spi-tx-bus-width %d not supported\n",
156589da4293STrent Piepho 				value);
156680874d8cSGeert Uytterhoeven 			break;
1567f477b7fbSwangyuhang 		}
1568a822e99cSMark Brown 	}
1569f477b7fbSwangyuhang 
157089da4293STrent Piepho 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
157189da4293STrent Piepho 		switch (value) {
157289da4293STrent Piepho 		case 1:
1573f477b7fbSwangyuhang 			break;
157489da4293STrent Piepho 		case 2:
1575f477b7fbSwangyuhang 			spi->mode |= SPI_RX_DUAL;
1576f477b7fbSwangyuhang 			break;
157789da4293STrent Piepho 		case 4:
1578f477b7fbSwangyuhang 			spi->mode |= SPI_RX_QUAD;
1579f477b7fbSwangyuhang 			break;
1580f477b7fbSwangyuhang 		default:
158180874d8cSGeert Uytterhoeven 			dev_warn(&master->dev,
1582a110f93dSwangyuhang 				"spi-rx-bus-width %d not supported\n",
158389da4293STrent Piepho 				value);
158480874d8cSGeert Uytterhoeven 			break;
1585f477b7fbSwangyuhang 		}
1586a822e99cSMark Brown 	}
1587f477b7fbSwangyuhang 
1588d57a4282SGrant Likely 	/* Device speed */
158989da4293STrent Piepho 	rc = of_property_read_u32(nc, "spi-max-frequency", &value);
159089da4293STrent Piepho 	if (rc) {
159189da4293STrent Piepho 		dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
159289da4293STrent Piepho 			nc->full_name, rc);
1593aff5e3f8SPantelis Antoniou 		goto err_out;
1594d57a4282SGrant Likely 	}
159589da4293STrent Piepho 	spi->max_speed_hz = value;
1596d57a4282SGrant Likely 
1597d57a4282SGrant Likely 	/* Store a pointer to the node in the device structure */
1598d57a4282SGrant Likely 	of_node_get(nc);
1599d57a4282SGrant Likely 	spi->dev.of_node = nc;
1600d57a4282SGrant Likely 
1601d57a4282SGrant Likely 	/* Register the new device */
1602d57a4282SGrant Likely 	rc = spi_add_device(spi);
1603d57a4282SGrant Likely 	if (rc) {
1604d57a4282SGrant Likely 		dev_err(&master->dev, "spi_device register error %s\n",
1605d57a4282SGrant Likely 			nc->full_name);
1606aff5e3f8SPantelis Antoniou 		goto err_out;
1607d57a4282SGrant Likely 	}
1608d57a4282SGrant Likely 
1609aff5e3f8SPantelis Antoniou 	return spi;
1610aff5e3f8SPantelis Antoniou 
1611aff5e3f8SPantelis Antoniou err_out:
1612aff5e3f8SPantelis Antoniou 	spi_dev_put(spi);
1613aff5e3f8SPantelis Antoniou 	return ERR_PTR(rc);
1614aff5e3f8SPantelis Antoniou }
1615aff5e3f8SPantelis Antoniou 
1616aff5e3f8SPantelis Antoniou /**
1617aff5e3f8SPantelis Antoniou  * of_register_spi_devices() - Register child devices onto the SPI bus
1618aff5e3f8SPantelis Antoniou  * @master:	Pointer to spi_master device
1619aff5e3f8SPantelis Antoniou  *
1620aff5e3f8SPantelis Antoniou  * Registers an spi_device for each child node of the master node which has a 'reg'
1621aff5e3f8SPantelis Antoniou  * property.
1622aff5e3f8SPantelis Antoniou  */
1623aff5e3f8SPantelis Antoniou static void of_register_spi_devices(struct spi_master *master)
1624aff5e3f8SPantelis Antoniou {
1625aff5e3f8SPantelis Antoniou 	struct spi_device *spi;
1626aff5e3f8SPantelis Antoniou 	struct device_node *nc;
1627aff5e3f8SPantelis Antoniou 
1628aff5e3f8SPantelis Antoniou 	if (!master->dev.of_node)
1629aff5e3f8SPantelis Antoniou 		return;
1630aff5e3f8SPantelis Antoniou 
1631aff5e3f8SPantelis Antoniou 	for_each_available_child_of_node(master->dev.of_node, nc) {
1632bd6c1644SGeert Uytterhoeven 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
1633bd6c1644SGeert Uytterhoeven 			continue;
1634aff5e3f8SPantelis Antoniou 		spi = of_register_spi_device(master, nc);
1635e0af98a7SRalf Ramsauer 		if (IS_ERR(spi)) {
1636aff5e3f8SPantelis Antoniou 			dev_warn(&master->dev, "Failed to create SPI device for %s\n",
1637aff5e3f8SPantelis Antoniou 				nc->full_name);
1638e0af98a7SRalf Ramsauer 			of_node_clear_flag(nc, OF_POPULATED);
1639e0af98a7SRalf Ramsauer 		}
1640d57a4282SGrant Likely 	}
1641d57a4282SGrant Likely }
1642d57a4282SGrant Likely #else
1643d57a4282SGrant Likely static void of_register_spi_devices(struct spi_master *master) { }
1644d57a4282SGrant Likely #endif
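
/*
 * For reference, an illustrative (entirely hypothetical) device tree fragment
 * that the parsing above would accept: "reg" selects the chip select,
 * "spi-max-frequency" is mandatory, and the optional spi-cpha/spi-cpol/
 * spi-cs-high/spi-3wire/spi-lsb-first and spi-{tx,rx}-bus-width properties
 * map onto the corresponding SPI_* mode bits.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <20000000>;
 *			spi-cpol;
 *			spi-cpha;
 *			spi-rx-bus-width = <2>;
 *		};
 *	};
 */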
1645d57a4282SGrant Likely 
164664bee4d2SMika Westerberg #ifdef CONFIG_ACPI
164764bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
164864bee4d2SMika Westerberg {
164964bee4d2SMika Westerberg 	struct spi_device *spi = data;
1650a0a90718SMika Westerberg 	struct spi_master *master = spi->master;
165164bee4d2SMika Westerberg 
165264bee4d2SMika Westerberg 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
165364bee4d2SMika Westerberg 		struct acpi_resource_spi_serialbus *sb;
165464bee4d2SMika Westerberg 
165564bee4d2SMika Westerberg 		sb = &ares->data.spi_serial_bus;
165664bee4d2SMika Westerberg 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1657a0a90718SMika Westerberg 			/*
1658a0a90718SMika Westerberg 			 * ACPI DeviceSelection numbering is handled by the
1659a0a90718SMika Westerberg 			 * host controller driver in Windows and can vary
1660a0a90718SMika Westerberg 			 * from driver to driver. In Linux we always expect
1661a0a90718SMika Westerberg 			 * 0 .. max - 1 so we need to ask the driver to
1662a0a90718SMika Westerberg 			 * translate between the two schemes.
1663a0a90718SMika Westerberg 			 */
1664a0a90718SMika Westerberg 			if (master->fw_translate_cs) {
1665a0a90718SMika Westerberg 				int cs = master->fw_translate_cs(master,
1666a0a90718SMika Westerberg 						sb->device_selection);
1667a0a90718SMika Westerberg 				if (cs < 0)
1668a0a90718SMika Westerberg 					return cs;
1669a0a90718SMika Westerberg 				spi->chip_select = cs;
1670a0a90718SMika Westerberg 			} else {
167164bee4d2SMika Westerberg 				spi->chip_select = sb->device_selection;
1672a0a90718SMika Westerberg 			}
1673a0a90718SMika Westerberg 
167464bee4d2SMika Westerberg 			spi->max_speed_hz = sb->connection_speed;
167564bee4d2SMika Westerberg 
167664bee4d2SMika Westerberg 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
167764bee4d2SMika Westerberg 				spi->mode |= SPI_CPHA;
167864bee4d2SMika Westerberg 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
167964bee4d2SMika Westerberg 				spi->mode |= SPI_CPOL;
168064bee4d2SMika Westerberg 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
168164bee4d2SMika Westerberg 				spi->mode |= SPI_CS_HIGH;
168264bee4d2SMika Westerberg 		}
168364bee4d2SMika Westerberg 	} else if (spi->irq < 0) {
168464bee4d2SMika Westerberg 		struct resource r;
168564bee4d2SMika Westerberg 
168664bee4d2SMika Westerberg 		if (acpi_dev_resource_interrupt(ares, 0, &r))
168764bee4d2SMika Westerberg 			spi->irq = r.start;
168864bee4d2SMika Westerberg 	}
168964bee4d2SMika Westerberg 
169064bee4d2SMika Westerberg 	/* Always tell the ACPI core to skip this resource */
169164bee4d2SMika Westerberg 	return 1;
169264bee4d2SMika Westerberg }
169364bee4d2SMika Westerberg 
16947f24467fSOctavian Purdila static acpi_status acpi_register_spi_device(struct spi_master *master,
16957f24467fSOctavian Purdila 					    struct acpi_device *adev)
169664bee4d2SMika Westerberg {
169764bee4d2SMika Westerberg 	struct list_head resource_list;
169864bee4d2SMika Westerberg 	struct spi_device *spi;
169964bee4d2SMika Westerberg 	int ret;
170064bee4d2SMika Westerberg 
17017f24467fSOctavian Purdila 	if (acpi_bus_get_status(adev) || !adev->status.present ||
17027f24467fSOctavian Purdila 	    acpi_device_enumerated(adev))
170364bee4d2SMika Westerberg 		return AE_OK;
170464bee4d2SMika Westerberg 
170564bee4d2SMika Westerberg 	spi = spi_alloc_device(master);
170664bee4d2SMika Westerberg 	if (!spi) {
170764bee4d2SMika Westerberg 		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
170864bee4d2SMika Westerberg 			dev_name(&adev->dev));
170964bee4d2SMika Westerberg 		return AE_NO_MEMORY;
171064bee4d2SMika Westerberg 	}
171164bee4d2SMika Westerberg 
17127b199811SRafael J. Wysocki 	ACPI_COMPANION_SET(&spi->dev, adev);
171364bee4d2SMika Westerberg 	spi->irq = -1;
171464bee4d2SMika Westerberg 
171564bee4d2SMika Westerberg 	INIT_LIST_HEAD(&resource_list);
171664bee4d2SMika Westerberg 	ret = acpi_dev_get_resources(adev, &resource_list,
171764bee4d2SMika Westerberg 				     acpi_spi_add_resource, spi);
171864bee4d2SMika Westerberg 	acpi_dev_free_resource_list(&resource_list);
171964bee4d2SMika Westerberg 
172064bee4d2SMika Westerberg 	if (ret < 0 || !spi->max_speed_hz) {
172164bee4d2SMika Westerberg 		spi_dev_put(spi);
172264bee4d2SMika Westerberg 		return AE_OK;
172364bee4d2SMika Westerberg 	}
172464bee4d2SMika Westerberg 
172533ada67dSChristophe RICARD 	if (spi->irq < 0)
172633ada67dSChristophe RICARD 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
172733ada67dSChristophe RICARD 
17287f24467fSOctavian Purdila 	acpi_device_set_enumerated(adev);
17297f24467fSOctavian Purdila 
173033cf00e5SMika Westerberg 	adev->power.flags.ignore_parent = true;
1731cf9eb39cSJarkko Nikula 	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
173264bee4d2SMika Westerberg 	if (spi_add_device(spi)) {
173333cf00e5SMika Westerberg 		adev->power.flags.ignore_parent = false;
173464bee4d2SMika Westerberg 		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
173564bee4d2SMika Westerberg 			dev_name(&adev->dev));
173664bee4d2SMika Westerberg 		spi_dev_put(spi);
173764bee4d2SMika Westerberg 	}
173864bee4d2SMika Westerberg 
173964bee4d2SMika Westerberg 	return AE_OK;
174064bee4d2SMika Westerberg }
174164bee4d2SMika Westerberg 
17427f24467fSOctavian Purdila static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
17437f24467fSOctavian Purdila 				       void *data, void **return_value)
17447f24467fSOctavian Purdila {
17457f24467fSOctavian Purdila 	struct spi_master *master = data;
17467f24467fSOctavian Purdila 	struct acpi_device *adev;
17477f24467fSOctavian Purdila 
17487f24467fSOctavian Purdila 	if (acpi_bus_get_device(handle, &adev))
17497f24467fSOctavian Purdila 		return AE_OK;
17507f24467fSOctavian Purdila 
17517f24467fSOctavian Purdila 	return acpi_register_spi_device(master, adev);
17527f24467fSOctavian Purdila }
17537f24467fSOctavian Purdila 
175464bee4d2SMika Westerberg static void acpi_register_spi_devices(struct spi_master *master)
175564bee4d2SMika Westerberg {
175664bee4d2SMika Westerberg 	acpi_status status;
175764bee4d2SMika Westerberg 	acpi_handle handle;
175864bee4d2SMika Westerberg 
175929896178SRafael J. Wysocki 	handle = ACPI_HANDLE(master->dev.parent);
176064bee4d2SMika Westerberg 	if (!handle)
176164bee4d2SMika Westerberg 		return;
176264bee4d2SMika Westerberg 
176364bee4d2SMika Westerberg 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
176464bee4d2SMika Westerberg 				     acpi_spi_add_device, NULL,
176564bee4d2SMika Westerberg 				     master, NULL);
176664bee4d2SMika Westerberg 	if (ACPI_FAILURE(status))
176764bee4d2SMika Westerberg 		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
176864bee4d2SMika Westerberg }
176964bee4d2SMika Westerberg #else
177064bee4d2SMika Westerberg static inline void acpi_register_spi_devices(struct spi_master *master) {}
177164bee4d2SMika Westerberg #endif /* CONFIG_ACPI */
177264bee4d2SMika Westerberg 
177349dce689STony Jones static void spi_master_release(struct device *dev)
17748ae12a0dSDavid Brownell {
17758ae12a0dSDavid Brownell 	struct spi_master *master;
17768ae12a0dSDavid Brownell 
177749dce689STony Jones 	master = container_of(dev, struct spi_master, dev);
17788ae12a0dSDavid Brownell 	kfree(master);
17798ae12a0dSDavid Brownell }
17808ae12a0dSDavid Brownell 
17818ae12a0dSDavid Brownell static struct class spi_master_class = {
17828ae12a0dSDavid Brownell 	.name		= "spi_master",
17838ae12a0dSDavid Brownell 	.owner		= THIS_MODULE,
178449dce689STony Jones 	.dev_release	= spi_master_release,
1785eca2ebc7SMartin Sperl 	.dev_groups	= spi_master_groups,
17868ae12a0dSDavid Brownell };
17878ae12a0dSDavid Brownell 
17888ae12a0dSDavid Brownell 
17898ae12a0dSDavid Brownell /**
17908ae12a0dSDavid Brownell  * spi_alloc_master - allocate SPI master controller
17918ae12a0dSDavid Brownell  * @dev: the controller, possibly using the platform_bus
179233e34dc6SDavid Brownell  * @size: how much zeroed driver-private data to allocate; the pointer to this
179349dce689STony Jones  *	memory is in the driver_data field of the returned device,
17940c868461SDavid Brownell  *	accessible with spi_master_get_devdata().
179533e34dc6SDavid Brownell  * Context: can sleep
17968ae12a0dSDavid Brownell  *
17978ae12a0dSDavid Brownell  * This call is used only by SPI master controller drivers, which are the
17988ae12a0dSDavid Brownell  * only ones directly touching chip registers.  It's how they allocate
1799ba1a0513Sdmitry pervushin  * an spi_master structure, prior to calling spi_register_master().
18008ae12a0dSDavid Brownell  *
180197d56dc6SJavier Martinez Canillas  * This must be called from context that can sleep.
18028ae12a0dSDavid Brownell  *
18038ae12a0dSDavid Brownell  * The caller is responsible for assigning the bus number and initializing
1804ba1a0513Sdmitry pervushin  * the master's methods before calling spi_register_master(); and (after errors
1805a394d635SGuenter Roeck  * adding the device) calling spi_master_put() to prevent a memory leak.
180697d56dc6SJavier Martinez Canillas  *
180797d56dc6SJavier Martinez Canillas  * Return: the SPI master structure on success, else NULL.
18088ae12a0dSDavid Brownell  */
1809e9d5a461SAdrian Bunk struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
18108ae12a0dSDavid Brownell {
18118ae12a0dSDavid Brownell 	struct spi_master	*master;
18128ae12a0dSDavid Brownell 
18130c868461SDavid Brownell 	if (!dev)
18140c868461SDavid Brownell 		return NULL;
18150c868461SDavid Brownell 
18165fe5f05eSJingoo Han 	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
18178ae12a0dSDavid Brownell 	if (!master)
18188ae12a0dSDavid Brownell 		return NULL;
18198ae12a0dSDavid Brownell 
182049dce689STony Jones 	device_initialize(&master->dev);
18211e8a52e1SGrant Likely 	master->bus_num = -1;
18221e8a52e1SGrant Likely 	master->num_chipselect = 1;
182349dce689STony Jones 	master->dev.class = &spi_master_class;
1824157f38f9SJohan Hovold 	master->dev.parent = dev;
1825d7e2ee25SLinus Walleij 	pm_suspend_ignore_children(&master->dev, true);
18260c868461SDavid Brownell 	spi_master_set_devdata(master, &master[1]);
18278ae12a0dSDavid Brownell 
18288ae12a0dSDavid Brownell 	return master;
18298ae12a0dSDavid Brownell }
18308ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_alloc_master);
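
/*
 * Illustrative sketch of the allocation pattern described above, as it might
 * appear in a controller driver's probe(); struct foo_spi is a hypothetical
 * driver-private structure carved out of the extra @size bytes.
 */
#if 0
	struct spi_master *master;
	struct foo_spi *hw;

	master = spi_alloc_master(&pdev->dev, sizeof(*hw));
	if (!master)
		return -ENOMEM;

	hw = spi_master_get_devdata(master);	/* the zeroed driver-private area */
	master->bus_num = pdev->id;
	master->num_chipselect = 4;		/* arbitrary example value */
#endif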
18318ae12a0dSDavid Brownell 
183274317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF
183374317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master)
183474317984SJean-Christophe PLAGNIOL-VILLARD {
1835e80beb27SGrant Likely 	int nb, i, *cs;
183674317984SJean-Christophe PLAGNIOL-VILLARD 	struct device_node *np = master->dev.of_node;
183774317984SJean-Christophe PLAGNIOL-VILLARD 
183874317984SJean-Christophe PLAGNIOL-VILLARD 	if (!np)
183974317984SJean-Christophe PLAGNIOL-VILLARD 		return 0;
184074317984SJean-Christophe PLAGNIOL-VILLARD 
184174317984SJean-Christophe PLAGNIOL-VILLARD 	nb = of_gpio_named_count(np, "cs-gpios");
18425fe5f05eSJingoo Han 	master->num_chipselect = max_t(int, nb, master->num_chipselect);
184374317984SJean-Christophe PLAGNIOL-VILLARD 
18448ec5d84eSAndreas Larsson 	/* Return error only for an incorrectly formed cs-gpios property */
18458ec5d84eSAndreas Larsson 	if (nb == 0 || nb == -ENOENT)
184674317984SJean-Christophe PLAGNIOL-VILLARD 		return 0;
18478ec5d84eSAndreas Larsson 	else if (nb < 0)
18488ec5d84eSAndreas Larsson 		return nb;
184974317984SJean-Christophe PLAGNIOL-VILLARD 
185074317984SJean-Christophe PLAGNIOL-VILLARD 	cs = devm_kzalloc(&master->dev,
185174317984SJean-Christophe PLAGNIOL-VILLARD 			  sizeof(int) * master->num_chipselect,
185274317984SJean-Christophe PLAGNIOL-VILLARD 			  GFP_KERNEL);
185374317984SJean-Christophe PLAGNIOL-VILLARD 	master->cs_gpios = cs;
185474317984SJean-Christophe PLAGNIOL-VILLARD 
185574317984SJean-Christophe PLAGNIOL-VILLARD 	if (!master->cs_gpios)
185674317984SJean-Christophe PLAGNIOL-VILLARD 		return -ENOMEM;
185774317984SJean-Christophe PLAGNIOL-VILLARD 
18580da83bb1SAndreas Larsson 	for (i = 0; i < master->num_chipselect; i++)
1859446411e1SAndreas Larsson 		cs[i] = -ENOENT;
186074317984SJean-Christophe PLAGNIOL-VILLARD 
186174317984SJean-Christophe PLAGNIOL-VILLARD 	for (i = 0; i < nb; i++)
186274317984SJean-Christophe PLAGNIOL-VILLARD 		cs[i] = of_get_named_gpio(np, "cs-gpios", i);
186374317984SJean-Christophe PLAGNIOL-VILLARD 
186474317984SJean-Christophe PLAGNIOL-VILLARD 	return 0;
186574317984SJean-Christophe PLAGNIOL-VILLARD }
186674317984SJean-Christophe PLAGNIOL-VILLARD #else
186774317984SJean-Christophe PLAGNIOL-VILLARD static int of_spi_register_master(struct spi_master *master)
186874317984SJean-Christophe PLAGNIOL-VILLARD {
186974317984SJean-Christophe PLAGNIOL-VILLARD 	return 0;
187074317984SJean-Christophe PLAGNIOL-VILLARD }
187174317984SJean-Christophe PLAGNIOL-VILLARD #endif
187274317984SJean-Christophe PLAGNIOL-VILLARD 
18738ae12a0dSDavid Brownell /**
18748ae12a0dSDavid Brownell  * spi_register_master - register SPI master controller
18758ae12a0dSDavid Brownell  * @master: initialized master, originally from spi_alloc_master()
187633e34dc6SDavid Brownell  * Context: can sleep
18778ae12a0dSDavid Brownell  *
18788ae12a0dSDavid Brownell  * SPI master controllers connect to their drivers using some non-SPI bus,
18798ae12a0dSDavid Brownell  * such as the platform bus.  The final stage of probe() in that code
18808ae12a0dSDavid Brownell  * includes calling spi_register_master() to hook up to this SPI bus glue.
18818ae12a0dSDavid Brownell  *
18828ae12a0dSDavid Brownell  * SPI controllers use board specific (often SOC specific) bus numbers,
18838ae12a0dSDavid Brownell  * and board-specific addressing for SPI devices combines those numbers
18848ae12a0dSDavid Brownell  * with chip select numbers.  Since SPI does not directly support dynamic
18858ae12a0dSDavid Brownell  * device identification, boards need configuration tables telling which
18868ae12a0dSDavid Brownell  * chip is at which address.
18878ae12a0dSDavid Brownell  *
18888ae12a0dSDavid Brownell  * This must be called from context that can sleep.  It returns zero on
18898ae12a0dSDavid Brownell  * success, else a negative error code (dropping the master's refcount).
18900c868461SDavid Brownell  * After a successful return, the caller is responsible for calling
18910c868461SDavid Brownell  * spi_unregister_master().
189297d56dc6SJavier Martinez Canillas  *
189397d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
18948ae12a0dSDavid Brownell  */
1895e9d5a461SAdrian Bunk int spi_register_master(struct spi_master *master)
18968ae12a0dSDavid Brownell {
1897e44a45aeSDavid Brownell 	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
189849dce689STony Jones 	struct device		*dev = master->dev.parent;
18992b9603a0SFeng Tang 	struct boardinfo	*bi;
19008ae12a0dSDavid Brownell 	int			status = -ENODEV;
19018ae12a0dSDavid Brownell 	int			dynamic = 0;
19028ae12a0dSDavid Brownell 
19030c868461SDavid Brownell 	if (!dev)
19040c868461SDavid Brownell 		return -ENODEV;
19050c868461SDavid Brownell 
190674317984SJean-Christophe PLAGNIOL-VILLARD 	status = of_spi_register_master(master);
190774317984SJean-Christophe PLAGNIOL-VILLARD 	if (status)
190874317984SJean-Christophe PLAGNIOL-VILLARD 		return status;
190974317984SJean-Christophe PLAGNIOL-VILLARD 
1910082c8cb4SDavid Brownell 	/* even if it's just one always-selected device, there must
1911082c8cb4SDavid Brownell 	 * be at least one chipselect
1912082c8cb4SDavid Brownell 	 */
1913082c8cb4SDavid Brownell 	if (master->num_chipselect == 0)
1914082c8cb4SDavid Brownell 		return -EINVAL;
1915082c8cb4SDavid Brownell 
1916bb29785eSGrant Likely 	if ((master->bus_num < 0) && master->dev.of_node)
1917bb29785eSGrant Likely 		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1918bb29785eSGrant Likely 
19198ae12a0dSDavid Brownell 	/* convention:  dynamically assigned bus IDs count down from the max */
1920a020ed75SDavid Brownell 	if (master->bus_num < 0) {
1921082c8cb4SDavid Brownell 		/* FIXME switch to an IDR based scheme, something like
1922082c8cb4SDavid Brownell 		 * I2C now uses, so we can't run out of "dynamic" IDs
1923082c8cb4SDavid Brownell 		 */
19248ae12a0dSDavid Brownell 		master->bus_num = atomic_dec_return(&dyn_bus_id);
1925b885244eSDavid Brownell 		dynamic = 1;
19268ae12a0dSDavid Brownell 	}
19278ae12a0dSDavid Brownell 
19285424d43eSMark Brown 	INIT_LIST_HEAD(&master->queue);
19295424d43eSMark Brown 	spin_lock_init(&master->queue_lock);
1930cf32b71eSErnst Schwab 	spin_lock_init(&master->bus_lock_spinlock);
1931cf32b71eSErnst Schwab 	mutex_init(&master->bus_lock_mutex);
1932ef4d96ecSMark Brown 	mutex_init(&master->io_mutex);
1933cf32b71eSErnst Schwab 	master->bus_lock_flag = 0;
1934b158935fSMark Brown 	init_completion(&master->xfer_completion);
19356ad45a27SMark Brown 	if (!master->max_dma_len)
19366ad45a27SMark Brown 		master->max_dma_len = INT_MAX;
1937cf32b71eSErnst Schwab 
19388ae12a0dSDavid Brownell 	/* register the device, then userspace will see it.
19398ae12a0dSDavid Brownell 	 * registration fails if the bus ID is in use.
19408ae12a0dSDavid Brownell 	 */
194135f74fcaSKay Sievers 	dev_set_name(&master->dev, "spi%u", master->bus_num);
194249dce689STony Jones 	status = device_add(&master->dev);
1943b885244eSDavid Brownell 	if (status < 0)
19448ae12a0dSDavid Brownell 		goto done;
194535f74fcaSKay Sievers 	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
19468ae12a0dSDavid Brownell 			dynamic ? " (dynamic)" : "");
19478ae12a0dSDavid Brownell 
1948ffbbdd21SLinus Walleij 	/* If we're using a queued driver, start the queue */
1949ffbbdd21SLinus Walleij 	if (master->transfer)
1950ffbbdd21SLinus Walleij 		dev_info(dev, "master is unqueued, this is deprecated\n");
1951ffbbdd21SLinus Walleij 	else {
1952ffbbdd21SLinus Walleij 		status = spi_master_initialize_queue(master);
1953ffbbdd21SLinus Walleij 		if (status) {
1954e93b0724SAxel Lin 			device_del(&master->dev);
1955ffbbdd21SLinus Walleij 			goto done;
1956ffbbdd21SLinus Walleij 		}
1957ffbbdd21SLinus Walleij 	}
1958eca2ebc7SMartin Sperl 	/* add statistics */
1959eca2ebc7SMartin Sperl 	spin_lock_init(&master->statistics.lock);
1960ffbbdd21SLinus Walleij 
19612b9603a0SFeng Tang 	mutex_lock(&board_lock);
19622b9603a0SFeng Tang 	list_add_tail(&master->list, &spi_master_list);
19632b9603a0SFeng Tang 	list_for_each_entry(bi, &board_list, list)
19642b9603a0SFeng Tang 		spi_match_master_to_boardinfo(master, &bi->board_info);
19652b9603a0SFeng Tang 	mutex_unlock(&board_lock);
19662b9603a0SFeng Tang 
196764bee4d2SMika Westerberg 	/* Register devices from the device tree and ACPI */
196812b15e83SAnatolij Gustschin 	of_register_spi_devices(master);
196964bee4d2SMika Westerberg 	acpi_register_spi_devices(master);
19708ae12a0dSDavid Brownell done:
19718ae12a0dSDavid Brownell 	return status;
19728ae12a0dSDavid Brownell }
19738ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_register_master);
19748ae12a0dSDavid Brownell 
1975666d5b4cSMark Brown static void devm_spi_unregister(struct device *dev, void *res)
1976666d5b4cSMark Brown {
1977666d5b4cSMark Brown 	spi_unregister_master(*(struct spi_master **)res);
1978666d5b4cSMark Brown }
1979666d5b4cSMark Brown 
1980666d5b4cSMark Brown /**
1981666d5b4cSMark Brown  * devm_spi_register_master - register managed SPI master controller
1982666d5b4cSMark Brown  * @dev:    device managing SPI master
1983666d5b4cSMark Brown  * @master: initialized master, originally from spi_alloc_master()
1984666d5b4cSMark Brown  * Context: can sleep
1985666d5b4cSMark Brown  *
1986666d5b4cSMark Brown  * Register a SPI master as with spi_register_master(), which will
1987666d5b4cSMark Brown  * automatically be unregistered when the managing device (@dev) is unbound.
198897d56dc6SJavier Martinez Canillas  *
198997d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
1990666d5b4cSMark Brown  */
1991666d5b4cSMark Brown int devm_spi_register_master(struct device *dev, struct spi_master *master)
1992666d5b4cSMark Brown {
1993666d5b4cSMark Brown 	struct spi_master **ptr;
1994666d5b4cSMark Brown 	int ret;
1995666d5b4cSMark Brown 
1996666d5b4cSMark Brown 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1997666d5b4cSMark Brown 	if (!ptr)
1998666d5b4cSMark Brown 		return -ENOMEM;
1999666d5b4cSMark Brown 
2000666d5b4cSMark Brown 	ret = spi_register_master(master);
20014b92894eSStephen Warren 	if (!ret) {
2002666d5b4cSMark Brown 		*ptr = master;
2003666d5b4cSMark Brown 		devres_add(dev, ptr);
2004666d5b4cSMark Brown 	} else {
2005666d5b4cSMark Brown 		devres_free(ptr);
2006666d5b4cSMark Brown 	}
2007666d5b4cSMark Brown 
2008666d5b4cSMark Brown 	return ret;
2009666d5b4cSMark Brown }
2010666d5b4cSMark Brown EXPORT_SYMBOL_GPL(devm_spi_register_master);
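
/*
 * Illustrative sketch: with the managed variant the remove path needs no
 * explicit spi_unregister_master() call.  The foo_* names are hypothetical.
 */
#if 0
static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_spi));
	if (!master)
		return -ENOMEM;

	/* ... fill in bus_num, num_chipselect, transfer_one, ... */

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		spi_master_put(master);	/* drop the allocation reference on failure */
	return ret;
}
#endif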
2011666d5b4cSMark Brown 
201234860089SDavid Lamparter static int __unregister(struct device *dev, void *null)
20138ae12a0dSDavid Brownell {
20140c868461SDavid Brownell 	spi_unregister_device(to_spi_device(dev));
20158ae12a0dSDavid Brownell 	return 0;
20168ae12a0dSDavid Brownell }
20178ae12a0dSDavid Brownell 
20188ae12a0dSDavid Brownell /**
20198ae12a0dSDavid Brownell  * spi_unregister_master - unregister SPI master controller
20208ae12a0dSDavid Brownell  * @master: the master being unregistered
202133e34dc6SDavid Brownell  * Context: can sleep
20228ae12a0dSDavid Brownell  *
20238ae12a0dSDavid Brownell  * This call is used only by SPI master controller drivers, which are the
20248ae12a0dSDavid Brownell  * only ones directly touching chip registers.
20258ae12a0dSDavid Brownell  *
20268ae12a0dSDavid Brownell  * This must be called from context that can sleep.
20278ae12a0dSDavid Brownell  */
20288ae12a0dSDavid Brownell void spi_unregister_master(struct spi_master *master)
20298ae12a0dSDavid Brownell {
203089fc9a1aSJeff Garzik 	int dummy;
203189fc9a1aSJeff Garzik 
2032ffbbdd21SLinus Walleij 	if (master->queued) {
2033ffbbdd21SLinus Walleij 		if (spi_destroy_queue(master))
2034ffbbdd21SLinus Walleij 			dev_err(&master->dev, "queue remove failed\n");
2035ffbbdd21SLinus Walleij 	}
2036ffbbdd21SLinus Walleij 
20372b9603a0SFeng Tang 	mutex_lock(&board_lock);
20382b9603a0SFeng Tang 	list_del(&master->list);
20392b9603a0SFeng Tang 	mutex_unlock(&board_lock);
20402b9603a0SFeng Tang 
204197dbf37dSSebastian Andrzej Siewior 	dummy = device_for_each_child(&master->dev, NULL, __unregister);
204249dce689STony Jones 	device_unregister(&master->dev);
20438ae12a0dSDavid Brownell }
20448ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_unregister_master);
20458ae12a0dSDavid Brownell 
2046ffbbdd21SLinus Walleij int spi_master_suspend(struct spi_master *master)
2047ffbbdd21SLinus Walleij {
2048ffbbdd21SLinus Walleij 	int ret;
2049ffbbdd21SLinus Walleij 
2050ffbbdd21SLinus Walleij 	/* Basically no-ops for non-queued masters */
2051ffbbdd21SLinus Walleij 	if (!master->queued)
2052ffbbdd21SLinus Walleij 		return 0;
2053ffbbdd21SLinus Walleij 
2054ffbbdd21SLinus Walleij 	ret = spi_stop_queue(master);
2055ffbbdd21SLinus Walleij 	if (ret)
2056ffbbdd21SLinus Walleij 		dev_err(&master->dev, "queue stop failed\n");
2057ffbbdd21SLinus Walleij 
2058ffbbdd21SLinus Walleij 	return ret;
2059ffbbdd21SLinus Walleij }
2060ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_suspend);
2061ffbbdd21SLinus Walleij 
2062ffbbdd21SLinus Walleij int spi_master_resume(struct spi_master *master)
2063ffbbdd21SLinus Walleij {
2064ffbbdd21SLinus Walleij 	int ret;
2065ffbbdd21SLinus Walleij 
2066ffbbdd21SLinus Walleij 	if (!master->queued)
2067ffbbdd21SLinus Walleij 		return 0;
2068ffbbdd21SLinus Walleij 
2069ffbbdd21SLinus Walleij 	ret = spi_start_queue(master);
2070ffbbdd21SLinus Walleij 	if (ret)
2071ffbbdd21SLinus Walleij 		dev_err(&master->dev, "queue restart failed\n");
2072ffbbdd21SLinus Walleij 
2073ffbbdd21SLinus Walleij 	return ret;
2074ffbbdd21SLinus Walleij }
2075ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_master_resume);
2076ffbbdd21SLinus Walleij 
20779f3b795aSMichał Mirosław static int __spi_master_match(struct device *dev, const void *data)
20785ed2c832SDave Young {
20795ed2c832SDave Young 	struct spi_master *m;
20809f3b795aSMichał Mirosław 	const u16 *bus_num = data;
20815ed2c832SDave Young 
20825ed2c832SDave Young 	m = container_of(dev, struct spi_master, dev);
20835ed2c832SDave Young 	return m->bus_num == *bus_num;
20845ed2c832SDave Young }
20855ed2c832SDave Young 
20868ae12a0dSDavid Brownell /**
20878ae12a0dSDavid Brownell  * spi_busnum_to_master - look up master associated with bus_num
20888ae12a0dSDavid Brownell  * @bus_num: the master's bus number
208933e34dc6SDavid Brownell  * Context: can sleep
20908ae12a0dSDavid Brownell  *
20918ae12a0dSDavid Brownell  * This call may be used with devices that are registered after
20928ae12a0dSDavid Brownell  * arch init time.  It returns a refcounted pointer to the relevant
20938ae12a0dSDavid Brownell  * spi_master (which the caller must release), or NULL if there is
20948ae12a0dSDavid Brownell  * no such master registered.
209597d56dc6SJavier Martinez Canillas  *
209697d56dc6SJavier Martinez Canillas  * Return: the SPI master structure on success, else NULL.
20978ae12a0dSDavid Brownell  */
20988ae12a0dSDavid Brownell struct spi_master *spi_busnum_to_master(u16 bus_num)
20998ae12a0dSDavid Brownell {
210049dce689STony Jones 	struct device		*dev;
21011e9a51dcSAtsushi Nemoto 	struct spi_master	*master = NULL;
21028ae12a0dSDavid Brownell 
2103695794aeSGreg Kroah-Hartman 	dev = class_find_device(&spi_master_class, NULL, &bus_num,
21045ed2c832SDave Young 				__spi_master_match);
21055ed2c832SDave Young 	if (dev)
21065ed2c832SDave Young 		master = container_of(dev, struct spi_master, dev);
21075ed2c832SDave Young 	/* reference got in class_find_device */
21081e9a51dcSAtsushi Nemoto 	return master;
21098ae12a0dSDavid Brownell }
21108ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master);
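
/*
 * Illustrative sketch: the reference obtained via class_find_device() above
 * must be dropped with spi_master_put() once the caller is done with it.
 */
#if 0
	struct spi_master *master = spi_busnum_to_master(0);	/* bus number 0 */

	if (master) {
		/* ... use the master, e.g. spi_new_device(master, &board_info) ... */
		spi_master_put(master);
	}
#endif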
21118ae12a0dSDavid Brownell 
2112d780c371SMartin Sperl /*-------------------------------------------------------------------------*/
2113d780c371SMartin Sperl 
2114d780c371SMartin Sperl /* Core methods for SPI resource management */
2115d780c371SMartin Sperl 
2116d780c371SMartin Sperl /**
2117d780c371SMartin Sperl  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2118d780c371SMartin Sperl  *                 during the processing of a spi_message while using
2119d780c371SMartin Sperl  *                 spi_transfer_one
2120d780c371SMartin Sperl  * @spi:     the spi device for which we allocate memory
2121d780c371SMartin Sperl  * @release: the release code to execute for this resource
2122d780c371SMartin Sperl  * @size:    size to alloc and return
2123d780c371SMartin Sperl  * @gfp:     GFP allocation flags
2124d780c371SMartin Sperl  *
2125d780c371SMartin Sperl  * Return: the pointer to the allocated data
2126d780c371SMartin Sperl  *
2127d780c371SMartin Sperl  * This may get enhanced in the future to allocate from a memory pool
2128d780c371SMartin Sperl  * of the @spi_device or @spi_master to avoid repeated allocations.
2129d780c371SMartin Sperl  */
2130d780c371SMartin Sperl void *spi_res_alloc(struct spi_device *spi,
2131d780c371SMartin Sperl 		    spi_res_release_t release,
2132d780c371SMartin Sperl 		    size_t size, gfp_t gfp)
2133d780c371SMartin Sperl {
2134d780c371SMartin Sperl 	struct spi_res *sres;
2135d780c371SMartin Sperl 
2136d780c371SMartin Sperl 	sres = kzalloc(sizeof(*sres) + size, gfp);
2137d780c371SMartin Sperl 	if (!sres)
2138d780c371SMartin Sperl 		return NULL;
2139d780c371SMartin Sperl 
2140d780c371SMartin Sperl 	INIT_LIST_HEAD(&sres->entry);
2141d780c371SMartin Sperl 	sres->release = release;
2142d780c371SMartin Sperl 
2143d780c371SMartin Sperl 	return sres->data;
2144d780c371SMartin Sperl }
2145d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_alloc);
2146d780c371SMartin Sperl 
2147d780c371SMartin Sperl /**
2148d780c371SMartin Sperl  * spi_res_free - free a spi resource

2149d780c371SMartin Sperl  * @res: pointer to the custom data of a resource
2150d780c371SMartin Sperl  *
2151d780c371SMartin Sperl  */
2152d780c371SMartin Sperl void spi_res_free(void *res)
2153d780c371SMartin Sperl {
2154d780c371SMartin Sperl 	struct spi_res *sres = container_of(res, struct spi_res, data);
2155d780c371SMartin Sperl 
2156d780c371SMartin Sperl 	if (!res)
2157d780c371SMartin Sperl 		return;
2158d780c371SMartin Sperl 
2159d780c371SMartin Sperl 	WARN_ON(!list_empty(&sres->entry));
2160d780c371SMartin Sperl 	kfree(sres);
2161d780c371SMartin Sperl }
2162d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_free);
2163d780c371SMartin Sperl 
2164d780c371SMartin Sperl /**
2165d780c371SMartin Sperl  * spi_res_add - add a spi_res to the spi_message
2166d780c371SMartin Sperl  * @message: the spi message
2167d780c371SMartin Sperl  * @res:     the spi_resource
2168d780c371SMartin Sperl  */
2169d780c371SMartin Sperl void spi_res_add(struct spi_message *message, void *res)
2170d780c371SMartin Sperl {
2171d780c371SMartin Sperl 	struct spi_res *sres = container_of(res, struct spi_res, data);
2172d780c371SMartin Sperl 
2173d780c371SMartin Sperl 	WARN_ON(!list_empty(&sres->entry));
2174d780c371SMartin Sperl 	list_add_tail(&sres->entry, &message->resources);
2175d780c371SMartin Sperl }
2176d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_add);
2177d780c371SMartin Sperl 
2178d780c371SMartin Sperl /**
2179d780c371SMartin Sperl  * spi_res_release - release all spi resources for this message
2180d780c371SMartin Sperl  * @master:  the @spi_master
2181d780c371SMartin Sperl  * @message: the @spi_message
2182d780c371SMartin Sperl  */
2183d780c371SMartin Sperl void spi_res_release(struct spi_master *master,
2184d780c371SMartin Sperl 		     struct spi_message *message)
2185d780c371SMartin Sperl {
2186d780c371SMartin Sperl 	struct spi_res *res;
2187d780c371SMartin Sperl 
2188d780c371SMartin Sperl 	while (!list_empty(&message->resources)) {
2189d780c371SMartin Sperl 		res = list_last_entry(&message->resources,
2190d780c371SMartin Sperl 				      struct spi_res, entry);
2191d780c371SMartin Sperl 
2192d780c371SMartin Sperl 		if (res->release)
2193d780c371SMartin Sperl 			res->release(master, message, res->data);
2194d780c371SMartin Sperl 
2195d780c371SMartin Sperl 		list_del(&res->entry);
2196d780c371SMartin Sperl 
2197d780c371SMartin Sperl 		kfree(res);
2198d780c371SMartin Sperl 	}
2199d780c371SMartin Sperl }
2200d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_release);
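
/*
 * Illustrative sketch (not part of the original file): attaching a piece of
 * scratch data to a message so that it is released together with the
 * message.  foo_scratch_release() and foo_attach_scratch() are hypothetical
 * names.
 *
 *	static void foo_scratch_release(struct spi_master *master,
 *					struct spi_message *msg, void *res)
 *	{
 *		(undo whatever @res describes; the memory itself is freed
 *		 by spi_res_release() right after this callback)
 *	}
 *
 *	static int foo_attach_scratch(struct spi_message *msg, size_t len)
 *	{
 *		void *scratch;
 *
 *		scratch = spi_res_alloc(msg->spi, foo_scratch_release,
 *					len, GFP_KERNEL);
 *		if (!scratch)
 *			return -ENOMEM;
 *		spi_res_add(msg, scratch);
 *		return 0;
 *	}
 */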
22018ae12a0dSDavid Brownell 
22028ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
22038ae12a0dSDavid Brownell 
2204523baf5aSMartin Sperl /* Core methods for spi_message alterations */
2205523baf5aSMartin Sperl 
2206523baf5aSMartin Sperl static void __spi_replace_transfers_release(struct spi_master *master,
2207523baf5aSMartin Sperl 					    struct spi_message *msg,
2208523baf5aSMartin Sperl 					    void *res)
2209523baf5aSMartin Sperl {
2210523baf5aSMartin Sperl 	struct spi_replaced_transfers *rxfer = res;
2211523baf5aSMartin Sperl 	size_t i;
2212523baf5aSMartin Sperl 
2213523baf5aSMartin Sperl 	/* call extra callback if requested */
2214523baf5aSMartin Sperl 	if (rxfer->release)
2215523baf5aSMartin Sperl 		rxfer->release(master, msg, res);
2216523baf5aSMartin Sperl 
2217523baf5aSMartin Sperl 	/* insert replaced transfers back into the message */
2218523baf5aSMartin Sperl 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2219523baf5aSMartin Sperl 
2220523baf5aSMartin Sperl 	/* remove the formerly inserted entries */
2221523baf5aSMartin Sperl 	for (i = 0; i < rxfer->inserted; i++)
2222523baf5aSMartin Sperl 		list_del(&rxfer->inserted_transfers[i].transfer_list);
2223523baf5aSMartin Sperl }
2224523baf5aSMartin Sperl 
2225523baf5aSMartin Sperl /**
2226523baf5aSMartin Sperl  * spi_replace_transfers - replace transfers with several transfers
2227523baf5aSMartin Sperl  *                         and register change with spi_message.resources
2228523baf5aSMartin Sperl  * @msg:           the spi_message we work upon
2229523baf5aSMartin Sperl  * @xfer_first:    the first spi_transfer we want to replace
2230523baf5aSMartin Sperl  * @remove:        number of transfers to remove
2231523baf5aSMartin Sperl  * @insert:        the number of transfers we want to insert instead
2232523baf5aSMartin Sperl  * @release:       extra release code necessary in some circumstances
2233523baf5aSMartin Sperl  * @extradatasize: extra data to allocate (with alignment guarantees
2234523baf5aSMartin Sperl  *                 of struct @spi_transfer)
223505885397SMartin Sperl  * @gfp:           GFP allocation flags
2236523baf5aSMartin Sperl  *
2237523baf5aSMartin Sperl  * Return: pointer to @spi_replaced_transfers,
2238523baf5aSMartin Sperl  *          PTR_ERR(...) in case of errors.
2239523baf5aSMartin Sperl  */
2240523baf5aSMartin Sperl struct spi_replaced_transfers *spi_replace_transfers(
2241523baf5aSMartin Sperl 	struct spi_message *msg,
2242523baf5aSMartin Sperl 	struct spi_transfer *xfer_first,
2243523baf5aSMartin Sperl 	size_t remove,
2244523baf5aSMartin Sperl 	size_t insert,
2245523baf5aSMartin Sperl 	spi_replaced_release_t release,
2246523baf5aSMartin Sperl 	size_t extradatasize,
2247523baf5aSMartin Sperl 	gfp_t gfp)
2248523baf5aSMartin Sperl {
2249523baf5aSMartin Sperl 	struct spi_replaced_transfers *rxfer;
2250523baf5aSMartin Sperl 	struct spi_transfer *xfer;
2251523baf5aSMartin Sperl 	size_t i;
2252523baf5aSMartin Sperl 
2253523baf5aSMartin Sperl 	/* allocate the structure using spi_res */
2254523baf5aSMartin Sperl 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2255523baf5aSMartin Sperl 			      insert * sizeof(struct spi_transfer)
2256523baf5aSMartin Sperl 			      + sizeof(struct spi_replaced_transfers)
2257523baf5aSMartin Sperl 			      + extradatasize,
2258523baf5aSMartin Sperl 			      gfp);
2259523baf5aSMartin Sperl 	if (!rxfer)
2260523baf5aSMartin Sperl 		return ERR_PTR(-ENOMEM);
2261523baf5aSMartin Sperl 
2262523baf5aSMartin Sperl 	/* the release code to invoke before running the generic release */
2263523baf5aSMartin Sperl 	rxfer->release = release;
2264523baf5aSMartin Sperl 
2265523baf5aSMartin Sperl 	/* assign extradata */
2266523baf5aSMartin Sperl 	if (extradatasize)
2267523baf5aSMartin Sperl 		rxfer->extradata =
2268523baf5aSMartin Sperl 			&rxfer->inserted_transfers[insert];
2269523baf5aSMartin Sperl 
2270523baf5aSMartin Sperl 	/* init the replaced_transfers list */
2271523baf5aSMartin Sperl 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
2272523baf5aSMartin Sperl 
2273523baf5aSMartin Sperl 	/* assign the list_entry after which we should reinsert
2274523baf5aSMartin Sperl 	 * the @replaced_transfers - it may be spi_message.transfers!
2275523baf5aSMartin Sperl 	 */
2276523baf5aSMartin Sperl 	rxfer->replaced_after = xfer_first->transfer_list.prev;
2277523baf5aSMartin Sperl 
2278523baf5aSMartin Sperl 	/* remove the requested number of transfers */
2279523baf5aSMartin Sperl 	for (i = 0; i < remove; i++) {
2280523baf5aSMartin Sperl 		/* if the entry after replaced_after is msg->transfers
2281523baf5aSMartin Sperl 		 * then we have been requested to remove more transfers
2282523baf5aSMartin Sperl 		 * than are in the list
2283523baf5aSMartin Sperl 		 */
2284523baf5aSMartin Sperl 		if (rxfer->replaced_after->next == &msg->transfers) {
2285523baf5aSMartin Sperl 			dev_err(&msg->spi->dev,
2286523baf5aSMartin Sperl 				"requested to remove more spi_transfers than are available\n");
2287523baf5aSMartin Sperl 			/* insert replaced transfers back into the message */
2288523baf5aSMartin Sperl 			list_splice(&rxfer->replaced_transfers,
2289523baf5aSMartin Sperl 				    rxfer->replaced_after);
2290523baf5aSMartin Sperl 
2291523baf5aSMartin Sperl 			/* free the spi_replace_transfer structure */
2292523baf5aSMartin Sperl 			spi_res_free(rxfer);
2293523baf5aSMartin Sperl 
2294523baf5aSMartin Sperl 			/* and return with an error */
2295523baf5aSMartin Sperl 			return ERR_PTR(-EINVAL);
2296523baf5aSMartin Sperl 		}
2297523baf5aSMartin Sperl 
2298523baf5aSMartin Sperl 		/* remove the entry after replaced_after from list of
2299523baf5aSMartin Sperl 		 * transfers and add it to list of replaced_transfers
2300523baf5aSMartin Sperl 		 */
2301523baf5aSMartin Sperl 		list_move_tail(rxfer->replaced_after->next,
2302523baf5aSMartin Sperl 			       &rxfer->replaced_transfers);
2303523baf5aSMartin Sperl 	}
2304523baf5aSMartin Sperl 
2305523baf5aSMartin Sperl 	/* create copies of the given xfer with identical settings
2306523baf5aSMartin Sperl 	 * based on the first transfer to get removed
2307523baf5aSMartin Sperl 	 */
2308523baf5aSMartin Sperl 	for (i = 0; i < insert; i++) {
2309523baf5aSMartin Sperl 		/* we need to run in reverse order */
2310523baf5aSMartin Sperl 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
2311523baf5aSMartin Sperl 
2312523baf5aSMartin Sperl 		/* copy all spi_transfer data */
2313523baf5aSMartin Sperl 		memcpy(xfer, xfer_first, sizeof(*xfer));
2314523baf5aSMartin Sperl 
2315523baf5aSMartin Sperl 		/* add to list */
2316523baf5aSMartin Sperl 		list_add(&xfer->transfer_list, rxfer->replaced_after);
2317523baf5aSMartin Sperl 
2318523baf5aSMartin Sperl 		/* clear cs_change and delay_usecs for all but the last */
2319523baf5aSMartin Sperl 		if (i) {
2320523baf5aSMartin Sperl 			xfer->cs_change = false;
2321523baf5aSMartin Sperl 			xfer->delay_usecs = 0;
2322523baf5aSMartin Sperl 		}
2323523baf5aSMartin Sperl 	}
2324523baf5aSMartin Sperl 
2325523baf5aSMartin Sperl 	/* set up inserted */
2326523baf5aSMartin Sperl 	rxfer->inserted = insert;
2327523baf5aSMartin Sperl 
2328523baf5aSMartin Sperl 	/* and register it with spi_res/spi_message */
2329523baf5aSMartin Sperl 	spi_res_add(msg, rxfer);
2330523baf5aSMartin Sperl 
2331523baf5aSMartin Sperl 	return rxfer;
2332523baf5aSMartin Sperl }
2333523baf5aSMartin Sperl EXPORT_SYMBOL_GPL(spi_replace_transfers);
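
/*
 * Illustrative sketch (not part of the original file): replacing one
 * oversized transfer with two smaller ones from controller code.  "half"
 * is an assumed split point.
 *
 *	struct spi_replaced_transfers *rxfer;
 *	size_t half = xfer->len / 2;
 *
 *	rxfer = spi_replace_transfers(msg, xfer, 1, 2, NULL, 0, GFP_KERNEL);
 *	if (IS_ERR(rxfer))
 *		return PTR_ERR(rxfer);
 *
 *	rxfer->inserted_transfers[0].len = half;
 *	rxfer->inserted_transfers[1].len -= half;
 *	if (rxfer->inserted_transfers[1].tx_buf)
 *		rxfer->inserted_transfers[1].tx_buf += half;
 *	if (rxfer->inserted_transfers[1].rx_buf)
 *		rxfer->inserted_transfers[1].rx_buf += half;
 */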
2334523baf5aSMartin Sperl 
233508933418SFabio Estevam static int __spi_split_transfer_maxsize(struct spi_master *master,
2336d9f12122SMartin Sperl 					struct spi_message *msg,
2337d9f12122SMartin Sperl 					struct spi_transfer **xferp,
2338d9f12122SMartin Sperl 					size_t maxsize,
2339d9f12122SMartin Sperl 					gfp_t gfp)
2340d9f12122SMartin Sperl {
2341d9f12122SMartin Sperl 	struct spi_transfer *xfer = *xferp, *xfers;
2342d9f12122SMartin Sperl 	struct spi_replaced_transfers *srt;
2343d9f12122SMartin Sperl 	size_t offset;
2344d9f12122SMartin Sperl 	size_t count, i;
2345d9f12122SMartin Sperl 
2346d9f12122SMartin Sperl 	/* warn once about the fact that we are splitting a transfer */
2347d9f12122SMartin Sperl 	dev_warn_once(&msg->spi->dev,
23487d62f51eSFabio Estevam 		      "spi_transfer of length %i exceeds the maximum length of %zu - needed to split transfers\n",
2349d9f12122SMartin Sperl 		      xfer->len, maxsize);
2350d9f12122SMartin Sperl 
2351d9f12122SMartin Sperl 	/* calculate how many we have to replace */
2352d9f12122SMartin Sperl 	count = DIV_ROUND_UP(xfer->len, maxsize);
2353d9f12122SMartin Sperl 
2354d9f12122SMartin Sperl 	/* create replacement */
2355d9f12122SMartin Sperl 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2356657d32efSDan Carpenter 	if (IS_ERR(srt))
2357657d32efSDan Carpenter 		return PTR_ERR(srt);
2358d9f12122SMartin Sperl 	xfers = srt->inserted_transfers;
2359d9f12122SMartin Sperl 
2360d9f12122SMartin Sperl 	/* now handle each of those newly inserted spi_transfers
2361d9f12122SMartin Sperl 	 * note that the replacement spi_transfers are all preset
2362d9f12122SMartin Sperl 	 * to the same values as *xferp, so tx_buf, rx_buf and len
2363d9f12122SMartin Sperl 	 * are all identical (as well as most others)
2364d9f12122SMartin Sperl 	 * so we just have to fix up len and the pointers.
2365d9f12122SMartin Sperl 	 *
2366d9f12122SMartin Sperl 	 * this also includes support for the deprecated
2367d9f12122SMartin Sperl 	 * spi_message.is_dma_mapped interface
2368d9f12122SMartin Sperl 	 */
2369d9f12122SMartin Sperl 
2370d9f12122SMartin Sperl 	/* the first transfer just needs the length modified, so we
2371d9f12122SMartin Sperl 	 * run it outside the loop
2372d9f12122SMartin Sperl 	 */
2373c8dab77aSFabio Estevam 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2374d9f12122SMartin Sperl 
2375d9f12122SMartin Sperl 	/* all the others need rx_buf/tx_buf also set */
2376d9f12122SMartin Sperl 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2377d9f12122SMartin Sperl 		/* update rx_buf, tx_buf and dma */
2378d9f12122SMartin Sperl 		if (xfers[i].rx_buf)
2379d9f12122SMartin Sperl 			xfers[i].rx_buf += offset;
2380d9f12122SMartin Sperl 		if (xfers[i].rx_dma)
2381d9f12122SMartin Sperl 			xfers[i].rx_dma += offset;
2382d9f12122SMartin Sperl 		if (xfers[i].tx_buf)
2383d9f12122SMartin Sperl 			xfers[i].tx_buf += offset;
2384d9f12122SMartin Sperl 		if (xfers[i].tx_dma)
2385d9f12122SMartin Sperl 			xfers[i].tx_dma += offset;
2386d9f12122SMartin Sperl 
2387d9f12122SMartin Sperl 		/* update length */
2388d9f12122SMartin Sperl 		xfers[i].len = min(maxsize, xfers[i].len - offset);
2389d9f12122SMartin Sperl 	}
2390d9f12122SMartin Sperl 
2391d9f12122SMartin Sperl 	/* we set up xferp to the last entry we have inserted,
2392d9f12122SMartin Sperl 	 * so that we skip those already split transfers
2393d9f12122SMartin Sperl 	 */
2394d9f12122SMartin Sperl 	*xferp = &xfers[count - 1];
2395d9f12122SMartin Sperl 
2396d9f12122SMartin Sperl 	/* increment statistics counters */
2397d9f12122SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2398d9f12122SMartin Sperl 				       transfers_split_maxsize);
2399d9f12122SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2400d9f12122SMartin Sperl 				       transfers_split_maxsize);
2401d9f12122SMartin Sperl 
2402d9f12122SMartin Sperl 	return 0;
2403d9f12122SMartin Sperl }
2404d9f12122SMartin Sperl 
2405d9f12122SMartin Sperl /**
2406d9f12122SMartin Sperl  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
2407d9f12122SMartin Sperl  *                              when an individual transfer exceeds a
2408d9f12122SMartin Sperl  *                              certain size
2409d9f12122SMartin Sperl  * @master:    the @spi_master for this transfer
24103700ce95SMasanari Iida  * @msg:   the @spi_message to transform
24113700ce95SMasanari Iida  * @maxsize:  the maximum size of a transfer; longer transfers are split
241210f11a22SJavier Martinez Canillas  * @gfp: GFP allocation flags
2413d9f12122SMartin Sperl  *
2414d9f12122SMartin Sperl  * Return: status of transformation
2415d9f12122SMartin Sperl  */
2416d9f12122SMartin Sperl int spi_split_transfers_maxsize(struct spi_master *master,
2417d9f12122SMartin Sperl 				struct spi_message *msg,
2418d9f12122SMartin Sperl 				size_t maxsize,
2419d9f12122SMartin Sperl 				gfp_t gfp)
2420d9f12122SMartin Sperl {
2421d9f12122SMartin Sperl 	struct spi_transfer *xfer;
2422d9f12122SMartin Sperl 	int ret;
2423d9f12122SMartin Sperl 
2424d9f12122SMartin Sperl 	/* iterate over the transfer_list,
2425d9f12122SMartin Sperl 	 * but note that xfer is advanced to the last transfer inserted
2426d9f12122SMartin Sperl 	 * to avoid checking sizes again unnecessarily (also note that xfer
2427d9f12122SMartin Sperl 	 * may belong to a different list by the time the
2428d9f12122SMartin Sperl 	 * replacement has happened)
2429d9f12122SMartin Sperl 	 */
2430d9f12122SMartin Sperl 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2431d9f12122SMartin Sperl 		if (xfer->len > maxsize) {
2432d9f12122SMartin Sperl 			ret = __spi_split_transfer_maxsize(
2433d9f12122SMartin Sperl 				master, msg, &xfer, maxsize, gfp);
2434d9f12122SMartin Sperl 			if (ret)
2435d9f12122SMartin Sperl 				return ret;
2436d9f12122SMartin Sperl 		}
2437d9f12122SMartin Sperl 	}
2438d9f12122SMartin Sperl 
2439d9f12122SMartin Sperl 	return 0;
2440d9f12122SMartin Sperl }
2441d9f12122SMartin Sperl EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
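
/*
 * Illustrative sketch (not part of the original file): a controller driver
 * limiting every transfer to its FIFO depth from its ->prepare_message()
 * hook.  FOO_FIFO_LEN and foo_prepare_message() are hypothetical.
 *
 *	static int foo_prepare_message(struct spi_master *master,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(master, msg,
 *						   FOO_FIFO_LEN, GFP_KERNEL);
 *	}
 *
 *	...
 *	master->prepare_message = foo_prepare_message;
 */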
24428ae12a0dSDavid Brownell 
24438ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
24448ae12a0dSDavid Brownell 
24457d077197SDavid Brownell /* Core methods for SPI master protocol drivers.  Some of the
24467d077197SDavid Brownell  * other core methods are currently defined as inline functions.
24477d077197SDavid Brownell  */
24487d077197SDavid Brownell 
244963ab645fSStefan Brüns static int __spi_validate_bits_per_word(struct spi_master *master, u8 bits_per_word)
245063ab645fSStefan Brüns {
245163ab645fSStefan Brüns 	if (master->bits_per_word_mask) {
245263ab645fSStefan Brüns 		/* Only 32 bits fit in the mask */
245363ab645fSStefan Brüns 		if (bits_per_word > 32)
245463ab645fSStefan Brüns 			return -EINVAL;
245563ab645fSStefan Brüns 		if (!(master->bits_per_word_mask &
245663ab645fSStefan Brüns 				SPI_BPW_MASK(bits_per_word)))
245763ab645fSStefan Brüns 			return -EINVAL;
245863ab645fSStefan Brüns 	}
245963ab645fSStefan Brüns 
246063ab645fSStefan Brüns 	return 0;
246163ab645fSStefan Brüns }
246263ab645fSStefan Brüns 
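/*
 * Illustrative sketch (not part of the original file): how a controller
 * driver advertises the word sizes checked above, using the SPI_BPW_*
 * helpers from <linux/spi/spi.h>.
 *
 *	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 *
 * or, for a discrete set of sizes:
 *
 *	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
 */
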
24637d077197SDavid Brownell /**
24647d077197SDavid Brownell  * spi_setup - setup SPI mode and clock rate
24657d077197SDavid Brownell  * @spi: the device whose settings are being modified
24667d077197SDavid Brownell  * Context: can sleep, and no requests are queued to the device
24677d077197SDavid Brownell  *
24687d077197SDavid Brownell  * SPI protocol drivers may need to update the transfer mode if the
24697d077197SDavid Brownell  * device doesn't work with its default.  They may likewise need
24707d077197SDavid Brownell  * to update clock rates or word sizes from initial values.  This function
24717d077197SDavid Brownell  * changes those settings, and must be called from a context that can sleep.
24727d077197SDavid Brownell  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
24737d077197SDavid Brownell  * effect the next time the device is selected and data is transferred to
24747d077197SDavid Brownell  * or from it.  When this function returns, the spi device is deselected.
24757d077197SDavid Brownell  *
24767d077197SDavid Brownell  * Note that this call will fail if the protocol driver specifies an option
24777d077197SDavid Brownell  * that the underlying controller or its driver does not support.  For
24787d077197SDavid Brownell  * example, not all hardware supports wire transfers using nine bit words,
24797d077197SDavid Brownell  * LSB-first wire encoding, or active-high chipselects.
248097d56dc6SJavier Martinez Canillas  *
248197d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
24827d077197SDavid Brownell  */
24837d077197SDavid Brownell int spi_setup(struct spi_device *spi)
24847d077197SDavid Brownell {
248583596fbeSGeert Uytterhoeven 	unsigned	bad_bits, ugly_bits;
24865ab8d262SAndy Shevchenko 	int		status;
24877d077197SDavid Brownell 
2488f477b7fbSwangyuhang 	/* check mode to prevent DUAL and QUAD from being set at the same time
2489f477b7fbSwangyuhang 	 */
2490f477b7fbSwangyuhang 	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
2491f477b7fbSwangyuhang 		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
2492f477b7fbSwangyuhang 		dev_err(&spi->dev,
2493f477b7fbSwangyuhang 		"setup: can not select dual and quad at the same time\n");
2494f477b7fbSwangyuhang 		return -EINVAL;
2495f477b7fbSwangyuhang 	}
2496f477b7fbSwangyuhang 	/* in SPI_3WIRE mode, DUAL and QUAD are forbidden
2497f477b7fbSwangyuhang 	 */
2498f477b7fbSwangyuhang 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
2499f477b7fbSwangyuhang 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
2500f477b7fbSwangyuhang 		return -EINVAL;
2501e7db06b5SDavid Brownell 	/* help drivers fail *cleanly* when they need options
2502e7db06b5SDavid Brownell 	 * that aren't supported with their current master
2503e7db06b5SDavid Brownell 	 */
2504e7db06b5SDavid Brownell 	bad_bits = spi->mode & ~spi->master->mode_bits;
250583596fbeSGeert Uytterhoeven 	ugly_bits = bad_bits &
250683596fbeSGeert Uytterhoeven 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
250783596fbeSGeert Uytterhoeven 	if (ugly_bits) {
250883596fbeSGeert Uytterhoeven 		dev_warn(&spi->dev,
250983596fbeSGeert Uytterhoeven 			 "setup: ignoring unsupported mode bits %x\n",
251083596fbeSGeert Uytterhoeven 			 ugly_bits);
251183596fbeSGeert Uytterhoeven 		spi->mode &= ~ugly_bits;
251283596fbeSGeert Uytterhoeven 		bad_bits &= ~ugly_bits;
251383596fbeSGeert Uytterhoeven 	}
2514e7db06b5SDavid Brownell 	if (bad_bits) {
2515eb288a1fSLinus Walleij 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
2516e7db06b5SDavid Brownell 			bad_bits);
2517e7db06b5SDavid Brownell 		return -EINVAL;
2518e7db06b5SDavid Brownell 	}
2519e7db06b5SDavid Brownell 
25207d077197SDavid Brownell 	if (!spi->bits_per_word)
25217d077197SDavid Brownell 		spi->bits_per_word = 8;
25227d077197SDavid Brownell 
25235ab8d262SAndy Shevchenko 	status = __spi_validate_bits_per_word(spi->master, spi->bits_per_word);
25245ab8d262SAndy Shevchenko 	if (status)
25255ab8d262SAndy Shevchenko 		return status;
252663ab645fSStefan Brüns 
2527052eb2d4SAxel Lin 	if (!spi->max_speed_hz)
2528052eb2d4SAxel Lin 		spi->max_speed_hz = spi->master->max_speed_hz;
2529052eb2d4SAxel Lin 
2530caae070cSLaxman Dewangan 	if (spi->master->setup)
25317d077197SDavid Brownell 		status = spi->master->setup(spi);
25327d077197SDavid Brownell 
2533abeedb01SFranklin S Cooper Jr 	spi_set_cs(spi, false);
2534abeedb01SFranklin S Cooper Jr 
25355fe5f05eSJingoo Han 	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
25367d077197SDavid Brownell 			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
25377d077197SDavid Brownell 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
25387d077197SDavid Brownell 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
25397d077197SDavid Brownell 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
25407d077197SDavid Brownell 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
25417d077197SDavid Brownell 			spi->bits_per_word, spi->max_speed_hz,
25427d077197SDavid Brownell 			status);
25437d077197SDavid Brownell 
25447d077197SDavid Brownell 	return status;
25457d077197SDavid Brownell }
25467d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup);
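
/*
 * Illustrative sketch (not part of the original file): a protocol driver
 * adjusting device settings in probe().  The mode, word size and clock
 * rate are assumptions for an imaginary chip.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		int ret;
 *
 *		spi->mode |= SPI_MODE_3;
 *		spi->bits_per_word = 16;
 *		spi->max_speed_hz = 1000000;
 *
 *		ret = spi_setup(spi);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */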
25477d077197SDavid Brownell 
254890808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message)
2549cf32b71eSErnst Schwab {
2550cf32b71eSErnst Schwab 	struct spi_master *master = spi->master;
2551e6811d1dSLaxman Dewangan 	struct spi_transfer *xfer;
25526ea31293SAtsushi Nemoto 	int w_size;
2553cf32b71eSErnst Schwab 
255424a0013aSMark Brown 	if (list_empty(&message->transfers))
255524a0013aSMark Brown 		return -EINVAL;
255624a0013aSMark Brown 
2557cf32b71eSErnst Schwab 	/* Half-duplex links include original MicroWire, and ones with
2558cf32b71eSErnst Schwab 	 * only one data pin like SPI_3WIRE (switches direction) or where
2559cf32b71eSErnst Schwab 	 * either MOSI or MISO is missing.  They can also be caused by
2560cf32b71eSErnst Schwab 	 * software limitations.
2561cf32b71eSErnst Schwab 	 */
2562cf32b71eSErnst Schwab 	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
2563cf32b71eSErnst Schwab 			|| (spi->mode & SPI_3WIRE)) {
2564cf32b71eSErnst Schwab 		unsigned flags = master->flags;
2565cf32b71eSErnst Schwab 
2566cf32b71eSErnst Schwab 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
2567cf32b71eSErnst Schwab 			if (xfer->rx_buf && xfer->tx_buf)
2568cf32b71eSErnst Schwab 				return -EINVAL;
2569cf32b71eSErnst Schwab 			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
2570cf32b71eSErnst Schwab 				return -EINVAL;
2571cf32b71eSErnst Schwab 			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
2572cf32b71eSErnst Schwab 				return -EINVAL;
2573cf32b71eSErnst Schwab 		}
2574cf32b71eSErnst Schwab 	}
2575cf32b71eSErnst Schwab 
2576e6811d1dSLaxman Dewangan 	/*
2577059b8ffeSLaxman Dewangan 	 * Set transfer bits_per_word and max speed as spi device default if
2578059b8ffeSLaxman Dewangan 	 * they are not set for this transfer.
2579f477b7fbSwangyuhang 	 * Set transfer tx_nbits and rx_nbits as single transfer default
2580f477b7fbSwangyuhang 	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
2581e6811d1dSLaxman Dewangan 	 */
258277e80588SMartin Sperl 	message->frame_length = 0;
2583e6811d1dSLaxman Dewangan 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
2584078726ceSSourav Poddar 		message->frame_length += xfer->len;
2585e6811d1dSLaxman Dewangan 		if (!xfer->bits_per_word)
2586e6811d1dSLaxman Dewangan 			xfer->bits_per_word = spi->bits_per_word;
2587a6f87fadSAxel Lin 
2588a6f87fadSAxel Lin 		if (!xfer->speed_hz)
2589059b8ffeSLaxman Dewangan 			xfer->speed_hz = spi->max_speed_hz;
25907dc9fbc3SMark Brown 		if (!xfer->speed_hz)
25917dc9fbc3SMark Brown 			xfer->speed_hz = master->max_speed_hz;
2592a6f87fadSAxel Lin 
259356ede94aSGabor Juhos 		if (master->max_speed_hz &&
259456ede94aSGabor Juhos 		    xfer->speed_hz > master->max_speed_hz)
259556ede94aSGabor Juhos 			xfer->speed_hz = master->max_speed_hz;
259656ede94aSGabor Juhos 
259763ab645fSStefan Brüns 		if (__spi_validate_bits_per_word(master, xfer->bits_per_word))
2598543bb255SStephen Warren 			return -EINVAL;
2599a2fd4f9fSMark Brown 
26004d94bd21SIvan T. Ivanov 		/*
26014d94bd21SIvan T. Ivanov 		 * SPI transfer length should be a multiple of the SPI word size,
26024d94bd21SIvan T. Ivanov 		 * where the word size is rounded up to a power-of-two number of bytes
26034d94bd21SIvan T. Ivanov 		 */
26044d94bd21SIvan T. Ivanov 		if (xfer->bits_per_word <= 8)
26054d94bd21SIvan T. Ivanov 			w_size = 1;
26064d94bd21SIvan T. Ivanov 		else if (xfer->bits_per_word <= 16)
26074d94bd21SIvan T. Ivanov 			w_size = 2;
26084d94bd21SIvan T. Ivanov 		else
26094d94bd21SIvan T. Ivanov 			w_size = 4;
26104d94bd21SIvan T. Ivanov 
26114d94bd21SIvan T. Ivanov 		/* No partial transfers accepted */
26126ea31293SAtsushi Nemoto 		if (xfer->len % w_size)
26134d94bd21SIvan T. Ivanov 			return -EINVAL;
26144d94bd21SIvan T. Ivanov 
2615a2fd4f9fSMark Brown 		if (xfer->speed_hz && master->min_speed_hz &&
2616a2fd4f9fSMark Brown 		    xfer->speed_hz < master->min_speed_hz)
2617a2fd4f9fSMark Brown 			return -EINVAL;
2618f477b7fbSwangyuhang 
2619f477b7fbSwangyuhang 		if (xfer->tx_buf && !xfer->tx_nbits)
2620f477b7fbSwangyuhang 			xfer->tx_nbits = SPI_NBITS_SINGLE;
2621f477b7fbSwangyuhang 		if (xfer->rx_buf && !xfer->rx_nbits)
2622f477b7fbSwangyuhang 			xfer->rx_nbits = SPI_NBITS_SINGLE;
2623f477b7fbSwangyuhang 		/* check transfer tx/rx_nbits:
26241afd9989SGeert Uytterhoeven 		 * 1. check the value matches one of single, dual and quad
26251afd9989SGeert Uytterhoeven 		 * 2. check tx/rx_nbits match the mode in spi_device
2626f477b7fbSwangyuhang 		 */
2627db90a441SSourav Poddar 		if (xfer->tx_buf) {
2628f477b7fbSwangyuhang 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
2629f477b7fbSwangyuhang 				xfer->tx_nbits != SPI_NBITS_DUAL &&
2630f477b7fbSwangyuhang 				xfer->tx_nbits != SPI_NBITS_QUAD)
2631a2fd4f9fSMark Brown 				return -EINVAL;
2632f477b7fbSwangyuhang 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
2633f477b7fbSwangyuhang 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2634f477b7fbSwangyuhang 				return -EINVAL;
2635f477b7fbSwangyuhang 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
2636f477b7fbSwangyuhang 				!(spi->mode & SPI_TX_QUAD))
2637f477b7fbSwangyuhang 				return -EINVAL;
2638db90a441SSourav Poddar 		}
2639f477b7fbSwangyuhang 		/* check transfer rx_nbits */
2640db90a441SSourav Poddar 		if (xfer->rx_buf) {
2641f477b7fbSwangyuhang 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
2642f477b7fbSwangyuhang 				xfer->rx_nbits != SPI_NBITS_DUAL &&
2643f477b7fbSwangyuhang 				xfer->rx_nbits != SPI_NBITS_QUAD)
2644f477b7fbSwangyuhang 				return -EINVAL;
2645f477b7fbSwangyuhang 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
2646f477b7fbSwangyuhang 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2647f477b7fbSwangyuhang 				return -EINVAL;
2648f477b7fbSwangyuhang 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
2649f477b7fbSwangyuhang 				!(spi->mode & SPI_RX_QUAD))
2650f477b7fbSwangyuhang 				return -EINVAL;
2651e6811d1dSLaxman Dewangan 		}
2652e6811d1dSLaxman Dewangan 	}
2653e6811d1dSLaxman Dewangan 
2654cf32b71eSErnst Schwab 	message->status = -EINPROGRESS;
265590808738SMark Brown 
265690808738SMark Brown 	return 0;
265790808738SMark Brown }
265890808738SMark Brown 
265990808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message)
266090808738SMark Brown {
266190808738SMark Brown 	struct spi_master *master = spi->master;
266290808738SMark Brown 
266390808738SMark Brown 	message->spi = spi;
266490808738SMark Brown 
2665eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_async);
2666eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
2667eca2ebc7SMartin Sperl 
266890808738SMark Brown 	trace_spi_message_submit(message);
266990808738SMark Brown 
2670cf32b71eSErnst Schwab 	return master->transfer(spi, message);
2671cf32b71eSErnst Schwab }
2672cf32b71eSErnst Schwab 
2673568d0697SDavid Brownell /**
2674568d0697SDavid Brownell  * spi_async - asynchronous SPI transfer
2675568d0697SDavid Brownell  * @spi: device with which data will be exchanged
2676568d0697SDavid Brownell  * @message: describes the data transfers, including completion callback
2677568d0697SDavid Brownell  * Context: any (irqs may be blocked, etc)
2678568d0697SDavid Brownell  *
2679568d0697SDavid Brownell  * This call may be used in_irq and other contexts which can't sleep,
2680568d0697SDavid Brownell  * as well as from task contexts which can sleep.
2681568d0697SDavid Brownell  *
2682568d0697SDavid Brownell  * The completion callback is invoked in a context which can't sleep.
2683568d0697SDavid Brownell  * Before that invocation, the value of message->status is undefined.
2684568d0697SDavid Brownell  * When the callback is issued, message->status holds either zero (to
2685568d0697SDavid Brownell  * indicate complete success) or a negative error code.  After that
2686568d0697SDavid Brownell  * callback returns, the driver which issued the transfer request may
2687568d0697SDavid Brownell  * deallocate the associated memory; it's no longer in use by any SPI
2688568d0697SDavid Brownell  * core or controller driver code.
2689568d0697SDavid Brownell  *
2690568d0697SDavid Brownell  * Note that although all messages to a spi_device are handled in
2691568d0697SDavid Brownell  * FIFO order, messages may go to different devices in other orders.
2692568d0697SDavid Brownell  * Some device might be higher priority, or have various "hard" access
2693568d0697SDavid Brownell  * time requirements, for example.
2694568d0697SDavid Brownell  *
2695568d0697SDavid Brownell  * On detection of any fault during the transfer, processing of
2696568d0697SDavid Brownell  * the entire message is aborted, and the device is deselected.
2697568d0697SDavid Brownell  * Until returning from the associated message completion callback,
2698568d0697SDavid Brownell  * no other spi_message queued to that device will be processed.
2699568d0697SDavid Brownell  * (This rule applies equally to all the synchronous transfer calls,
2700568d0697SDavid Brownell  * which are wrappers around this core asynchronous primitive.)
270197d56dc6SJavier Martinez Canillas  *
270297d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
2703568d0697SDavid Brownell  */
2704568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message)
2705568d0697SDavid Brownell {
2706568d0697SDavid Brownell 	struct spi_master *master = spi->master;
2707cf32b71eSErnst Schwab 	int ret;
2708cf32b71eSErnst Schwab 	unsigned long flags;
2709568d0697SDavid Brownell 
271090808738SMark Brown 	ret = __spi_validate(spi, message);
271190808738SMark Brown 	if (ret != 0)
271290808738SMark Brown 		return ret;
271390808738SMark Brown 
2714cf32b71eSErnst Schwab 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2715568d0697SDavid Brownell 
2716cf32b71eSErnst Schwab 	if (master->bus_lock_flag)
2717cf32b71eSErnst Schwab 		ret = -EBUSY;
2718cf32b71eSErnst Schwab 	else
2719cf32b71eSErnst Schwab 		ret = __spi_async(spi, message);
2720568d0697SDavid Brownell 
2721cf32b71eSErnst Schwab 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2722cf32b71eSErnst Schwab 
2723cf32b71eSErnst Schwab 	return ret;
2724568d0697SDavid Brownell }
2725568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async);
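
/*
 * Illustrative sketch (not part of the original file): submitting a message
 * from a context that cannot sleep and finishing the work in the completion
 * callback.  struct foo and its members are hypothetical; the message and
 * transfer must stay allocated until the callback has run.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo *priv = context;
 *
 *		(cannot sleep here; check priv->msg.status)
 *	}
 *
 *	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
 *	priv->msg.complete = foo_complete;
 *	priv->msg.context = priv;
 *	ret = spi_async(priv->spi, &priv->msg);
 */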
2726568d0697SDavid Brownell 
2727cf32b71eSErnst Schwab /**
2728cf32b71eSErnst Schwab  * spi_async_locked - version of spi_async with exclusive bus usage
2729cf32b71eSErnst Schwab  * @spi: device with which data will be exchanged
2730cf32b71eSErnst Schwab  * @message: describes the data transfers, including completion callback
2731cf32b71eSErnst Schwab  * Context: any (irqs may be blocked, etc)
2732cf32b71eSErnst Schwab  *
2733cf32b71eSErnst Schwab  * This call may be used in_irq and other contexts which can't sleep,
2734cf32b71eSErnst Schwab  * as well as from task contexts which can sleep.
2735cf32b71eSErnst Schwab  *
2736cf32b71eSErnst Schwab  * The completion callback is invoked in a context which can't sleep.
2737cf32b71eSErnst Schwab  * Before that invocation, the value of message->status is undefined.
2738cf32b71eSErnst Schwab  * When the callback is issued, message->status holds either zero (to
2739cf32b71eSErnst Schwab  * indicate complete success) or a negative error code.  After that
2740cf32b71eSErnst Schwab  * callback returns, the driver which issued the transfer request may
2741cf32b71eSErnst Schwab  * deallocate the associated memory; it's no longer in use by any SPI
2742cf32b71eSErnst Schwab  * core or controller driver code.
2743cf32b71eSErnst Schwab  *
2744cf32b71eSErnst Schwab  * Note that although all messages to a spi_device are handled in
2745cf32b71eSErnst Schwab  * FIFO order, messages may go to different devices in other orders.
2746cf32b71eSErnst Schwab  * Some device might be higher priority, or have various "hard" access
2747cf32b71eSErnst Schwab  * time requirements, for example.
2748cf32b71eSErnst Schwab  *
2749cf32b71eSErnst Schwab  * On detection of any fault during the transfer, processing of
2750cf32b71eSErnst Schwab  * the entire message is aborted, and the device is deselected.
2751cf32b71eSErnst Schwab  * Until returning from the associated message completion callback,
2752cf32b71eSErnst Schwab  * no other spi_message queued to that device will be processed.
2753cf32b71eSErnst Schwab  * (This rule applies equally to all the synchronous transfer calls,
2754cf32b71eSErnst Schwab  * which are wrappers around this core asynchronous primitive.)
275597d56dc6SJavier Martinez Canillas  *
275697d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
2757cf32b71eSErnst Schwab  */
2758cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2759cf32b71eSErnst Schwab {
2760cf32b71eSErnst Schwab 	struct spi_master *master = spi->master;
2761cf32b71eSErnst Schwab 	int ret;
2762cf32b71eSErnst Schwab 	unsigned long flags;
2763cf32b71eSErnst Schwab 
276490808738SMark Brown 	ret = __spi_validate(spi, message);
276590808738SMark Brown 	if (ret != 0)
276690808738SMark Brown 		return ret;
276790808738SMark Brown 
2768cf32b71eSErnst Schwab 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2769cf32b71eSErnst Schwab 
2770cf32b71eSErnst Schwab 	ret = __spi_async(spi, message);
2771cf32b71eSErnst Schwab 
2772cf32b71eSErnst Schwab 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2773cf32b71eSErnst Schwab 
2774cf32b71eSErnst Schwab 	return ret;
2775cf32b71eSErnst Schwab 
2776cf32b71eSErnst Schwab }
2777cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked);
2778cf32b71eSErnst Schwab 
27797d077197SDavid Brownell 
2780556351f1SVignesh R int spi_flash_read(struct spi_device *spi,
2781556351f1SVignesh R 		   struct spi_flash_read_message *msg)
2782556351f1SVignesh R 
2783556351f1SVignesh R {
2784556351f1SVignesh R 	struct spi_master *master = spi->master;
2785f4502dd1SVignesh R 	struct device *rx_dev = NULL;
2786556351f1SVignesh R 	int ret;
2787556351f1SVignesh R 
2788556351f1SVignesh R 	if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2789556351f1SVignesh R 	     msg->addr_nbits == SPI_NBITS_DUAL) &&
2790556351f1SVignesh R 	    !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2791556351f1SVignesh R 		return -EINVAL;
2792556351f1SVignesh R 	if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2793556351f1SVignesh R 	     msg->addr_nbits == SPI_NBITS_QUAD) &&
2794556351f1SVignesh R 	    !(spi->mode & SPI_TX_QUAD))
2795556351f1SVignesh R 		return -EINVAL;
2796556351f1SVignesh R 	if (msg->data_nbits == SPI_NBITS_DUAL &&
2797556351f1SVignesh R 	    !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2798556351f1SVignesh R 		return -EINVAL;
2799556351f1SVignesh R 	if (msg->data_nbits == SPI_NBITS_QUAD &&
2800556351f1SVignesh R 	    !(spi->mode &  SPI_RX_QUAD))
2801556351f1SVignesh R 		return -EINVAL;
2802556351f1SVignesh R 
2803556351f1SVignesh R 	if (master->auto_runtime_pm) {
2804556351f1SVignesh R 		ret = pm_runtime_get_sync(master->dev.parent);
2805556351f1SVignesh R 		if (ret < 0) {
2806556351f1SVignesh R 			dev_err(&master->dev, "Failed to power device: %d\n",
2807556351f1SVignesh R 				ret);
2808556351f1SVignesh R 			return ret;
2809556351f1SVignesh R 		}
2810556351f1SVignesh R 	}
2811f4502dd1SVignesh R 
2812556351f1SVignesh R 	mutex_lock(&master->bus_lock_mutex);
2813ef4d96ecSMark Brown 	mutex_lock(&master->io_mutex);
2814f4502dd1SVignesh R 	if (master->dma_rx) {
2815f4502dd1SVignesh R 		rx_dev = master->dma_rx->device->dev;
2816f4502dd1SVignesh R 		ret = spi_map_buf(master, rx_dev, &msg->rx_sg,
2817f4502dd1SVignesh R 				  msg->buf, msg->len,
2818f4502dd1SVignesh R 				  DMA_FROM_DEVICE);
2819f4502dd1SVignesh R 		if (!ret)
2820f4502dd1SVignesh R 			msg->cur_msg_mapped = true;
2821f4502dd1SVignesh R 	}
2822556351f1SVignesh R 	ret = master->spi_flash_read(spi, msg);
2823f4502dd1SVignesh R 	if (msg->cur_msg_mapped)
2824f4502dd1SVignesh R 		spi_unmap_buf(master, rx_dev, &msg->rx_sg,
2825f4502dd1SVignesh R 			      DMA_FROM_DEVICE);
2826ef4d96ecSMark Brown 	mutex_unlock(&master->io_mutex);
2827556351f1SVignesh R 	mutex_unlock(&master->bus_lock_mutex);
2828f4502dd1SVignesh R 
2829556351f1SVignesh R 	if (master->auto_runtime_pm)
2830556351f1SVignesh R 		pm_runtime_put(master->dev.parent);
2831556351f1SVignesh R 
2832556351f1SVignesh R 	return ret;
2833556351f1SVignesh R }
2834556351f1SVignesh R EXPORT_SYMBOL_GPL(spi_flash_read);
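
/*
 * Illustrative sketch (not part of the original file): an m25p80-style
 * flash driver issuing an accelerated read.  The opcode, address width and
 * dummy byte count are assumptions for the example; the field names are
 * those of struct spi_flash_read_message as declared alongside this call
 * in <linux/spi/spi.h>.
 *
 *	struct spi_flash_read_message msg = { };
 *
 *	msg.buf = buf;
 *	msg.from = from;
 *	msg.len = len;
 *	msg.read_opcode = 0x6b;
 *	msg.addr_width = 3;
 *	msg.dummy_bytes = 1;
 *	msg.opcode_nbits = SPI_NBITS_SINGLE;
 *	msg.addr_nbits = SPI_NBITS_SINGLE;
 *	msg.data_nbits = SPI_NBITS_QUAD;
 *
 *	if (spi_flash_read_supported(spi))
 *		ret = spi_flash_read(spi, &msg);
 */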
2835556351f1SVignesh R 
28367d077197SDavid Brownell /*-------------------------------------------------------------------------*/
28377d077197SDavid Brownell 
28387d077197SDavid Brownell /* Utility methods for SPI master protocol drivers, layered on
28397d077197SDavid Brownell  * top of the core.  Some other utility methods are defined as
28407d077197SDavid Brownell  * inline functions.
28417d077197SDavid Brownell  */
28427d077197SDavid Brownell 
28435d870c8eSAndrew Morton static void spi_complete(void *arg)
28445d870c8eSAndrew Morton {
28455d870c8eSAndrew Morton 	complete(arg);
28465d870c8eSAndrew Morton }
28475d870c8eSAndrew Morton 
2848ef4d96ecSMark Brown static int __spi_sync(struct spi_device *spi, struct spi_message *message)
2849cf32b71eSErnst Schwab {
2850cf32b71eSErnst Schwab 	DECLARE_COMPLETION_ONSTACK(done);
2851cf32b71eSErnst Schwab 	int status;
2852cf32b71eSErnst Schwab 	struct spi_master *master = spi->master;
28530461a414SMark Brown 	unsigned long flags;
28540461a414SMark Brown 
28550461a414SMark Brown 	status = __spi_validate(spi, message);
28560461a414SMark Brown 	if (status != 0)
28570461a414SMark Brown 		return status;
2858cf32b71eSErnst Schwab 
2859cf32b71eSErnst Schwab 	message->complete = spi_complete;
2860cf32b71eSErnst Schwab 	message->context = &done;
28610461a414SMark Brown 	message->spi = spi;
2862cf32b71eSErnst Schwab 
2863eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&master->statistics, spi_sync);
2864eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
2865eca2ebc7SMartin Sperl 
28660461a414SMark Brown 	/* If we're not using the legacy transfer method then we will
28670461a414SMark Brown 	 * try to transfer in the calling context, so special-case this.
28680461a414SMark Brown 	 * This code would be less tricky if we could remove the
28690461a414SMark Brown 	 * support for driver implemented message queues.
28700461a414SMark Brown 	 */
28710461a414SMark Brown 	if (master->transfer == spi_queued_transfer) {
28720461a414SMark Brown 		spin_lock_irqsave(&master->bus_lock_spinlock, flags);
28730461a414SMark Brown 
28740461a414SMark Brown 		trace_spi_message_submit(message);
28750461a414SMark Brown 
28760461a414SMark Brown 		status = __spi_queued_transfer(spi, message, false);
28770461a414SMark Brown 
28780461a414SMark Brown 		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
28790461a414SMark Brown 	} else {
2880cf32b71eSErnst Schwab 		status = spi_async_locked(spi, message);
28810461a414SMark Brown 	}
2882cf32b71eSErnst Schwab 
2883cf32b71eSErnst Schwab 	if (status == 0) {
28840461a414SMark Brown 		/* Push out the messages in the calling context if we
28850461a414SMark Brown 		 * can.
28860461a414SMark Brown 		 */
2887eca2ebc7SMartin Sperl 		if (master->transfer == spi_queued_transfer) {
2888eca2ebc7SMartin Sperl 			SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2889eca2ebc7SMartin Sperl 						       spi_sync_immediate);
2890eca2ebc7SMartin Sperl 			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2891eca2ebc7SMartin Sperl 						       spi_sync_immediate);
2892ef4d96ecSMark Brown 			__spi_pump_messages(master, false);
2893eca2ebc7SMartin Sperl 		}
28940461a414SMark Brown 
2895cf32b71eSErnst Schwab 		wait_for_completion(&done);
2896cf32b71eSErnst Schwab 		status = message->status;
2897cf32b71eSErnst Schwab 	}
2898cf32b71eSErnst Schwab 	message->context = NULL;
2899cf32b71eSErnst Schwab 	return status;
2900cf32b71eSErnst Schwab }
2901cf32b71eSErnst Schwab 
29028ae12a0dSDavid Brownell /**
29038ae12a0dSDavid Brownell  * spi_sync - blocking/synchronous SPI data transfers
29048ae12a0dSDavid Brownell  * @spi: device with which data will be exchanged
29058ae12a0dSDavid Brownell  * @message: describes the data transfers
290633e34dc6SDavid Brownell  * Context: can sleep
29078ae12a0dSDavid Brownell  *
29088ae12a0dSDavid Brownell  * This call may only be used from a context that may sleep.  The sleep
29098ae12a0dSDavid Brownell  * is non-interruptible, and has no timeout.  Low-overhead controller
29108ae12a0dSDavid Brownell  * drivers may DMA directly into and out of the message buffers.
29118ae12a0dSDavid Brownell  *
29128ae12a0dSDavid Brownell  * Note that the SPI device's chip select is active during the message,
29138ae12a0dSDavid Brownell  * and then is normally disabled between messages.  Drivers for some
29148ae12a0dSDavid Brownell  * frequently-used devices may want to minimize costs of selecting a chip,
29158ae12a0dSDavid Brownell  * by leaving it selected in anticipation that the next message will go
29168ae12a0dSDavid Brownell  * to the same chip.  (That may increase power usage.)
29178ae12a0dSDavid Brownell  *
29180c868461SDavid Brownell  * Also, the caller is guaranteeing that the memory associated with the
29190c868461SDavid Brownell  * message will not be freed before this call returns.
29200c868461SDavid Brownell  *
292197d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
29228ae12a0dSDavid Brownell  */
29238ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message)
29248ae12a0dSDavid Brownell {
2925ef4d96ecSMark Brown 	int ret;
2926ef4d96ecSMark Brown 
2927ef4d96ecSMark Brown 	mutex_lock(&spi->master->bus_lock_mutex);
2928ef4d96ecSMark Brown 	ret = __spi_sync(spi, message);
2929ef4d96ecSMark Brown 	mutex_unlock(&spi->master->bus_lock_mutex);
2930ef4d96ecSMark Brown 
2931ef4d96ecSMark Brown 	return ret;
29328ae12a0dSDavid Brownell }
29338ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync);
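
/*
 * Illustrative sketch (not part of the original file): a simple synchronous
 * write built from one transfer.  "cmd" is an assumed DMA-safe buffer owned
 * by the caller.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf	= cmd,
 *		.len	= sizeof(cmd),
 *	};
 *	struct spi_message msg;
 *	int ret;
 *
 *	spi_message_init_with_transfers(&msg, &xfer, 1);
 *	ret = spi_sync(spi, &msg);
 */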
29348ae12a0dSDavid Brownell 
2935cf32b71eSErnst Schwab /**
2936cf32b71eSErnst Schwab  * spi_sync_locked - version of spi_sync with exclusive bus usage
2937cf32b71eSErnst Schwab  * @spi: device with which data will be exchanged
2938cf32b71eSErnst Schwab  * @message: describes the data transfers
2939cf32b71eSErnst Schwab  * Context: can sleep
2940cf32b71eSErnst Schwab  *
2941cf32b71eSErnst Schwab  * This call may only be used from a context that may sleep.  The sleep
2942cf32b71eSErnst Schwab  * is non-interruptible, and has no timeout.  Low-overhead controller
2943cf32b71eSErnst Schwab  * drivers may DMA directly into and out of the message buffers.
2944cf32b71eSErnst Schwab  *
2945cf32b71eSErnst Schwab  * This call should be used by drivers that require exclusive access to the
294625985edcSLucas De Marchi  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
2947cf32b71eSErnst Schwab  * be released by a spi_bus_unlock call when the exclusive access is over.
2948cf32b71eSErnst Schwab  *
294997d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
2950cf32b71eSErnst Schwab  */
2951cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
2952cf32b71eSErnst Schwab {
2953ef4d96ecSMark Brown 	return __spi_sync(spi, message);
2954cf32b71eSErnst Schwab }
2955cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked);
2956cf32b71eSErnst Schwab 
2957cf32b71eSErnst Schwab /**
2958cf32b71eSErnst Schwab  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
2959cf32b71eSErnst Schwab  * @master: SPI bus master that should be locked for exclusive bus access
2960cf32b71eSErnst Schwab  * Context: can sleep
2961cf32b71eSErnst Schwab  *
2962cf32b71eSErnst Schwab  * This call may only be used from a context that may sleep.  The sleep
2963cf32b71eSErnst Schwab  * is non-interruptible, and has no timeout.
2964cf32b71eSErnst Schwab  *
2965cf32b71eSErnst Schwab  * This call should be used by drivers that require exclusive access to the
2966cf32b71eSErnst Schwab  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
2967cf32b71eSErnst Schwab  * exclusive access is over. Data transfer must be done by spi_sync_locked
2968cf32b71eSErnst Schwab  * and spi_async_locked calls when the SPI bus lock is held.
2969cf32b71eSErnst Schwab  *
297097d56dc6SJavier Martinez Canillas  * Return: always zero.
2971cf32b71eSErnst Schwab  */
2972cf32b71eSErnst Schwab int spi_bus_lock(struct spi_master *master)
2973cf32b71eSErnst Schwab {
2974cf32b71eSErnst Schwab 	unsigned long flags;
2975cf32b71eSErnst Schwab 
2976cf32b71eSErnst Schwab 	mutex_lock(&master->bus_lock_mutex);
2977cf32b71eSErnst Schwab 
2978cf32b71eSErnst Schwab 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
2979cf32b71eSErnst Schwab 	master->bus_lock_flag = 1;
2980cf32b71eSErnst Schwab 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
2981cf32b71eSErnst Schwab 
2982cf32b71eSErnst Schwab 	/* mutex remains locked until spi_bus_unlock is called */
2983cf32b71eSErnst Schwab 
2984cf32b71eSErnst Schwab 	return 0;
2985cf32b71eSErnst Schwab }
2986cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock);
2987cf32b71eSErnst Schwab 
2988cf32b71eSErnst Schwab /**
2989cf32b71eSErnst Schwab  * spi_bus_unlock - release the lock for exclusive SPI bus usage
2990cf32b71eSErnst Schwab  * @master: SPI bus master that was locked for exclusive bus access
2991cf32b71eSErnst Schwab  * Context: can sleep
2992cf32b71eSErnst Schwab  *
2993cf32b71eSErnst Schwab  * This call may only be used from a context that may sleep.  The sleep
2994cf32b71eSErnst Schwab  * is non-interruptible, and has no timeout.
2995cf32b71eSErnst Schwab  *
2996cf32b71eSErnst Schwab  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
2997cf32b71eSErnst Schwab  * call.
2998cf32b71eSErnst Schwab  *
299997d56dc6SJavier Martinez Canillas  * Return: always zero.
3000cf32b71eSErnst Schwab  */
3001cf32b71eSErnst Schwab int spi_bus_unlock(struct spi_master *master)
3002cf32b71eSErnst Schwab {
3003cf32b71eSErnst Schwab 	master->bus_lock_flag = 0;
3004cf32b71eSErnst Schwab 
3005cf32b71eSErnst Schwab 	mutex_unlock(&master->bus_lock_mutex);
3006cf32b71eSErnst Schwab 
3007cf32b71eSErnst Schwab 	return 0;
3008cf32b71eSErnst Schwab }
3009cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock);
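
/*
 * Illustrative sketch (not part of the original file): keeping two messages
 * back-to-back on the bus so that no other client can slip in between.
 *
 *	spi_bus_lock(spi->master);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->master);
 */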
3010cf32b71eSErnst Schwab 
3011a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */
3012a9948b61SDavid Brownell #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
30138ae12a0dSDavid Brownell 
30148ae12a0dSDavid Brownell static u8	*buf;
30158ae12a0dSDavid Brownell 
30168ae12a0dSDavid Brownell /**
30178ae12a0dSDavid Brownell  * spi_write_then_read - SPI synchronous write followed by read
30188ae12a0dSDavid Brownell  * @spi: device with which data will be exchanged
30198ae12a0dSDavid Brownell  * @txbuf: data to be written (need not be dma-safe)
30208ae12a0dSDavid Brownell  * @n_tx: size of txbuf, in bytes
302127570497SJiri Pirko  * @rxbuf: buffer into which data will be read (need not be dma-safe)
302227570497SJiri Pirko  * @n_rx: size of rxbuf, in bytes
302333e34dc6SDavid Brownell  * Context: can sleep
30248ae12a0dSDavid Brownell  *
30258ae12a0dSDavid Brownell  * This performs a half duplex MicroWire style transaction with the
30268ae12a0dSDavid Brownell  * device, sending txbuf and then reading rxbuf.  The return value
30278ae12a0dSDavid Brownell  * is zero for success, else a negative errno status code.
3028b885244eSDavid Brownell  * This call may only be used from a context that may sleep.
30298ae12a0dSDavid Brownell  *
30300c868461SDavid Brownell  * Parameters to this routine are always copied using a small buffer;
303133e34dc6SDavid Brownell  * portable code should never use this for more than 32 bytes.
303233e34dc6SDavid Brownell  * Performance-sensitive or bulk transfer code should instead use
30330c868461SDavid Brownell  * spi_{async,sync}() calls with dma-safe buffers.
303497d56dc6SJavier Martinez Canillas  *
303597d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
30368ae12a0dSDavid Brownell  */
30378ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi,
30380c4a1590SMark Brown 		const void *txbuf, unsigned n_tx,
30390c4a1590SMark Brown 		void *rxbuf, unsigned n_rx)
30408ae12a0dSDavid Brownell {
3041068f4070SDavid Brownell 	static DEFINE_MUTEX(lock);
30428ae12a0dSDavid Brownell 
30438ae12a0dSDavid Brownell 	int			status;
30448ae12a0dSDavid Brownell 	struct spi_message	message;
3045bdff549eSDavid Brownell 	struct spi_transfer	x[2];
30468ae12a0dSDavid Brownell 	u8			*local_buf;
30478ae12a0dSDavid Brownell 
3048b3a223eeSMark Brown 	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
3049b3a223eeSMark Brown 	 * copying here (as a pure convenience thing), but we can
3050b3a223eeSMark Brown 	 * keep heap costs out of the hot path unless someone else is
3051b3a223eeSMark Brown 	 * using the pre-allocated buffer or the transfer is too large.
30528ae12a0dSDavid Brownell 	 */
3053b3a223eeSMark Brown 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
30542cd94c8aSMark Brown 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
30552cd94c8aSMark Brown 				    GFP_KERNEL | GFP_DMA);
3056b3a223eeSMark Brown 		if (!local_buf)
3057b3a223eeSMark Brown 			return -ENOMEM;
3058b3a223eeSMark Brown 	} else {
3059b3a223eeSMark Brown 		local_buf = buf;
3060b3a223eeSMark Brown 	}
30618ae12a0dSDavid Brownell 
30628275c642SVitaly Wool 	spi_message_init(&message);
30635fe5f05eSJingoo Han 	memset(x, 0, sizeof(x));
3064bdff549eSDavid Brownell 	if (n_tx) {
3065bdff549eSDavid Brownell 		x[0].len = n_tx;
3066bdff549eSDavid Brownell 		spi_message_add_tail(&x[0], &message);
3067bdff549eSDavid Brownell 	}
3068bdff549eSDavid Brownell 	if (n_rx) {
3069bdff549eSDavid Brownell 		x[1].len = n_rx;
3070bdff549eSDavid Brownell 		spi_message_add_tail(&x[1], &message);
3071bdff549eSDavid Brownell 	}
30728275c642SVitaly Wool 
30738ae12a0dSDavid Brownell 	memcpy(local_buf, txbuf, n_tx);
3074bdff549eSDavid Brownell 	x[0].tx_buf = local_buf;
3075bdff549eSDavid Brownell 	x[1].rx_buf = local_buf + n_tx;
30768ae12a0dSDavid Brownell 
30778ae12a0dSDavid Brownell 	/* do the i/o */
30788ae12a0dSDavid Brownell 	status = spi_sync(spi, &message);
30799b938b74SMarc Pignat 	if (status == 0)
3080bdff549eSDavid Brownell 		memcpy(rxbuf, x[1].rx_buf, n_rx);
30818ae12a0dSDavid Brownell 
3082bdff549eSDavid Brownell 	if (x[0].tx_buf == buf)
3083068f4070SDavid Brownell 		mutex_unlock(&lock);
30848ae12a0dSDavid Brownell 	else
30858ae12a0dSDavid Brownell 		kfree(local_buf);
30868ae12a0dSDavid Brownell 
30878ae12a0dSDavid Brownell 	return status;
30888ae12a0dSDavid Brownell }
30898ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read);
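
/*
 * Illustrative sketch (not part of the original file): reading a three-byte
 * ID after a one-byte command.  The 0x9f opcode is an assumption (JEDEC ID
 * on many flash chips).
 *
 *	u8 cmd = 0x9f;
 *	u8 id[3];
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, id, sizeof(id));
 */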
30908ae12a0dSDavid Brownell 
30918ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
30928ae12a0dSDavid Brownell 
3093ce79d54aSPantelis Antoniou #if IS_ENABLED(CONFIG_OF_DYNAMIC)
3094ce79d54aSPantelis Antoniou static int __spi_of_device_match(struct device *dev, void *data)
3095ce79d54aSPantelis Antoniou {
3096ce79d54aSPantelis Antoniou 	return dev->of_node == data;
3097ce79d54aSPantelis Antoniou }
3098ce79d54aSPantelis Antoniou 
3099ce79d54aSPantelis Antoniou /* must call put_device() when done with the returned spi_device */
3100ce79d54aSPantelis Antoniou static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
3101ce79d54aSPantelis Antoniou {
3102ce79d54aSPantelis Antoniou 	struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
3103ce79d54aSPantelis Antoniou 						__spi_of_device_match);
3104ce79d54aSPantelis Antoniou 	return dev ? to_spi_device(dev) : NULL;
3105ce79d54aSPantelis Antoniou }
3106ce79d54aSPantelis Antoniou 
3107ce79d54aSPantelis Antoniou static int __spi_of_master_match(struct device *dev, const void *data)
3108ce79d54aSPantelis Antoniou {
3109ce79d54aSPantelis Antoniou 	return dev->of_node == data;
3110ce79d54aSPantelis Antoniou }
3111ce79d54aSPantelis Antoniou 
3112ce79d54aSPantelis Antoniou /* the spi masters are not using spi_bus, so we find them another way */
3113ce79d54aSPantelis Antoniou static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
3114ce79d54aSPantelis Antoniou {
3115ce79d54aSPantelis Antoniou 	struct device *dev;
3116ce79d54aSPantelis Antoniou 
3117ce79d54aSPantelis Antoniou 	dev = class_find_device(&spi_master_class, NULL, node,
3118ce79d54aSPantelis Antoniou 				__spi_of_master_match);
3119ce79d54aSPantelis Antoniou 	if (!dev)
3120ce79d54aSPantelis Antoniou 		return NULL;
3121ce79d54aSPantelis Antoniou 
3122ce79d54aSPantelis Antoniou 	/* reference got in class_find_device */
3123ce79d54aSPantelis Antoniou 	return container_of(dev, struct spi_master, dev);
3124ce79d54aSPantelis Antoniou }
3125ce79d54aSPantelis Antoniou 
3126ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3127ce79d54aSPantelis Antoniou 			 void *arg)
3128ce79d54aSPantelis Antoniou {
3129ce79d54aSPantelis Antoniou 	struct of_reconfig_data *rd = arg;
3130ce79d54aSPantelis Antoniou 	struct spi_master *master;
3131ce79d54aSPantelis Antoniou 	struct spi_device *spi;
3132ce79d54aSPantelis Antoniou 
3133ce79d54aSPantelis Antoniou 	switch (of_reconfig_get_state_change(action, arg)) {
3134ce79d54aSPantelis Antoniou 	case OF_RECONFIG_CHANGE_ADD:
3135ce79d54aSPantelis Antoniou 		master = of_find_spi_master_by_node(rd->dn->parent);
3136ce79d54aSPantelis Antoniou 		if (master == NULL)
3137ce79d54aSPantelis Antoniou 			return NOTIFY_OK;	/* not for us */
3138ce79d54aSPantelis Antoniou 
3139bd6c1644SGeert Uytterhoeven 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
3140bd6c1644SGeert Uytterhoeven 			put_device(&master->dev);
3141bd6c1644SGeert Uytterhoeven 			return NOTIFY_OK;
3142bd6c1644SGeert Uytterhoeven 		}
3143bd6c1644SGeert Uytterhoeven 
3144ce79d54aSPantelis Antoniou 		spi = of_register_spi_device(master, rd->dn);
3145ce79d54aSPantelis Antoniou 		put_device(&master->dev);
3146ce79d54aSPantelis Antoniou 
3147ce79d54aSPantelis Antoniou 		if (IS_ERR(spi)) {
3148ce79d54aSPantelis Antoniou 			pr_err("%s: failed to create for '%s'\n",
3149ce79d54aSPantelis Antoniou 					__func__, rd->dn->full_name);
3150e0af98a7SRalf Ramsauer 			of_node_clear_flag(rd->dn, OF_POPULATED);
3151ce79d54aSPantelis Antoniou 			return notifier_from_errno(PTR_ERR(spi));
3152ce79d54aSPantelis Antoniou 		}
3153ce79d54aSPantelis Antoniou 		break;
3154ce79d54aSPantelis Antoniou 
3155ce79d54aSPantelis Antoniou 	case OF_RECONFIG_CHANGE_REMOVE:
3156bd6c1644SGeert Uytterhoeven 		/* already depopulated? */
3157bd6c1644SGeert Uytterhoeven 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
3158bd6c1644SGeert Uytterhoeven 			return NOTIFY_OK;
3159bd6c1644SGeert Uytterhoeven 
3160ce79d54aSPantelis Antoniou 		/* find our device by node */
3161ce79d54aSPantelis Antoniou 		spi = of_find_spi_device_by_node(rd->dn);
3162ce79d54aSPantelis Antoniou 		if (spi == NULL)
3163ce79d54aSPantelis Antoniou 			return NOTIFY_OK;	/* no? not meant for us */
3164ce79d54aSPantelis Antoniou 
3165ce79d54aSPantelis Antoniou 		/* unregister takes one ref away */
3166ce79d54aSPantelis Antoniou 		spi_unregister_device(spi);
3167ce79d54aSPantelis Antoniou 
3168ce79d54aSPantelis Antoniou 		/* and drop the reference taken by the find above */
3169ce79d54aSPantelis Antoniou 		put_device(&spi->dev);
3170ce79d54aSPantelis Antoniou 		break;
3171ce79d54aSPantelis Antoniou 	}
3172ce79d54aSPantelis Antoniou 
3173ce79d54aSPantelis Antoniou 	return NOTIFY_OK;
3174ce79d54aSPantelis Antoniou }
3175ce79d54aSPantelis Antoniou 
3176ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = {
3177ce79d54aSPantelis Antoniou 	.notifier_call = of_spi_notify,
3178ce79d54aSPantelis Antoniou };
3179ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
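/*
 * Stub declaration only: spi_init() guards the registration call with
 * IS_ENABLED(CONFIG_OF_DYNAMIC), so this reference is compiled away, but it
 * keeps that code buildable without an #ifdef.
 */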
3180ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier;
3181ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
3182ce79d54aSPantelis Antoniou 
31837f24467fSOctavian Purdila #if IS_ENABLED(CONFIG_ACPI)
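/*
 * ACPI counterpart of the OF support above: devices described in ACPI tables
 * loaded or unloaded at run time (e.g. SSDTs) are matched to their controller
 * through the ACPI companion of the controller's physical device (the parent
 * of the spi_master class device).
 */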
31847f24467fSOctavian Purdila static int spi_acpi_master_match(struct device *dev, const void *data)
31857f24467fSOctavian Purdila {
31867f24467fSOctavian Purdila 	return ACPI_COMPANION(dev->parent) == data;
31877f24467fSOctavian Purdila }
31887f24467fSOctavian Purdila 
31897f24467fSOctavian Purdila static int spi_acpi_device_match(struct device *dev, void *data)
31907f24467fSOctavian Purdila {
31917f24467fSOctavian Purdila 	return ACPI_COMPANION(dev) == data;
31927f24467fSOctavian Purdila }
31937f24467fSOctavian Purdila 
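/*
 * As with the OF helpers, both finders below hand the caller a reference
 * that must be dropped with put_device() when no longer needed.
 */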
31947f24467fSOctavian Purdila static struct spi_master *acpi_spi_find_master_by_adev(struct acpi_device *adev)
31957f24467fSOctavian Purdila {
31967f24467fSOctavian Purdila 	struct device *dev;
31977f24467fSOctavian Purdila 
31987f24467fSOctavian Purdila 	dev = class_find_device(&spi_master_class, NULL, adev,
31997f24467fSOctavian Purdila 				spi_acpi_master_match);
32007f24467fSOctavian Purdila 	if (!dev)
32017f24467fSOctavian Purdila 		return NULL;
32027f24467fSOctavian Purdila 
32037f24467fSOctavian Purdila 	return container_of(dev, struct spi_master, dev);
32047f24467fSOctavian Purdila }
32057f24467fSOctavian Purdila 
32067f24467fSOctavian Purdila static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
32077f24467fSOctavian Purdila {
32087f24467fSOctavian Purdila 	struct device *dev;
32097f24467fSOctavian Purdila 
32107f24467fSOctavian Purdila 	dev = bus_find_device(&spi_bus_type, NULL, adev, spi_acpi_device_match);
32117f24467fSOctavian Purdila 
32127f24467fSOctavian Purdila 	return dev ? to_spi_device(dev) : NULL;
32137f24467fSOctavian Purdila }
32147f24467fSOctavian Purdila 
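/*
 * Mirror of of_spi_notify(): register an spi_device for each slave that
 * appears when an ACPI table is loaded, and unregister it again when the
 * table is removed.
 */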
32157f24467fSOctavian Purdila static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
32167f24467fSOctavian Purdila 			   void *arg)
32177f24467fSOctavian Purdila {
32187f24467fSOctavian Purdila 	struct acpi_device *adev = arg;
32197f24467fSOctavian Purdila 	struct spi_master *master;
32207f24467fSOctavian Purdila 	struct spi_device *spi;
32217f24467fSOctavian Purdila 
32227f24467fSOctavian Purdila 	switch (value) {
32237f24467fSOctavian Purdila 	case ACPI_RECONFIG_DEVICE_ADD:
32247f24467fSOctavian Purdila 		master = acpi_spi_find_master_by_adev(adev->parent);
32257f24467fSOctavian Purdila 		if (!master)
32267f24467fSOctavian Purdila 			break;
32277f24467fSOctavian Purdila 
32287f24467fSOctavian Purdila 		acpi_register_spi_device(master, adev);
32297f24467fSOctavian Purdila 		put_device(&master->dev);
32307f24467fSOctavian Purdila 		break;
32317f24467fSOctavian Purdila 	case ACPI_RECONFIG_DEVICE_REMOVE:
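		/* never enumerated, so no spi_device can be bound to it */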
32327f24467fSOctavian Purdila 		if (!acpi_device_enumerated(adev))
32337f24467fSOctavian Purdila 			break;
32347f24467fSOctavian Purdila 
32357f24467fSOctavian Purdila 		spi = acpi_spi_find_device_by_adev(adev);
32367f24467fSOctavian Purdila 		if (!spi)
32377f24467fSOctavian Purdila 			break;
32387f24467fSOctavian Purdila 
32397f24467fSOctavian Purdila 		spi_unregister_device(spi);
32407f24467fSOctavian Purdila 		put_device(&spi->dev);
32417f24467fSOctavian Purdila 		break;
32427f24467fSOctavian Purdila 	}
32437f24467fSOctavian Purdila 
32447f24467fSOctavian Purdila 	return NOTIFY_OK;
32457f24467fSOctavian Purdila }
32467f24467fSOctavian Purdila 
32477f24467fSOctavian Purdila static struct notifier_block spi_acpi_notifier = {
32487f24467fSOctavian Purdila 	.notifier_call = acpi_spi_notify,
32497f24467fSOctavian Purdila };
32507f24467fSOctavian Purdila #else
32517f24467fSOctavian Purdila extern struct notifier_block spi_acpi_notifier;
32527f24467fSOctavian Purdila #endif
32537f24467fSOctavian Purdila 
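/*
 * One-time core setup: allocate the bounce buffer used by
 * spi_write_then_read(), register the bus type and the spi_master class,
 * and hook up the dynamic reconfiguration notifiers defined above.
 */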
32548ae12a0dSDavid Brownell static int __init spi_init(void)
32558ae12a0dSDavid Brownell {
3256b885244eSDavid Brownell 	int	status;
32578ae12a0dSDavid Brownell 
3258e94b1766SChristoph Lameter 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
3259b885244eSDavid Brownell 	if (!buf) {
3260b885244eSDavid Brownell 		status = -ENOMEM;
3261b885244eSDavid Brownell 		goto err0;
32628ae12a0dSDavid Brownell 	}
3263b885244eSDavid Brownell 
3264b885244eSDavid Brownell 	status = bus_register(&spi_bus_type);
3265b885244eSDavid Brownell 	if (status < 0)
3266b885244eSDavid Brownell 		goto err1;
3267b885244eSDavid Brownell 
3268b885244eSDavid Brownell 	status = class_register(&spi_master_class);
3269b885244eSDavid Brownell 	if (status < 0)
3270b885244eSDavid Brownell 		goto err2;
3271ce79d54aSPantelis Antoniou 
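	/* pick up devices added later through DT overlays or ACPI table loads */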
32725267720eSFabio Estevam 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
3273ce79d54aSPantelis Antoniou 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
32747f24467fSOctavian Purdila 	if (IS_ENABLED(CONFIG_ACPI))
32757f24467fSOctavian Purdila 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
3276ce79d54aSPantelis Antoniou 
3277b885244eSDavid Brownell 	return 0;
3278b885244eSDavid Brownell 
3279b885244eSDavid Brownell err2:
3280b885244eSDavid Brownell 	bus_unregister(&spi_bus_type);
3281b885244eSDavid Brownell err1:
3282b885244eSDavid Brownell 	kfree(buf);
3283b885244eSDavid Brownell 	buf = NULL;
3284b885244eSDavid Brownell err0:
3285b885244eSDavid Brownell 	return status;
3286b885244eSDavid Brownell }
3287b885244eSDavid Brownell 
32888ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall(),
32898ae12a0dSDavid Brownell  * but even essential drivers wait till later.
3290b885244eSDavid Brownell  *
3291b885244eSDavid Brownell  * REVISIT: only boardinfo really needs static linking. The rest (device and
3292b885244eSDavid Brownell  * driver registration) _could_ be dynamically linked (modular) ... the costs
3293b885244eSDavid Brownell  * include needing to make the boardinfo data structures much more public.
32948ae12a0dSDavid Brownell  */
3295673c0c00SDavid Brownell postcore_initcall(spi_init);
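/*
 * Illustrative sketch, not part of this file: board code usually provides its
 * spi_board_info from an arch_initcall(), which runs after the
 * postcore_initcall() above, so spi_init() is guaranteed to have set up the
 * bus by then. The table contents and my_board_spi_init() are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

static int __init my_board_spi_init(void)
{
	/* spi_register_board_info() copies the table, so __initdata is fine */
	return spi_register_board_info(board_spi_devices,
				       ARRAY_SIZE(board_spi_devices));
}
arch_initcall(my_board_spi_init);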
32968ae12a0dSDavid Brownell 
3297