xref: /linux/drivers/spi/spi.c (revision c7299fea67696db5bd09d924d1f1080d894f92ef)
1b445bfcbSMarco Felsch // SPDX-License-Identifier: GPL-2.0-or-later
2787f4889SMark Brown // SPI init/core code
3787f4889SMark Brown //
4787f4889SMark Brown // Copyright (C) 2005 David Brownell
5787f4889SMark Brown // Copyright (C) 2008 Secret Lab Technologies Ltd.
68ae12a0dSDavid Brownell 
78ae12a0dSDavid Brownell #include <linux/kernel.h>
88ae12a0dSDavid Brownell #include <linux/device.h>
98ae12a0dSDavid Brownell #include <linux/init.h>
108ae12a0dSDavid Brownell #include <linux/cache.h>
1199adef31SMark Brown #include <linux/dma-mapping.h>
1299adef31SMark Brown #include <linux/dmaengine.h>
1394040828SMatthias Kaehlcke #include <linux/mutex.h>
142b7a32f7SSinan Akman #include <linux/of_device.h>
15d57a4282SGrant Likely #include <linux/of_irq.h>
1686be408bSSylwester Nawrocki #include <linux/clk/clk-conf.h>
175a0e3ad6STejun Heo #include <linux/slab.h>
18e0626e38SAnton Vorontsov #include <linux/mod_devicetable.h>
198ae12a0dSDavid Brownell #include <linux/spi/spi.h>
20b5932f5cSBoris Brezillon #include <linux/spi/spi-mem.h>
2174317984SJean-Christophe PLAGNIOL-VILLARD #include <linux/of_gpio.h>
22f3186dd8SLinus Walleij #include <linux/gpio/consumer.h>
233ae22e8cSMark Brown #include <linux/pm_runtime.h>
24f48c767cSUlf Hansson #include <linux/pm_domain.h>
25826cf175SDmitry Torokhov #include <linux/property.h>
26025ed130SPaul Gortmaker #include <linux/export.h>
278bd75c77SClark Williams #include <linux/sched/rt.h>
28ae7e81c0SIngo Molnar #include <uapi/linux/sched/types.h>
29ffbbdd21SLinus Walleij #include <linux/delay.h>
30ffbbdd21SLinus Walleij #include <linux/kthread.h>
3164bee4d2SMika Westerberg #include <linux/ioport.h>
3264bee4d2SMika Westerberg #include <linux/acpi.h>
33b1b8153cSVignesh R #include <linux/highmem.h>
349b61e302SSuniel Mahesh #include <linux/idr.h>
358a2e487eSLukas Wunner #include <linux/platform_data/x86/apple.h>
368ae12a0dSDavid Brownell 
3756ec1978SMark Brown #define CREATE_TRACE_POINTS
3856ec1978SMark Brown #include <trace/events/spi.h>
39ca1438dcSArnd Bergmann EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
40ca1438dcSArnd Bergmann EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);
419b61e302SSuniel Mahesh 
4246336966SBoris Brezillon #include "internals.h"
4346336966SBoris Brezillon 
449b61e302SSuniel Mahesh static DEFINE_IDR(spi_master_idr);
4556ec1978SMark Brown 
468ae12a0dSDavid Brownell static void spidev_release(struct device *dev)
478ae12a0dSDavid Brownell {
480ffa0285SHans-Peter Nilsson 	struct spi_device	*spi = to_spi_device(dev);
498ae12a0dSDavid Brownell 
508caab75fSGeert Uytterhoeven 	spi_controller_put(spi->controller);
515039563eSTrent Piepho 	kfree(spi->driver_override);
5207a389feSRoman Tereshonkov 	kfree(spi);
538ae12a0dSDavid Brownell }
548ae12a0dSDavid Brownell 
558ae12a0dSDavid Brownell static ssize_t
568ae12a0dSDavid Brownell modalias_show(struct device *dev, struct device_attribute *a, char *buf)
578ae12a0dSDavid Brownell {
588ae12a0dSDavid Brownell 	const struct spi_device	*spi = to_spi_device(dev);
598c4ff6d0SZhang Rui 	int len;
608c4ff6d0SZhang Rui 
618c4ff6d0SZhang Rui 	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
628c4ff6d0SZhang Rui 	if (len != -ENODEV)
638c4ff6d0SZhang Rui 		return len;
648ae12a0dSDavid Brownell 
65d8e328b3SGrant Likely 	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
668ae12a0dSDavid Brownell }
67aa7da564SGreg Kroah-Hartman static DEVICE_ATTR_RO(modalias);
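
/*
 * Illustrative note (not part of the original file): for a device whose
 * modalias is "example-chip", reading this attribute from user space
 * returns "spi:example-chip" (SPI_MODULE_PREFIX is "spi:"), e.g.:
 *
 *	$ cat /sys/bus/spi/devices/spi0.0/modalias
 *	spi:example-chip
 *
 * unless the device has an ACPI companion, in which case the ACPI modalias
 * reported by acpi_device_modalias() is returned instead.
 */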
688ae12a0dSDavid Brownell 
695039563eSTrent Piepho static ssize_t driver_override_store(struct device *dev,
705039563eSTrent Piepho 				     struct device_attribute *a,
715039563eSTrent Piepho 				     const char *buf, size_t count)
725039563eSTrent Piepho {
735039563eSTrent Piepho 	struct spi_device *spi = to_spi_device(dev);
745039563eSTrent Piepho 	const char *end = memchr(buf, '\n', count);
755039563eSTrent Piepho 	const size_t len = end ? end - buf : count;
765039563eSTrent Piepho 	const char *driver_override, *old;
775039563eSTrent Piepho 
785039563eSTrent Piepho 	/* We need to keep extra room for a newline when displaying value */
795039563eSTrent Piepho 	if (len >= (PAGE_SIZE - 1))
805039563eSTrent Piepho 		return -EINVAL;
815039563eSTrent Piepho 
825039563eSTrent Piepho 	driver_override = kstrndup(buf, len, GFP_KERNEL);
835039563eSTrent Piepho 	if (!driver_override)
845039563eSTrent Piepho 		return -ENOMEM;
855039563eSTrent Piepho 
865039563eSTrent Piepho 	device_lock(dev);
875039563eSTrent Piepho 	old = spi->driver_override;
885039563eSTrent Piepho 	if (len) {
895039563eSTrent Piepho 		spi->driver_override = driver_override;
905039563eSTrent Piepho 	} else {
91be73e323SAndy Shevchenko 		/* Empty string, disable driver override */
925039563eSTrent Piepho 		spi->driver_override = NULL;
935039563eSTrent Piepho 		kfree(driver_override);
945039563eSTrent Piepho 	}
955039563eSTrent Piepho 	device_unlock(dev);
965039563eSTrent Piepho 	kfree(old);
975039563eSTrent Piepho 
985039563eSTrent Piepho 	return count;
995039563eSTrent Piepho }
1005039563eSTrent Piepho 
1015039563eSTrent Piepho static ssize_t driver_override_show(struct device *dev,
1025039563eSTrent Piepho 				    struct device_attribute *a, char *buf)
1035039563eSTrent Piepho {
1045039563eSTrent Piepho 	const struct spi_device *spi = to_spi_device(dev);
1055039563eSTrent Piepho 	ssize_t len;
1065039563eSTrent Piepho 
1075039563eSTrent Piepho 	device_lock(dev);
1085039563eSTrent Piepho 	len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
1095039563eSTrent Piepho 	device_unlock(dev);
1105039563eSTrent Piepho 	return len;
1115039563eSTrent Piepho }
1125039563eSTrent Piepho static DEVICE_ATTR_RW(driver_override);
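
/*
 * Usage sketch (illustrative, not part of the original file): the attribute
 * above lets user space force a particular driver onto a device, bypassing
 * the modalias/OF/ACPI matching done in spi_match_device(). Device and
 * driver names are examples only:
 *
 *	# echo spidev > /sys/bus/spi/devices/spi0.0/driver_override
 *	# echo spi0.0 > /sys/bus/spi/drivers/spidev/bind
 *
 * Writing an empty string clears the override again.
 */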
1135039563eSTrent Piepho 
114eca2ebc7SMartin Sperl #define SPI_STATISTICS_ATTRS(field, file)				\
1158caab75fSGeert Uytterhoeven static ssize_t spi_controller_##field##_show(struct device *dev,	\
116eca2ebc7SMartin Sperl 					     struct device_attribute *attr, \
117eca2ebc7SMartin Sperl 					     char *buf)			\
118eca2ebc7SMartin Sperl {									\
1198caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = container_of(dev,			\
1208caab75fSGeert Uytterhoeven 					 struct spi_controller, dev);	\
1218caab75fSGeert Uytterhoeven 	return spi_statistics_##field##_show(&ctlr->statistics, buf);	\
122eca2ebc7SMartin Sperl }									\
1238caab75fSGeert Uytterhoeven static struct device_attribute dev_attr_spi_controller_##field = {	\
124ad25c92eSGeert Uytterhoeven 	.attr = { .name = file, .mode = 0444 },				\
1258caab75fSGeert Uytterhoeven 	.show = spi_controller_##field##_show,				\
126eca2ebc7SMartin Sperl };									\
127eca2ebc7SMartin Sperl static ssize_t spi_device_##field##_show(struct device *dev,		\
128eca2ebc7SMartin Sperl 					 struct device_attribute *attr,	\
129eca2ebc7SMartin Sperl 					char *buf)			\
130eca2ebc7SMartin Sperl {									\
131d1eba93bSGeliang Tang 	struct spi_device *spi = to_spi_device(dev);			\
132eca2ebc7SMartin Sperl 	return spi_statistics_##field##_show(&spi->statistics, buf);	\
133eca2ebc7SMartin Sperl }									\
134eca2ebc7SMartin Sperl static struct device_attribute dev_attr_spi_device_##field = {		\
135ad25c92eSGeert Uytterhoeven 	.attr = { .name = file, .mode = 0444 },				\
136eca2ebc7SMartin Sperl 	.show = spi_device_##field##_show,				\
137eca2ebc7SMartin Sperl }
138eca2ebc7SMartin Sperl 
139eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW_NAME(name, file, field, format_string)	\
140eca2ebc7SMartin Sperl static ssize_t spi_statistics_##name##_show(struct spi_statistics *stat, \
141eca2ebc7SMartin Sperl 					    char *buf)			\
142eca2ebc7SMartin Sperl {									\
143eca2ebc7SMartin Sperl 	unsigned long flags;						\
144eca2ebc7SMartin Sperl 	ssize_t len;							\
145eca2ebc7SMartin Sperl 	spin_lock_irqsave(&stat->lock, flags);				\
146eca2ebc7SMartin Sperl 	len = sprintf(buf, format_string, stat->field);			\
147eca2ebc7SMartin Sperl 	spin_unlock_irqrestore(&stat->lock, flags);			\
148eca2ebc7SMartin Sperl 	return len;							\
149eca2ebc7SMartin Sperl }									\
150eca2ebc7SMartin Sperl SPI_STATISTICS_ATTRS(name, file)
151eca2ebc7SMartin Sperl 
152eca2ebc7SMartin Sperl #define SPI_STATISTICS_SHOW(field, format_string)			\
153eca2ebc7SMartin Sperl 	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
154eca2ebc7SMartin Sperl 				 field, format_string)
155eca2ebc7SMartin Sperl 
156eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(messages, "%lu");
157eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(transfers, "%lu");
158eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(errors, "%lu");
159eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(timedout, "%lu");
160eca2ebc7SMartin Sperl 
161eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync, "%lu");
162eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_sync_immediate, "%lu");
163eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(spi_async, "%lu");
164eca2ebc7SMartin Sperl 
165eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes, "%llu");
166eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_rx, "%llu");
167eca2ebc7SMartin Sperl SPI_STATISTICS_SHOW(bytes_tx, "%llu");
168eca2ebc7SMartin Sperl 
1696b7bc061SMartin Sperl #define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
1706b7bc061SMartin Sperl 	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
1716b7bc061SMartin Sperl 				 "transfer_bytes_histo_" number,	\
1726b7bc061SMartin Sperl 				 transfer_bytes_histo[index],  "%lu")
1736b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
1746b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
1756b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
1766b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
1776b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
1786b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
1796b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
1806b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
1816b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
1826b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
1836b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
1846b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
1856b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
1866b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
1876b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
1886b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
1896b7bc061SMartin Sperl SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
1906b7bc061SMartin Sperl 
191d9f12122SMartin Sperl SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
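
/*
 * For reference (sketch, not part of the original file): after preprocessing,
 * SPI_STATISTICS_SHOW(messages, "%lu") above expands roughly to
 *
 *	static ssize_t spi_statistics_messages_show(struct spi_statistics *stat,
 *						    char *buf)
 *	{
 *		unsigned long flags;
 *		ssize_t len;
 *		spin_lock_irqsave(&stat->lock, flags);
 *		len = sprintf(buf, "%lu", stat->messages);
 *		spin_unlock_irqrestore(&stat->lock, flags);
 *		return len;
 *	}
 *
 * plus, via SPI_STATISTICS_ATTRS(messages, "messages"), the controller and
 * device show() wrappers and the read-only (0444) attributes
 * dev_attr_spi_controller_messages and dev_attr_spi_device_messages that
 * back the "messages" files in the sysfs "statistics" groups below.
 */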
192d9f12122SMartin Sperl 
193aa7da564SGreg Kroah-Hartman static struct attribute *spi_dev_attrs[] = {
194aa7da564SGreg Kroah-Hartman 	&dev_attr_modalias.attr,
1955039563eSTrent Piepho 	&dev_attr_driver_override.attr,
196aa7da564SGreg Kroah-Hartman 	NULL,
1978ae12a0dSDavid Brownell };
198eca2ebc7SMartin Sperl 
199eca2ebc7SMartin Sperl static const struct attribute_group spi_dev_group = {
200eca2ebc7SMartin Sperl 	.attrs  = spi_dev_attrs,
201eca2ebc7SMartin Sperl };
202eca2ebc7SMartin Sperl 
203eca2ebc7SMartin Sperl static struct attribute *spi_device_statistics_attrs[] = {
204eca2ebc7SMartin Sperl 	&dev_attr_spi_device_messages.attr,
205eca2ebc7SMartin Sperl 	&dev_attr_spi_device_transfers.attr,
206eca2ebc7SMartin Sperl 	&dev_attr_spi_device_errors.attr,
207eca2ebc7SMartin Sperl 	&dev_attr_spi_device_timedout.attr,
208eca2ebc7SMartin Sperl 	&dev_attr_spi_device_spi_sync.attr,
209eca2ebc7SMartin Sperl 	&dev_attr_spi_device_spi_sync_immediate.attr,
210eca2ebc7SMartin Sperl 	&dev_attr_spi_device_spi_async.attr,
211eca2ebc7SMartin Sperl 	&dev_attr_spi_device_bytes.attr,
212eca2ebc7SMartin Sperl 	&dev_attr_spi_device_bytes_rx.attr,
213eca2ebc7SMartin Sperl 	&dev_attr_spi_device_bytes_tx.attr,
2146b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo0.attr,
2156b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo1.attr,
2166b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo2.attr,
2176b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo3.attr,
2186b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo4.attr,
2196b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo5.attr,
2206b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo6.attr,
2216b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo7.attr,
2226b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo8.attr,
2236b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo9.attr,
2246b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo10.attr,
2256b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo11.attr,
2266b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo12.attr,
2276b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo13.attr,
2286b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo14.attr,
2296b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo15.attr,
2306b7bc061SMartin Sperl 	&dev_attr_spi_device_transfer_bytes_histo16.attr,
231d9f12122SMartin Sperl 	&dev_attr_spi_device_transfers_split_maxsize.attr,
232eca2ebc7SMartin Sperl 	NULL,
233eca2ebc7SMartin Sperl };
234eca2ebc7SMartin Sperl 
235eca2ebc7SMartin Sperl static const struct attribute_group spi_device_statistics_group = {
236eca2ebc7SMartin Sperl 	.name  = "statistics",
237eca2ebc7SMartin Sperl 	.attrs  = spi_device_statistics_attrs,
238eca2ebc7SMartin Sperl };
239eca2ebc7SMartin Sperl 
240eca2ebc7SMartin Sperl static const struct attribute_group *spi_dev_groups[] = {
241eca2ebc7SMartin Sperl 	&spi_dev_group,
242eca2ebc7SMartin Sperl 	&spi_device_statistics_group,
243eca2ebc7SMartin Sperl 	NULL,
244eca2ebc7SMartin Sperl };
245eca2ebc7SMartin Sperl 
2468caab75fSGeert Uytterhoeven static struct attribute *spi_controller_statistics_attrs[] = {
2478caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_messages.attr,
2488caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfers.attr,
2498caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_errors.attr,
2508caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_timedout.attr,
2518caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_spi_sync.attr,
2528caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_spi_sync_immediate.attr,
2538caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_spi_async.attr,
2548caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_bytes.attr,
2558caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_bytes_rx.attr,
2568caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_bytes_tx.attr,
2578caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
2588caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
2598caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
2608caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
2618caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
2628caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
2638caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
2648caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
2658caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
2668caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
2678caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
2688caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
2698caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
2708caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
2718caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
2728caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
2738caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
2748caab75fSGeert Uytterhoeven 	&dev_attr_spi_controller_transfers_split_maxsize.attr,
275eca2ebc7SMartin Sperl 	NULL,
276eca2ebc7SMartin Sperl };
277eca2ebc7SMartin Sperl 
2788caab75fSGeert Uytterhoeven static const struct attribute_group spi_controller_statistics_group = {
279eca2ebc7SMartin Sperl 	.name  = "statistics",
2808caab75fSGeert Uytterhoeven 	.attrs  = spi_controller_statistics_attrs,
281eca2ebc7SMartin Sperl };
282eca2ebc7SMartin Sperl 
283eca2ebc7SMartin Sperl static const struct attribute_group *spi_master_groups[] = {
2848caab75fSGeert Uytterhoeven 	&spi_controller_statistics_group,
285eca2ebc7SMartin Sperl 	NULL,
286eca2ebc7SMartin Sperl };
287eca2ebc7SMartin Sperl 
288eca2ebc7SMartin Sperl void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
289eca2ebc7SMartin Sperl 				       struct spi_transfer *xfer,
2908caab75fSGeert Uytterhoeven 				       struct spi_controller *ctlr)
291eca2ebc7SMartin Sperl {
292eca2ebc7SMartin Sperl 	unsigned long flags;
2936b7bc061SMartin Sperl 	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
2946b7bc061SMartin Sperl 
2956b7bc061SMartin Sperl 	if (l2len < 0)
2966b7bc061SMartin Sperl 		l2len = 0;
297eca2ebc7SMartin Sperl 
298eca2ebc7SMartin Sperl 	spin_lock_irqsave(&stats->lock, flags);
299eca2ebc7SMartin Sperl 
300eca2ebc7SMartin Sperl 	stats->transfers++;
3016b7bc061SMartin Sperl 	stats->transfer_bytes_histo[l2len]++;
302eca2ebc7SMartin Sperl 
303eca2ebc7SMartin Sperl 	stats->bytes += xfer->len;
304eca2ebc7SMartin Sperl 	if ((xfer->tx_buf) &&
3058caab75fSGeert Uytterhoeven 	    (xfer->tx_buf != ctlr->dummy_tx))
306eca2ebc7SMartin Sperl 		stats->bytes_tx += xfer->len;
307eca2ebc7SMartin Sperl 	if ((xfer->rx_buf) &&
3088caab75fSGeert Uytterhoeven 	    (xfer->rx_buf != ctlr->dummy_rx))
309eca2ebc7SMartin Sperl 		stats->bytes_rx += xfer->len;
310eca2ebc7SMartin Sperl 
311eca2ebc7SMartin Sperl 	spin_unlock_irqrestore(&stats->lock, flags);
312eca2ebc7SMartin Sperl }
313eca2ebc7SMartin Sperl EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
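
/*
 * Worked example of the bucketing above (illustrative, not part of the
 * original file): with SPI_STATISTICS_HISTO_SIZE == 17, indices 0..16 map
 * to the "0-1" ... "65536+" attributes defined earlier, so
 *
 *	xfer->len = 0     -> fls(0)  = 0  -> l2len = -1, clamped to 0  ("0-1")
 *	xfer->len = 3     -> fls(3)  = 2  -> l2len = 1                 ("2-3")
 *	xfer->len = 16    -> fls(16) = 5  -> l2len = 4                 ("16-31")
 *	xfer->len = 70000 -> fls() = 17, capped by min() -> l2len = 16 ("65536+")
 */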
3148ae12a0dSDavid Brownell 
3158ae12a0dSDavid Brownell /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
3168ae12a0dSDavid Brownell  * and the sysfs version makes coldplug work too.
3178ae12a0dSDavid Brownell  */
3188ae12a0dSDavid Brownell 
31975368bf6SAnton Vorontsov static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
32075368bf6SAnton Vorontsov 						const struct spi_device *sdev)
32175368bf6SAnton Vorontsov {
32275368bf6SAnton Vorontsov 	while (id->name[0]) {
32375368bf6SAnton Vorontsov 		if (!strcmp(sdev->modalias, id->name))
32475368bf6SAnton Vorontsov 			return id;
32575368bf6SAnton Vorontsov 		id++;
32675368bf6SAnton Vorontsov 	}
32775368bf6SAnton Vorontsov 	return NULL;
32875368bf6SAnton Vorontsov }
32975368bf6SAnton Vorontsov 
33075368bf6SAnton Vorontsov const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
33175368bf6SAnton Vorontsov {
33275368bf6SAnton Vorontsov 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
33375368bf6SAnton Vorontsov 
33475368bf6SAnton Vorontsov 	return spi_match_id(sdrv->id_table, sdev);
33575368bf6SAnton Vorontsov }
33675368bf6SAnton Vorontsov EXPORT_SYMBOL_GPL(spi_get_device_id);
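
/*
 * Illustrative sketch (hypothetical names, not part of the original file):
 * the id_table walked by spi_match_id() above is an array terminated by an
 * entry with an empty name, as a protocol driver might declare it. Kept
 * under #if 0 so it is not built here.
 */
#if 0
static const struct spi_device_id example_spi_ids[] = {
	{ "example-chip-a", 0 },
	{ "example-chip-b", 1 },	/* driver_data available to probe() */
	{ }				/* sentinel: empty name ends the walk */
};
MODULE_DEVICE_TABLE(spi, example_spi_ids);
#endif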
33775368bf6SAnton Vorontsov 
3388ae12a0dSDavid Brownell static int spi_match_device(struct device *dev, struct device_driver *drv)
3398ae12a0dSDavid Brownell {
3408ae12a0dSDavid Brownell 	const struct spi_device	*spi = to_spi_device(dev);
34175368bf6SAnton Vorontsov 	const struct spi_driver	*sdrv = to_spi_driver(drv);
34275368bf6SAnton Vorontsov 
3435039563eSTrent Piepho 	/* Check override first, and if set, only use the named driver */
3445039563eSTrent Piepho 	if (spi->driver_override)
3455039563eSTrent Piepho 		return strcmp(spi->driver_override, drv->name) == 0;
3465039563eSTrent Piepho 
3472b7a32f7SSinan Akman 	/* Attempt an OF style match */
3482b7a32f7SSinan Akman 	if (of_driver_match_device(dev, drv))
3492b7a32f7SSinan Akman 		return 1;
3502b7a32f7SSinan Akman 
35164bee4d2SMika Westerberg 	/* Then try ACPI */
35264bee4d2SMika Westerberg 	if (acpi_driver_match_device(dev, drv))
35364bee4d2SMika Westerberg 		return 1;
35464bee4d2SMika Westerberg 
35575368bf6SAnton Vorontsov 	if (sdrv->id_table)
35675368bf6SAnton Vorontsov 		return !!spi_match_id(sdrv->id_table, spi);
3578ae12a0dSDavid Brownell 
35835f74fcaSKay Sievers 	return strcmp(spi->modalias, drv->name) == 0;
3598ae12a0dSDavid Brownell }
3608ae12a0dSDavid Brownell 
3617eff2e7aSKay Sievers static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
3628ae12a0dSDavid Brownell {
3638ae12a0dSDavid Brownell 	const struct spi_device		*spi = to_spi_device(dev);
3648c4ff6d0SZhang Rui 	int rc;
3658c4ff6d0SZhang Rui 
3668c4ff6d0SZhang Rui 	rc = acpi_device_uevent_modalias(dev, env);
3678c4ff6d0SZhang Rui 	if (rc != -ENODEV)
3688c4ff6d0SZhang Rui 		return rc;
3698ae12a0dSDavid Brownell 
3702856670fSAndy Shevchenko 	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
3718ae12a0dSDavid Brownell }
3728ae12a0dSDavid Brownell 
3739db34ee6SUwe Kleine-König static int spi_probe(struct device *dev)
374b885244eSDavid Brownell {
375b885244eSDavid Brownell 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
37644af7927SJon Hunter 	struct spi_device		*spi = to_spi_device(dev);
37733cf00e5SMika Westerberg 	int ret;
378b885244eSDavid Brownell 
37986be408bSSylwester Nawrocki 	ret = of_clk_set_defaults(dev->of_node, false);
38086be408bSSylwester Nawrocki 	if (ret)
38186be408bSSylwester Nawrocki 		return ret;
38286be408bSSylwester Nawrocki 
38344af7927SJon Hunter 	if (dev->of_node) {
38444af7927SJon Hunter 		spi->irq = of_irq_get(dev->of_node, 0);
38544af7927SJon Hunter 		if (spi->irq == -EPROBE_DEFER)
38644af7927SJon Hunter 			return -EPROBE_DEFER;
38744af7927SJon Hunter 		if (spi->irq < 0)
38844af7927SJon Hunter 			spi->irq = 0;
38944af7927SJon Hunter 	}
39044af7927SJon Hunter 
391676e7c25SUlf Hansson 	ret = dev_pm_domain_attach(dev, true);
39271f277a7SUlf Hansson 	if (ret)
39371f277a7SUlf Hansson 		return ret;
39471f277a7SUlf Hansson 
395440408dbSUwe Kleine-König 	if (sdrv->probe) {
39644af7927SJon Hunter 		ret = sdrv->probe(spi);
39733cf00e5SMika Westerberg 		if (ret)
398676e7c25SUlf Hansson 			dev_pm_domain_detach(dev, true);
399440408dbSUwe Kleine-König 	}
40033cf00e5SMika Westerberg 
40133cf00e5SMika Westerberg 	return ret;
402b885244eSDavid Brownell }
403b885244eSDavid Brownell 
4049db34ee6SUwe Kleine-König static int spi_remove(struct device *dev)
405b885244eSDavid Brownell {
406b885244eSDavid Brownell 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
407b885244eSDavid Brownell 
4087795d475SUwe Kleine-König 	if (sdrv->remove) {
4097795d475SUwe Kleine-König 		int ret;
4107795d475SUwe Kleine-König 
411aec35f4eSJean Delvare 		ret = sdrv->remove(to_spi_device(dev));
4127795d475SUwe Kleine-König 		if (ret)
4137795d475SUwe Kleine-König 			dev_warn(dev,
4147795d475SUwe Kleine-König 				 "Failed to unbind driver (%pe), ignoring\n",
4157795d475SUwe Kleine-König 				 ERR_PTR(ret));
4167795d475SUwe Kleine-König 	}
4177795d475SUwe Kleine-König 
418676e7c25SUlf Hansson 	dev_pm_domain_detach(dev, true);
41933cf00e5SMika Westerberg 
4207795d475SUwe Kleine-König 	return 0;
421b885244eSDavid Brownell }
422b885244eSDavid Brownell 
4239db34ee6SUwe Kleine-König static void spi_shutdown(struct device *dev)
424b885244eSDavid Brownell {
425a6f483b2SMarek Szyprowski 	if (dev->driver) {
426b885244eSDavid Brownell 		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);
427b885244eSDavid Brownell 
4289db34ee6SUwe Kleine-König 		if (sdrv->shutdown)
429b885244eSDavid Brownell 			sdrv->shutdown(to_spi_device(dev));
430b885244eSDavid Brownell 	}
431a6f483b2SMarek Szyprowski }
432b885244eSDavid Brownell 
4339db34ee6SUwe Kleine-König struct bus_type spi_bus_type = {
4349db34ee6SUwe Kleine-König 	.name		= "spi",
4359db34ee6SUwe Kleine-König 	.dev_groups	= spi_dev_groups,
4369db34ee6SUwe Kleine-König 	.match		= spi_match_device,
4379db34ee6SUwe Kleine-König 	.uevent		= spi_uevent,
4389db34ee6SUwe Kleine-König 	.probe		= spi_probe,
4399db34ee6SUwe Kleine-König 	.remove		= spi_remove,
4409db34ee6SUwe Kleine-König 	.shutdown	= spi_shutdown,
4419db34ee6SUwe Kleine-König };
4429db34ee6SUwe Kleine-König EXPORT_SYMBOL_GPL(spi_bus_type);
4439db34ee6SUwe Kleine-König 
44433e34dc6SDavid Brownell /**
445ca5d2485SAndrew F. Davis  * __spi_register_driver - register a SPI driver
44688c9321dSThierry Reding  * @owner: owner module of the driver to register
44733e34dc6SDavid Brownell  * @sdrv: the driver to register
44833e34dc6SDavid Brownell  * Context: can sleep
44997d56dc6SJavier Martinez Canillas  *
45097d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
45133e34dc6SDavid Brownell  */
452ca5d2485SAndrew F. Davis int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
453b885244eSDavid Brownell {
454ca5d2485SAndrew F. Davis 	sdrv->driver.owner = owner;
455b885244eSDavid Brownell 	sdrv->driver.bus = &spi_bus_type;
456b885244eSDavid Brownell 	return driver_register(&sdrv->driver);
457b885244eSDavid Brownell }
458ca5d2485SAndrew F. Davis EXPORT_SYMBOL_GPL(__spi_register_driver);
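
/*
 * Minimal registration sketch (hypothetical driver, not part of the original
 * file): protocol drivers normally reach __spi_register_driver() through the
 * spi_register_driver()/module_spi_driver() helpers from <linux/spi/spi.h>.
 * Note that at this revision the remove() callback still returns int. Kept
 * under #if 0 so it is not built here.
 */
#if 0
static int example_probe(struct spi_device *spi)
{
	/* Adjust spi->mode, spi->max_speed_hz, etc. and call spi_setup() */
	return 0;
}

static int example_remove(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver example_driver = {
	.driver = {
		.name	= "example-chip",
	},
	.probe	= example_probe,
	.remove	= example_remove,
};
module_spi_driver(example_driver);
#endif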
459b885244eSDavid Brownell 
4608ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
4618ae12a0dSDavid Brownell 
4628ae12a0dSDavid Brownell /* SPI devices should normally not be created by SPI device drivers; that
4638caab75fSGeert Uytterhoeven  * would make them board-specific.  Similarly with SPI controller drivers.
4648ae12a0dSDavid Brownell  * Device registration normally goes into a file like arch/.../mach.../board-YYY.c
4658ae12a0dSDavid Brownell  * with other readonly (flashable) information about mainboard devices.
4668ae12a0dSDavid Brownell  */
4678ae12a0dSDavid Brownell 
4688ae12a0dSDavid Brownell struct boardinfo {
4698ae12a0dSDavid Brownell 	struct list_head	list;
4702b9603a0SFeng Tang 	struct spi_board_info	board_info;
4718ae12a0dSDavid Brownell };
4728ae12a0dSDavid Brownell 
4738ae12a0dSDavid Brownell static LIST_HEAD(board_list);
4748caab75fSGeert Uytterhoeven static LIST_HEAD(spi_controller_list);
4752b9603a0SFeng Tang 
4762b9603a0SFeng Tang /*
477be73e323SAndy Shevchenko  * Used to protect add/del operations on the board_info list and the
4788caab75fSGeert Uytterhoeven  * spi_controller list, and their matching process; it is also used to
4799b61e302SSuniel Mahesh  * protect the spi_master_idr IDR.
4802b9603a0SFeng Tang  */
48194040828SMatthias Kaehlcke static DEFINE_MUTEX(board_lock);
4828ae12a0dSDavid Brownell 
483ddf75be4SLukas Wunner /*
484ddf75be4SLukas Wunner  * Prevents addition of devices with same chip select and
485ddf75be4SLukas Wunner  * Prevents addition of devices with the same chip select and
486ddf75be4SLukas Wunner  */
487ddf75be4SLukas Wunner static DEFINE_MUTEX(spi_add_lock);
488ddf75be4SLukas Wunner 
489dc87c98eSGrant Likely /**
490dc87c98eSGrant Likely  * spi_alloc_device - Allocate a new SPI device
4918caab75fSGeert Uytterhoeven  * @ctlr: Controller to which device is connected
492dc87c98eSGrant Likely  * Context: can sleep
493dc87c98eSGrant Likely  *
494dc87c98eSGrant Likely  * Allows a driver to allocate and initialize a spi_device without
495dc87c98eSGrant Likely  * registering it immediately.  This allows a driver to directly
496dc87c98eSGrant Likely  * fill the spi_device with device parameters before calling
497dc87c98eSGrant Likely  * spi_add_device() on it.
498dc87c98eSGrant Likely  *
499dc87c98eSGrant Likely  * Caller is responsible to call spi_add_device() on the returned
5008caab75fSGeert Uytterhoeven  * spi_device structure to add it to the SPI controller.  If the caller
501dc87c98eSGrant Likely  * needs to discard the spi_device without adding it, then it should
502dc87c98eSGrant Likely  * call spi_dev_put() on it.
503dc87c98eSGrant Likely  *
50497d56dc6SJavier Martinez Canillas  * Return: a pointer to the new device, or NULL.
505dc87c98eSGrant Likely  */
5068caab75fSGeert Uytterhoeven struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
507dc87c98eSGrant Likely {
508dc87c98eSGrant Likely 	struct spi_device	*spi;
509dc87c98eSGrant Likely 
5108caab75fSGeert Uytterhoeven 	if (!spi_controller_get(ctlr))
511dc87c98eSGrant Likely 		return NULL;
512dc87c98eSGrant Likely 
5135fe5f05eSJingoo Han 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
514dc87c98eSGrant Likely 	if (!spi) {
5158caab75fSGeert Uytterhoeven 		spi_controller_put(ctlr);
516dc87c98eSGrant Likely 		return NULL;
517dc87c98eSGrant Likely 	}
518dc87c98eSGrant Likely 
5198caab75fSGeert Uytterhoeven 	spi->master = spi->controller = ctlr;
5208caab75fSGeert Uytterhoeven 	spi->dev.parent = &ctlr->dev;
521dc87c98eSGrant Likely 	spi->dev.bus = &spi_bus_type;
522dc87c98eSGrant Likely 	spi->dev.release = spidev_release;
523446411e1SAndreas Larsson 	spi->cs_gpio = -ENOENT;
524ea235786SJohn Garry 	spi->mode = ctlr->buswidth_override_bits;
525eca2ebc7SMartin Sperl 
526eca2ebc7SMartin Sperl 	spin_lock_init(&spi->statistics.lock);
527eca2ebc7SMartin Sperl 
528dc87c98eSGrant Likely 	device_initialize(&spi->dev);
529dc87c98eSGrant Likely 	return spi;
530dc87c98eSGrant Likely }
531dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_alloc_device);
532dc87c98eSGrant Likely 
533e13ac47bSJarkko Nikula static void spi_dev_set_name(struct spi_device *spi)
534e13ac47bSJarkko Nikula {
535e13ac47bSJarkko Nikula 	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
536e13ac47bSJarkko Nikula 
537e13ac47bSJarkko Nikula 	if (adev) {
538e13ac47bSJarkko Nikula 		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
539e13ac47bSJarkko Nikula 		return;
540e13ac47bSJarkko Nikula 	}
541e13ac47bSJarkko Nikula 
5428caab75fSGeert Uytterhoeven 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
543e13ac47bSJarkko Nikula 		     spi->chip_select);
544e13ac47bSJarkko Nikula }
545e13ac47bSJarkko Nikula 
546b6fb8d3aSMika Westerberg static int spi_dev_check(struct device *dev, void *data)
547b6fb8d3aSMika Westerberg {
548b6fb8d3aSMika Westerberg 	struct spi_device *spi = to_spi_device(dev);
549b6fb8d3aSMika Westerberg 	struct spi_device *new_spi = data;
550b6fb8d3aSMika Westerberg 
5518caab75fSGeert Uytterhoeven 	if (spi->controller == new_spi->controller &&
552b6fb8d3aSMika Westerberg 	    spi->chip_select == new_spi->chip_select)
553b6fb8d3aSMika Westerberg 		return -EBUSY;
554b6fb8d3aSMika Westerberg 	return 0;
555b6fb8d3aSMika Westerberg }
556b6fb8d3aSMika Westerberg 
557*c7299feaSSaravana Kannan static void spi_cleanup(struct spi_device *spi)
558*c7299feaSSaravana Kannan {
559*c7299feaSSaravana Kannan 	if (spi->controller->cleanup)
560*c7299feaSSaravana Kannan 		spi->controller->cleanup(spi);
561*c7299feaSSaravana Kannan }
562*c7299feaSSaravana Kannan 
563dc87c98eSGrant Likely /**
564dc87c98eSGrant Likely  * spi_add_device - Add spi_device allocated with spi_alloc_device
565dc87c98eSGrant Likely  * @spi: spi_device to register
566dc87c98eSGrant Likely  *
567dc87c98eSGrant Likely  * Companion function to spi_alloc_device.  Devices allocated with
568dc87c98eSGrant Likely  * spi_alloc_device can be added onto the spi bus with this function.
569dc87c98eSGrant Likely  *
57097d56dc6SJavier Martinez Canillas  * Return: 0 on success; negative errno on failure
571dc87c98eSGrant Likely  */
572dc87c98eSGrant Likely int spi_add_device(struct spi_device *spi)
573dc87c98eSGrant Likely {
5748caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
5758caab75fSGeert Uytterhoeven 	struct device *dev = ctlr->dev.parent;
576dc87c98eSGrant Likely 	int status;
577dc87c98eSGrant Likely 
578dc87c98eSGrant Likely 	/* Chipselects are numbered 0..max; validate. */
5798caab75fSGeert Uytterhoeven 	if (spi->chip_select >= ctlr->num_chipselect) {
5808caab75fSGeert Uytterhoeven 		dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
5818caab75fSGeert Uytterhoeven 			ctlr->num_chipselect);
582dc87c98eSGrant Likely 		return -EINVAL;
583dc87c98eSGrant Likely 	}
584dc87c98eSGrant Likely 
585dc87c98eSGrant Likely 	/* Set the bus ID string */
586e13ac47bSJarkko Nikula 	spi_dev_set_name(spi);
587e48880e0SDavid Brownell 
588e48880e0SDavid Brownell 	/* We need to make sure there's no other device with this
589e48880e0SDavid Brownell 	 * chipselect **BEFORE** we call setup(), else we'll trash
590e48880e0SDavid Brownell 	 * its configuration.  Lock against concurrent add() calls.
591e48880e0SDavid Brownell 	 */
592e48880e0SDavid Brownell 	mutex_lock(&spi_add_lock);
593e48880e0SDavid Brownell 
594b6fb8d3aSMika Westerberg 	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
595b6fb8d3aSMika Westerberg 	if (status) {
596e48880e0SDavid Brownell 		dev_err(dev, "chipselect %d already in use\n",
597e48880e0SDavid Brownell 				spi->chip_select);
598e48880e0SDavid Brownell 		goto done;
599e48880e0SDavid Brownell 	}
600e48880e0SDavid Brownell 
601ddf75be4SLukas Wunner 	/* Controller may unregister concurrently */
602ddf75be4SLukas Wunner 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
603ddf75be4SLukas Wunner 	    !device_is_registered(&ctlr->dev)) {
604ddf75be4SLukas Wunner 		status = -ENODEV;
605ddf75be4SLukas Wunner 		goto done;
606ddf75be4SLukas Wunner 	}
607ddf75be4SLukas Wunner 
608f3186dd8SLinus Walleij 	/* Descriptors take precedence */
609f3186dd8SLinus Walleij 	if (ctlr->cs_gpiods)
610f3186dd8SLinus Walleij 		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
611f3186dd8SLinus Walleij 	else if (ctlr->cs_gpios)
6128caab75fSGeert Uytterhoeven 		spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
61374317984SJean-Christophe PLAGNIOL-VILLARD 
614e48880e0SDavid Brownell 	/* Drivers may modify this initial i/o setup, but will
615e48880e0SDavid Brownell 	 * normally rely on the device being set up.  Devices
616e48880e0SDavid Brownell 	 * using SPI_CS_HIGH can't coexist well otherwise...
617e48880e0SDavid Brownell 	 */
6187d077197SDavid Brownell 	status = spi_setup(spi);
619dc87c98eSGrant Likely 	if (status < 0) {
620eb288a1fSLinus Walleij 		dev_err(dev, "can't setup %s, status %d\n",
621eb288a1fSLinus Walleij 				dev_name(&spi->dev), status);
622e48880e0SDavid Brownell 		goto done;
623dc87c98eSGrant Likely 	}
624dc87c98eSGrant Likely 
625e48880e0SDavid Brownell 	/* Device may be bound to an active driver when this returns */
626dc87c98eSGrant Likely 	status = device_add(&spi->dev);
627*c7299feaSSaravana Kannan 	if (status < 0) {
628eb288a1fSLinus Walleij 		dev_err(dev, "can't add %s, status %d\n",
629eb288a1fSLinus Walleij 				dev_name(&spi->dev), status);
630*c7299feaSSaravana Kannan 		spi_cleanup(spi);
631*c7299feaSSaravana Kannan 	} else {
63235f74fcaSKay Sievers 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
633*c7299feaSSaravana Kannan 	}
634e48880e0SDavid Brownell 
635e48880e0SDavid Brownell done:
636e48880e0SDavid Brownell 	mutex_unlock(&spi_add_lock);
637e48880e0SDavid Brownell 	return status;
638dc87c98eSGrant Likely }
639dc87c98eSGrant Likely EXPORT_SYMBOL_GPL(spi_add_device);
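
/*
 * Usage sketch for the two calls above (hypothetical values, not part of the
 * original file): an adapter driver that learns about a child device
 * out-of-band could instantiate it like this. Kept under #if 0 so it is not
 * built here.
 */
#if 0
static struct spi_device *example_attach_chip(struct spi_controller *ctlr)
{
	struct spi_device *spi;

	spi = spi_alloc_device(ctlr);
	if (!spi)
		return NULL;

	spi->chip_select = 0;
	spi->max_speed_hz = 1000000;
	spi->mode = SPI_MODE_3;
	strlcpy(spi->modalias, "example-chip", sizeof(spi->modalias));

	if (spi_add_device(spi)) {
		/* Not added: drop the reference taken by spi_alloc_device() */
		spi_dev_put(spi);
		return NULL;
	}
	return spi;
}
#endif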
6408ae12a0dSDavid Brownell 
64133e34dc6SDavid Brownell /**
64233e34dc6SDavid Brownell  * spi_new_device - instantiate one new SPI device
6438caab75fSGeert Uytterhoeven  * @ctlr: Controller to which device is connected
64433e34dc6SDavid Brownell  * @chip: Describes the SPI device
64533e34dc6SDavid Brownell  * Context: can sleep
64633e34dc6SDavid Brownell  *
64733e34dc6SDavid Brownell  * On typical mainboards, this is purely internal; and it's not needed
6488ae12a0dSDavid Brownell  * after board init creates the hard-wired devices.  Some development
6498ae12a0dSDavid Brownell  * platforms may not be able to use spi_register_board_info though, and
6508ae12a0dSDavid Brownell  * this is exported so that for example a USB or parport based adapter
6518ae12a0dSDavid Brownell  * driver could add devices (which it would learn about out-of-band).
652082c8cb4SDavid Brownell  *
65397d56dc6SJavier Martinez Canillas  * Return: the new device, or NULL.
6548ae12a0dSDavid Brownell  */
6558caab75fSGeert Uytterhoeven struct spi_device *spi_new_device(struct spi_controller *ctlr,
656e9d5a461SAdrian Bunk 				  struct spi_board_info *chip)
6578ae12a0dSDavid Brownell {
6588ae12a0dSDavid Brownell 	struct spi_device	*proxy;
6598ae12a0dSDavid Brownell 	int			status;
6608ae12a0dSDavid Brownell 
661082c8cb4SDavid Brownell 	/* NOTE:  caller did any chip->bus_num checks necessary.
662082c8cb4SDavid Brownell 	 *
663082c8cb4SDavid Brownell 	 * Also, unless we change the return value convention to use
664082c8cb4SDavid Brownell 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
665082c8cb4SDavid Brownell 	 * suggests syslogged diagnostics are best here (ugh).
666082c8cb4SDavid Brownell 	 */
667082c8cb4SDavid Brownell 
6688caab75fSGeert Uytterhoeven 	proxy = spi_alloc_device(ctlr);
669dc87c98eSGrant Likely 	if (!proxy)
6708ae12a0dSDavid Brownell 		return NULL;
6718ae12a0dSDavid Brownell 
672102eb975SGrant Likely 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
673102eb975SGrant Likely 
6748ae12a0dSDavid Brownell 	proxy->chip_select = chip->chip_select;
6758ae12a0dSDavid Brownell 	proxy->max_speed_hz = chip->max_speed_hz;
676980a01c9SDavid Brownell 	proxy->mode = chip->mode;
6778ae12a0dSDavid Brownell 	proxy->irq = chip->irq;
678102eb975SGrant Likely 	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
6798ae12a0dSDavid Brownell 	proxy->dev.platform_data = (void *) chip->platform_data;
6808ae12a0dSDavid Brownell 	proxy->controller_data = chip->controller_data;
6818ae12a0dSDavid Brownell 	proxy->controller_state = NULL;
6828ae12a0dSDavid Brownell 
68347afc77bSHeikki Krogerus 	if (chip->swnode) {
68447afc77bSHeikki Krogerus 		status = device_add_software_node(&proxy->dev, chip->swnode);
685826cf175SDmitry Torokhov 		if (status) {
6869d902c2aSColin Ian King 			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
687826cf175SDmitry Torokhov 				chip->modalias, status);
688826cf175SDmitry Torokhov 			goto err_dev_put;
689826cf175SDmitry Torokhov 		}
6908ae12a0dSDavid Brownell 	}
691dc87c98eSGrant Likely 
692826cf175SDmitry Torokhov 	status = spi_add_device(proxy);
693826cf175SDmitry Torokhov 	if (status < 0)
694df41a5daSHeikki Krogerus 		goto err_dev_put;
695826cf175SDmitry Torokhov 
696dc87c98eSGrant Likely 	return proxy;
697826cf175SDmitry Torokhov 
698826cf175SDmitry Torokhov err_dev_put:
699df41a5daSHeikki Krogerus 	device_remove_software_node(&proxy->dev);
700826cf175SDmitry Torokhov 	spi_dev_put(proxy);
701826cf175SDmitry Torokhov 	return NULL;
702dc87c98eSGrant Likely }
7038ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_new_device);
7048ae12a0dSDavid Brownell 
7053b1884c2SGeert Uytterhoeven /**
7063b1884c2SGeert Uytterhoeven  * spi_unregister_device - unregister a single SPI device
7073b1884c2SGeert Uytterhoeven  * @spi: spi_device to unregister
7083b1884c2SGeert Uytterhoeven  *
7093b1884c2SGeert Uytterhoeven  * Start making the passed SPI device vanish. Normally this would be handled
7108caab75fSGeert Uytterhoeven  * by spi_unregister_controller().
7113b1884c2SGeert Uytterhoeven  */
7123b1884c2SGeert Uytterhoeven void spi_unregister_device(struct spi_device *spi)
7133b1884c2SGeert Uytterhoeven {
714bd6c1644SGeert Uytterhoeven 	if (!spi)
715bd6c1644SGeert Uytterhoeven 		return;
716bd6c1644SGeert Uytterhoeven 
717*c7299feaSSaravana Kannan 	spi_cleanup(spi);
718*c7299feaSSaravana Kannan 
7198324147fSJohan Hovold 	if (spi->dev.of_node) {
720bd6c1644SGeert Uytterhoeven 		of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
7218324147fSJohan Hovold 		of_node_put(spi->dev.of_node);
7228324147fSJohan Hovold 	}
7237f24467fSOctavian Purdila 	if (ACPI_COMPANION(&spi->dev))
7247f24467fSOctavian Purdila 		acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
72547afc77bSHeikki Krogerus 	device_remove_software_node(&spi->dev);
7263b1884c2SGeert Uytterhoeven 	device_unregister(&spi->dev);
7273b1884c2SGeert Uytterhoeven }
7283b1884c2SGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_unregister_device);
7293b1884c2SGeert Uytterhoeven 
7308caab75fSGeert Uytterhoeven static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
7312b9603a0SFeng Tang 					      struct spi_board_info *bi)
7322b9603a0SFeng Tang {
7332b9603a0SFeng Tang 	struct spi_device *dev;
7342b9603a0SFeng Tang 
7358caab75fSGeert Uytterhoeven 	if (ctlr->bus_num != bi->bus_num)
7362b9603a0SFeng Tang 		return;
7372b9603a0SFeng Tang 
7388caab75fSGeert Uytterhoeven 	dev = spi_new_device(ctlr, bi);
7392b9603a0SFeng Tang 	if (!dev)
7408caab75fSGeert Uytterhoeven 		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
7412b9603a0SFeng Tang 			bi->modalias);
7422b9603a0SFeng Tang }
7432b9603a0SFeng Tang 
74433e34dc6SDavid Brownell /**
74533e34dc6SDavid Brownell  * spi_register_board_info - register SPI devices for a given board
74633e34dc6SDavid Brownell  * @info: array of chip descriptors
74733e34dc6SDavid Brownell  * @n: how many descriptors are provided
74833e34dc6SDavid Brownell  * Context: can sleep
74933e34dc6SDavid Brownell  *
7508ae12a0dSDavid Brownell  * Board-specific early init code calls this (probably during arch_initcall)
7518ae12a0dSDavid Brownell  * with segments of the SPI device table.  Any device nodes are created later,
7528ae12a0dSDavid Brownell  * after the relevant parent SPI controller (bus_num) is defined.  We keep
7538ae12a0dSDavid Brownell  * this table of devices forever, so that reloading a controller driver will
7548ae12a0dSDavid Brownell  * not make Linux forget about these hard-wired devices.
7558ae12a0dSDavid Brownell  *
7568ae12a0dSDavid Brownell  * Other code can also call this, e.g. a particular add-on board might provide
7578ae12a0dSDavid Brownell  * SPI devices through its expansion connector, so code initializing that board
7588ae12a0dSDavid Brownell  * would naturally declare its SPI devices.
7598ae12a0dSDavid Brownell  *
7608ae12a0dSDavid Brownell  * The board info passed can safely be __initdata ... but be careful of
7618ae12a0dSDavid Brownell  * any embedded pointers (platform_data, etc), they're copied as-is.
76297d56dc6SJavier Martinez Canillas  *
76397d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
7648ae12a0dSDavid Brownell  */
765fd4a319bSGrant Likely int spi_register_board_info(struct spi_board_info const *info, unsigned n)
7668ae12a0dSDavid Brownell {
7678ae12a0dSDavid Brownell 	struct boardinfo *bi;
7682b9603a0SFeng Tang 	int i;
7698ae12a0dSDavid Brownell 
770c7908a37SXiubo Li 	if (!n)
771f974cf57SDmitry Torokhov 		return 0;
772c7908a37SXiubo Li 
773f9bdb7fdSMarkus Elfring 	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
7748ae12a0dSDavid Brownell 	if (!bi)
7758ae12a0dSDavid Brownell 		return -ENOMEM;
7768ae12a0dSDavid Brownell 
7772b9603a0SFeng Tang 	for (i = 0; i < n; i++, bi++, info++) {
7788caab75fSGeert Uytterhoeven 		struct spi_controller *ctlr;
7792b9603a0SFeng Tang 
7802b9603a0SFeng Tang 		memcpy(&bi->board_info, info, sizeof(*info));
781826cf175SDmitry Torokhov 
78294040828SMatthias Kaehlcke 		mutex_lock(&board_lock);
7838ae12a0dSDavid Brownell 		list_add_tail(&bi->list, &board_list);
7848caab75fSGeert Uytterhoeven 		list_for_each_entry(ctlr, &spi_controller_list, list)
7858caab75fSGeert Uytterhoeven 			spi_match_controller_to_boardinfo(ctlr,
7868caab75fSGeert Uytterhoeven 							  &bi->board_info);
78794040828SMatthias Kaehlcke 		mutex_unlock(&board_lock);
7882b9603a0SFeng Tang 	}
7892b9603a0SFeng Tang 
7908ae12a0dSDavid Brownell 	return 0;
7918ae12a0dSDavid Brownell }
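
/*
 * Board file sketch (hypothetical values, not part of the original file):
 * callers typically declare an __initdata table and register it early,
 * before the matching controller (bus_num) is registered. Kept under #if 0
 * so it is not built here.
 */
#if 0
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "example-chip",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_board_init(void)
{
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
arch_initcall(example_board_init);
#endif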
7928ae12a0dSDavid Brownell 
7938ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
7948ae12a0dSDavid Brownell 
795d347b4aaSDavid Bauer static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
796b158935fSMark Brown {
79786527bcbSAndy Shevchenko 	bool activate = enable;
79825093bdeSAlexandru Ardelean 
799d40f0b6fSDouglas Anderson 	/*
800d40f0b6fSDouglas Anderson 	 * Avoid calling into the driver (or doing delays) if the chip select
801d40f0b6fSDouglas Anderson 	 * isn't actually changing from the last time this was called.
802d40f0b6fSDouglas Anderson 	 */
803d347b4aaSDavid Bauer 	if (!force && (spi->controller->last_cs_enable == enable) &&
804d40f0b6fSDouglas Anderson 	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
805d40f0b6fSDouglas Anderson 		return;
806d40f0b6fSDouglas Anderson 
807d40f0b6fSDouglas Anderson 	spi->controller->last_cs_enable = enable;
808d40f0b6fSDouglas Anderson 	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
809d40f0b6fSDouglas Anderson 
8100486d9f9Sleilk.liu 	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
8110486d9f9Sleilk.liu 	    !spi->controller->set_cs_timing) {
81286527bcbSAndy Shevchenko 		if (activate)
81325093bdeSAlexandru Ardelean 			spi_delay_exec(&spi->controller->cs_setup, NULL);
81425093bdeSAlexandru Ardelean 		else
81525093bdeSAlexandru Ardelean 			spi_delay_exec(&spi->controller->cs_hold, NULL);
81625093bdeSAlexandru Ardelean 	}
81725093bdeSAlexandru Ardelean 
818b158935fSMark Brown 	if (spi->mode & SPI_CS_HIGH)
819b158935fSMark Brown 		enable = !enable;
820b158935fSMark Brown 
821f3186dd8SLinus Walleij 	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
822f3186dd8SLinus Walleij 		if (!(spi->mode & SPI_NO_CS)) {
823f3186dd8SLinus Walleij 			if (spi->cs_gpiod)
824766c6b63SSven Van Asbroeck 				/* polarity handled by gpiolib */
82586527bcbSAndy Shevchenko 				gpiod_set_value_cansleep(spi->cs_gpiod, activate);
826f3186dd8SLinus Walleij 			else
827766c6b63SSven Van Asbroeck 				/*
828766c6b63SSven Van Asbroeck 				 * Invert the enable line, as active-low is the
829766c6b63SSven Van Asbroeck 				 * default for SPI.
830766c6b63SSven Van Asbroeck 				 */
83128f7604fSFelix Fietkau 				gpio_set_value_cansleep(spi->cs_gpio, !enable);
832f3186dd8SLinus Walleij 		}
8338eee6b9dSThor Thayer 		/* Some SPI masters need both GPIO CS & slave_select */
8348caab75fSGeert Uytterhoeven 		if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
8358caab75fSGeert Uytterhoeven 		    spi->controller->set_cs)
8368caab75fSGeert Uytterhoeven 			spi->controller->set_cs(spi, !enable);
8378caab75fSGeert Uytterhoeven 	} else if (spi->controller->set_cs) {
8388caab75fSGeert Uytterhoeven 		spi->controller->set_cs(spi, !enable);
8398eee6b9dSThor Thayer 	}
84025093bdeSAlexandru Ardelean 
8410486d9f9Sleilk.liu 	if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio) ||
8420486d9f9Sleilk.liu 	    !spi->controller->set_cs_timing) {
84386527bcbSAndy Shevchenko 		if (!activate)
84425093bdeSAlexandru Ardelean 			spi_delay_exec(&spi->controller->cs_inactive, NULL);
84525093bdeSAlexandru Ardelean 	}
846b158935fSMark Brown }
847b158935fSMark Brown 
8482de440f5SGeert Uytterhoeven #ifdef CONFIG_HAS_DMA
84946336966SBoris Brezillon int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
8506ad45a27SMark Brown 		struct sg_table *sgt, void *buf, size_t len,
8516ad45a27SMark Brown 		enum dma_data_direction dir)
8526ad45a27SMark Brown {
8536ad45a27SMark Brown 	const bool vmalloced_buf = is_vmalloc_addr(buf);
854df88e91bSAndy Shevchenko 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
855b1b8153cSVignesh R #ifdef CONFIG_HIGHMEM
856b1b8153cSVignesh R 	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
857b1b8153cSVignesh R 				(unsigned long)buf < (PKMAP_BASE +
858b1b8153cSVignesh R 					(LAST_PKMAP * PAGE_SIZE)));
859b1b8153cSVignesh R #else
860b1b8153cSVignesh R 	const bool kmap_buf = false;
861b1b8153cSVignesh R #endif
86265598c13SAndrew Gabbasov 	int desc_len;
86365598c13SAndrew Gabbasov 	int sgs;
8646ad45a27SMark Brown 	struct page *vm_page;
8658dd4a016SJuan Gutierrez 	struct scatterlist *sg;
8666ad45a27SMark Brown 	void *sg_buf;
8676ad45a27SMark Brown 	size_t min;
8686ad45a27SMark Brown 	int i, ret;
8696ad45a27SMark Brown 
870b1b8153cSVignesh R 	if (vmalloced_buf || kmap_buf) {
871df88e91bSAndy Shevchenko 		desc_len = min_t(int, max_seg_size, PAGE_SIZE);
87265598c13SAndrew Gabbasov 		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
8730569a88fSVignesh R 	} else if (virt_addr_valid(buf)) {
8748caab75fSGeert Uytterhoeven 		desc_len = min_t(int, max_seg_size, ctlr->max_dma_len);
87565598c13SAndrew Gabbasov 		sgs = DIV_ROUND_UP(len, desc_len);
8760569a88fSVignesh R 	} else {
8770569a88fSVignesh R 		return -EINVAL;
87865598c13SAndrew Gabbasov 	}
87965598c13SAndrew Gabbasov 
8806ad45a27SMark Brown 	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
8816ad45a27SMark Brown 	if (ret != 0)
8826ad45a27SMark Brown 		return ret;
8836ad45a27SMark Brown 
8848dd4a016SJuan Gutierrez 	sg = &sgt->sgl[0];
8856ad45a27SMark Brown 	for (i = 0; i < sgs; i++) {
8866ad45a27SMark Brown 
887b1b8153cSVignesh R 		if (vmalloced_buf || kmap_buf) {
888ce99319aSMaxime Chevallier 			/*
889ce99319aSMaxime Chevallier 			 * Next scatterlist entry size is the minimum between
890ce99319aSMaxime Chevallier 			 * the desc_len and the remaining buffer length that
891ce99319aSMaxime Chevallier 			 * fits in a page.
892ce99319aSMaxime Chevallier 			 */
893ce99319aSMaxime Chevallier 			min = min_t(size_t, desc_len,
894ce99319aSMaxime Chevallier 				    min_t(size_t, len,
895ce99319aSMaxime Chevallier 					  PAGE_SIZE - offset_in_page(buf)));
896b1b8153cSVignesh R 			if (vmalloced_buf)
8976ad45a27SMark Brown 				vm_page = vmalloc_to_page(buf);
898b1b8153cSVignesh R 			else
899b1b8153cSVignesh R 				vm_page = kmap_to_page(buf);
9006ad45a27SMark Brown 			if (!vm_page) {
9016ad45a27SMark Brown 				sg_free_table(sgt);
9026ad45a27SMark Brown 				return -ENOMEM;
9036ad45a27SMark Brown 			}
9048dd4a016SJuan Gutierrez 			sg_set_page(sg, vm_page,
905c1aefbddSCharles Keepax 				    min, offset_in_page(buf));
9066ad45a27SMark Brown 		} else {
90765598c13SAndrew Gabbasov 			min = min_t(size_t, len, desc_len);
9086ad45a27SMark Brown 			sg_buf = buf;
9098dd4a016SJuan Gutierrez 			sg_set_buf(sg, sg_buf, min);
9106ad45a27SMark Brown 		}
9116ad45a27SMark Brown 
9126ad45a27SMark Brown 		buf += min;
9136ad45a27SMark Brown 		len -= min;
9148dd4a016SJuan Gutierrez 		sg = sg_next(sg);
9156ad45a27SMark Brown 	}
9166ad45a27SMark Brown 
9176ad45a27SMark Brown 	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
91889e4b66aSGeert Uytterhoeven 	if (!ret)
91989e4b66aSGeert Uytterhoeven 		ret = -ENOMEM;
9206ad45a27SMark Brown 	if (ret < 0) {
9216ad45a27SMark Brown 		sg_free_table(sgt);
9226ad45a27SMark Brown 		return ret;
9236ad45a27SMark Brown 	}
9246ad45a27SMark Brown 
9256ad45a27SMark Brown 	sgt->nents = ret;
9266ad45a27SMark Brown 
9276ad45a27SMark Brown 	return 0;
9286ad45a27SMark Brown }
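
/*
 * Worked example of the segmentation above (illustrative, not part of the
 * original file), assuming PAGE_SIZE == 4096 and a larger max_seg_size: a
 * vmalloc'ed buffer of 10000 bytes starting 100 bytes into a page gives
 *
 *	desc_len = 4096
 *	sgs = DIV_ROUND_UP(10000 + 100, 4096) = 3
 *
 * i.e. three scatterlist entries of 3996, 4096 and 1908 bytes, each backed
 * by the page returned by vmalloc_to_page() for that chunk.
 */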
9296ad45a27SMark Brown 
93046336966SBoris Brezillon void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
9316ad45a27SMark Brown 		   struct sg_table *sgt, enum dma_data_direction dir)
9326ad45a27SMark Brown {
9336ad45a27SMark Brown 	if (sgt->orig_nents) {
9346ad45a27SMark Brown 		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
9356ad45a27SMark Brown 		sg_free_table(sgt);
9366ad45a27SMark Brown 	}
9376ad45a27SMark Brown }
9386ad45a27SMark Brown 
9398caab75fSGeert Uytterhoeven static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
94099adef31SMark Brown {
94199adef31SMark Brown 	struct device *tx_dev, *rx_dev;
94299adef31SMark Brown 	struct spi_transfer *xfer;
9436ad45a27SMark Brown 	int ret;
9443a2eba9bSMark Brown 
9458caab75fSGeert Uytterhoeven 	if (!ctlr->can_dma)
94699adef31SMark Brown 		return 0;
94799adef31SMark Brown 
9488caab75fSGeert Uytterhoeven 	if (ctlr->dma_tx)
9498caab75fSGeert Uytterhoeven 		tx_dev = ctlr->dma_tx->device->dev;
950c37f45b5SLeilk Liu 	else
9518caab75fSGeert Uytterhoeven 		tx_dev = ctlr->dev.parent;
952c37f45b5SLeilk Liu 
9538caab75fSGeert Uytterhoeven 	if (ctlr->dma_rx)
9548caab75fSGeert Uytterhoeven 		rx_dev = ctlr->dma_rx->device->dev;
955c37f45b5SLeilk Liu 	else
9568caab75fSGeert Uytterhoeven 		rx_dev = ctlr->dev.parent;
95799adef31SMark Brown 
95899adef31SMark Brown 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
9598caab75fSGeert Uytterhoeven 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
96099adef31SMark Brown 			continue;
96199adef31SMark Brown 
96299adef31SMark Brown 		if (xfer->tx_buf != NULL) {
9638caab75fSGeert Uytterhoeven 			ret = spi_map_buf(ctlr, tx_dev, &xfer->tx_sg,
9646ad45a27SMark Brown 					  (void *)xfer->tx_buf, xfer->len,
96599adef31SMark Brown 					  DMA_TO_DEVICE);
9666ad45a27SMark Brown 			if (ret != 0)
9676ad45a27SMark Brown 				return ret;
96899adef31SMark Brown 		}
96999adef31SMark Brown 
97099adef31SMark Brown 		if (xfer->rx_buf != NULL) {
9718caab75fSGeert Uytterhoeven 			ret = spi_map_buf(ctlr, rx_dev, &xfer->rx_sg,
97299adef31SMark Brown 					  xfer->rx_buf, xfer->len,
97399adef31SMark Brown 					  DMA_FROM_DEVICE);
9746ad45a27SMark Brown 			if (ret != 0) {
9758caab75fSGeert Uytterhoeven 				spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg,
9766ad45a27SMark Brown 					      DMA_TO_DEVICE);
9776ad45a27SMark Brown 				return ret;
97899adef31SMark Brown 			}
97999adef31SMark Brown 		}
98099adef31SMark Brown 	}
98199adef31SMark Brown 
9828caab75fSGeert Uytterhoeven 	ctlr->cur_msg_mapped = true;
98399adef31SMark Brown 
98499adef31SMark Brown 	return 0;
98599adef31SMark Brown }
98699adef31SMark Brown 
9878caab75fSGeert Uytterhoeven static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
98899adef31SMark Brown {
98999adef31SMark Brown 	struct spi_transfer *xfer;
99099adef31SMark Brown 	struct device *tx_dev, *rx_dev;
99199adef31SMark Brown 
9928caab75fSGeert Uytterhoeven 	if (!ctlr->cur_msg_mapped || !ctlr->can_dma)
99399adef31SMark Brown 		return 0;
99499adef31SMark Brown 
9958caab75fSGeert Uytterhoeven 	if (ctlr->dma_tx)
9968caab75fSGeert Uytterhoeven 		tx_dev = ctlr->dma_tx->device->dev;
997c37f45b5SLeilk Liu 	else
9988caab75fSGeert Uytterhoeven 		tx_dev = ctlr->dev.parent;
999c37f45b5SLeilk Liu 
10008caab75fSGeert Uytterhoeven 	if (ctlr->dma_rx)
10018caab75fSGeert Uytterhoeven 		rx_dev = ctlr->dma_rx->device->dev;
1002c37f45b5SLeilk Liu 	else
10038caab75fSGeert Uytterhoeven 		rx_dev = ctlr->dev.parent;
100499adef31SMark Brown 
100599adef31SMark Brown 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
10068caab75fSGeert Uytterhoeven 		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
100799adef31SMark Brown 			continue;
100899adef31SMark Brown 
10098caab75fSGeert Uytterhoeven 		spi_unmap_buf(ctlr, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
10108caab75fSGeert Uytterhoeven 		spi_unmap_buf(ctlr, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
101199adef31SMark Brown 	}
101299adef31SMark Brown 
1013809b1b04SRobin Gong 	ctlr->cur_msg_mapped = false;
1014809b1b04SRobin Gong 
101599adef31SMark Brown 	return 0;
101699adef31SMark Brown }
10172de440f5SGeert Uytterhoeven #else /* !CONFIG_HAS_DMA */
10188caab75fSGeert Uytterhoeven static inline int __spi_map_msg(struct spi_controller *ctlr,
10192de440f5SGeert Uytterhoeven 				struct spi_message *msg)
10202de440f5SGeert Uytterhoeven {
10212de440f5SGeert Uytterhoeven 	return 0;
10222de440f5SGeert Uytterhoeven }
10232de440f5SGeert Uytterhoeven 
10248caab75fSGeert Uytterhoeven static inline int __spi_unmap_msg(struct spi_controller *ctlr,
10252de440f5SGeert Uytterhoeven 				  struct spi_message *msg)
10262de440f5SGeert Uytterhoeven {
10272de440f5SGeert Uytterhoeven 	return 0;
10282de440f5SGeert Uytterhoeven }
10292de440f5SGeert Uytterhoeven #endif /* !CONFIG_HAS_DMA */
10302de440f5SGeert Uytterhoeven 
10318caab75fSGeert Uytterhoeven static inline int spi_unmap_msg(struct spi_controller *ctlr,
10324b786458SMartin Sperl 				struct spi_message *msg)
10334b786458SMartin Sperl {
10344b786458SMartin Sperl 	struct spi_transfer *xfer;
10354b786458SMartin Sperl 
10364b786458SMartin Sperl 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
10374b786458SMartin Sperl 		/*
10384b786458SMartin Sperl 		 * If tx_buf or rx_buf were originally NULL and were replaced by
10394b786458SMartin Sperl 		 * the controller's dummy buffers, restore them to NULL.
10404b786458SMartin Sperl 		 */
10418caab75fSGeert Uytterhoeven 		if (xfer->tx_buf == ctlr->dummy_tx)
10424b786458SMartin Sperl 			xfer->tx_buf = NULL;
10438caab75fSGeert Uytterhoeven 		if (xfer->rx_buf == ctlr->dummy_rx)
10444b786458SMartin Sperl 			xfer->rx_buf = NULL;
10454b786458SMartin Sperl 	}
10464b786458SMartin Sperl 
10478caab75fSGeert Uytterhoeven 	return __spi_unmap_msg(ctlr, msg);
10484b786458SMartin Sperl }
10494b786458SMartin Sperl 
10508caab75fSGeert Uytterhoeven static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
10512de440f5SGeert Uytterhoeven {
10522de440f5SGeert Uytterhoeven 	struct spi_transfer *xfer;
10532de440f5SGeert Uytterhoeven 	void *tmp;
10542de440f5SGeert Uytterhoeven 	unsigned int max_tx, max_rx;
10552de440f5SGeert Uytterhoeven 
1056aee67fe8Sdillon min 	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
1057aee67fe8Sdillon min 		&& !(msg->spi->mode & SPI_3WIRE)) {
10582de440f5SGeert Uytterhoeven 		max_tx = 0;
10592de440f5SGeert Uytterhoeven 		max_rx = 0;
10602de440f5SGeert Uytterhoeven 
10612de440f5SGeert Uytterhoeven 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
10628caab75fSGeert Uytterhoeven 			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
10632de440f5SGeert Uytterhoeven 			    !xfer->tx_buf)
10642de440f5SGeert Uytterhoeven 				max_tx = max(xfer->len, max_tx);
10658caab75fSGeert Uytterhoeven 			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
10662de440f5SGeert Uytterhoeven 			    !xfer->rx_buf)
10672de440f5SGeert Uytterhoeven 				max_rx = max(xfer->len, max_rx);
10682de440f5SGeert Uytterhoeven 		}
10692de440f5SGeert Uytterhoeven 
10702de440f5SGeert Uytterhoeven 		if (max_tx) {
10718caab75fSGeert Uytterhoeven 			tmp = krealloc(ctlr->dummy_tx, max_tx,
10722de440f5SGeert Uytterhoeven 				       GFP_KERNEL | GFP_DMA);
10732de440f5SGeert Uytterhoeven 			if (!tmp)
10742de440f5SGeert Uytterhoeven 				return -ENOMEM;
10758caab75fSGeert Uytterhoeven 			ctlr->dummy_tx = tmp;
10762de440f5SGeert Uytterhoeven 			memset(tmp, 0, max_tx);
10772de440f5SGeert Uytterhoeven 		}
10782de440f5SGeert Uytterhoeven 
10792de440f5SGeert Uytterhoeven 		if (max_rx) {
10808caab75fSGeert Uytterhoeven 			tmp = krealloc(ctlr->dummy_rx, max_rx,
10812de440f5SGeert Uytterhoeven 				       GFP_KERNEL | GFP_DMA);
10822de440f5SGeert Uytterhoeven 			if (!tmp)
10832de440f5SGeert Uytterhoeven 				return -ENOMEM;
10848caab75fSGeert Uytterhoeven 			ctlr->dummy_rx = tmp;
10852de440f5SGeert Uytterhoeven 		}
10862de440f5SGeert Uytterhoeven 
10872de440f5SGeert Uytterhoeven 		if (max_tx || max_rx) {
10882de440f5SGeert Uytterhoeven 			list_for_each_entry(xfer, &msg->transfers,
10892de440f5SGeert Uytterhoeven 					    transfer_list) {
10905442dcaaSChris Lesiak 				if (!xfer->len)
10915442dcaaSChris Lesiak 					continue;
10922de440f5SGeert Uytterhoeven 				if (!xfer->tx_buf)
10938caab75fSGeert Uytterhoeven 					xfer->tx_buf = ctlr->dummy_tx;
10942de440f5SGeert Uytterhoeven 				if (!xfer->rx_buf)
10958caab75fSGeert Uytterhoeven 					xfer->rx_buf = ctlr->dummy_rx;
10962de440f5SGeert Uytterhoeven 			}
10972de440f5SGeert Uytterhoeven 		}
10982de440f5SGeert Uytterhoeven 	}
10992de440f5SGeert Uytterhoeven 
11008caab75fSGeert Uytterhoeven 	return __spi_map_msg(ctlr, msg);
11012de440f5SGeert Uytterhoeven }
110299adef31SMark Brown 
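/*
 * spi_transfer_wait - wait for the controller to complete @xfer
 *
 * In slave mode the wait is interruptible and has no timeout.  In master
 * mode the timeout is derived from the transfer size: the nominal duration
 * is 8 * 1000 * len / speed_hz milliseconds (8 bits per byte), which is
 * then doubled and padded with 200 ms of tolerance.  For example, a
 * 4096-byte transfer at 1 MHz gives a nominal 32 ms and a 264 ms timeout;
 * if no transfer speed is set, 100 kHz is assumed.
 */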
1103810923f3SLubomir Rintel static int spi_transfer_wait(struct spi_controller *ctlr,
1104810923f3SLubomir Rintel 			     struct spi_message *msg,
1105810923f3SLubomir Rintel 			     struct spi_transfer *xfer)
1106810923f3SLubomir Rintel {
1107810923f3SLubomir Rintel 	struct spi_statistics *statm = &ctlr->statistics;
1108810923f3SLubomir Rintel 	struct spi_statistics *stats = &msg->spi->statistics;
11096170d077SXu Yilun 	u32 speed_hz = xfer->speed_hz;
111049686df5SColin Ian King 	unsigned long long ms;
1111810923f3SLubomir Rintel 
1112810923f3SLubomir Rintel 	if (spi_controller_is_slave(ctlr)) {
1113810923f3SLubomir Rintel 		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
1114810923f3SLubomir Rintel 			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
1115810923f3SLubomir Rintel 			return -EINTR;
1116810923f3SLubomir Rintel 		}
1117810923f3SLubomir Rintel 	} else {
11186170d077SXu Yilun 		if (!speed_hz)
11196170d077SXu Yilun 			speed_hz = 100000;
11206170d077SXu Yilun 
1121810923f3SLubomir Rintel 		ms = 8LL * 1000LL * xfer->len;
11226170d077SXu Yilun 		do_div(ms, speed_hz);
1123810923f3SLubomir Rintel 		ms += ms + 200; /* some tolerance */
1124810923f3SLubomir Rintel 
1125810923f3SLubomir Rintel 		if (ms > UINT_MAX)
1126810923f3SLubomir Rintel 			ms = UINT_MAX;
1127810923f3SLubomir Rintel 
1128810923f3SLubomir Rintel 		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
1129810923f3SLubomir Rintel 						 msecs_to_jiffies(ms));
1130810923f3SLubomir Rintel 
1131810923f3SLubomir Rintel 		if (ms == 0) {
1132810923f3SLubomir Rintel 			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
1133810923f3SLubomir Rintel 			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
1134810923f3SLubomir Rintel 			dev_err(&msg->spi->dev,
1135810923f3SLubomir Rintel 				"SPI transfer timed out\n");
1136810923f3SLubomir Rintel 			return -ETIMEDOUT;
1137810923f3SLubomir Rintel 		}
1138810923f3SLubomir Rintel 	}
1139810923f3SLubomir Rintel 
1140810923f3SLubomir Rintel 	return 0;
1141810923f3SLubomir Rintel }
1142810923f3SLubomir Rintel 
11430ff2de8bSMartin Sperl static void _spi_transfer_delay_ns(u32 ns)
11440ff2de8bSMartin Sperl {
11450ff2de8bSMartin Sperl 	if (!ns)
11460ff2de8bSMartin Sperl 		return;
11470ff2de8bSMartin Sperl 	if (ns <= 1000) {
11480ff2de8bSMartin Sperl 		ndelay(ns);
11490ff2de8bSMartin Sperl 	} else {
11500ff2de8bSMartin Sperl 		u32 us = DIV_ROUND_UP(ns, 1000);
11510ff2de8bSMartin Sperl 
11520ff2de8bSMartin Sperl 		if (us <= 10)
11530ff2de8bSMartin Sperl 			udelay(us);
11540ff2de8bSMartin Sperl 		else
11550ff2de8bSMartin Sperl 			usleep_range(us, us + DIV_ROUND_UP(us, 10));
11560ff2de8bSMartin Sperl 	}
11570ff2de8bSMartin Sperl }
11580ff2de8bSMartin Sperl 
11593984d39bSAlexandru Ardelean int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
11600ff2de8bSMartin Sperl {
1161b2c98153SAlexandru Ardelean 	u32 delay = _delay->value;
1162b2c98153SAlexandru Ardelean 	u32 unit = _delay->unit;
1163d5864e5bSMartin Sperl 	u32 hz;
11640ff2de8bSMartin Sperl 
1165b2c98153SAlexandru Ardelean 	if (!delay)
1166b2c98153SAlexandru Ardelean 		return 0;
11670ff2de8bSMartin Sperl 
11680ff2de8bSMartin Sperl 	switch (unit) {
11690ff2de8bSMartin Sperl 	case SPI_DELAY_UNIT_USECS:
11700ff2de8bSMartin Sperl 		delay *= 1000;
11710ff2de8bSMartin Sperl 		break;
11720ff2de8bSMartin Sperl 	case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
11730ff2de8bSMartin Sperl 		break;
1174d5864e5bSMartin Sperl 	case SPI_DELAY_UNIT_SCK:
1175b2c98153SAlexandru Ardelean 		/* clock cycles need to be obtained from spi_transfer */
1176b2c98153SAlexandru Ardelean 		if (!xfer)
1177b2c98153SAlexandru Ardelean 			return -EINVAL;
1178d5864e5bSMartin Sperl 		/* if there is no effective speed known, then approximate
1179d5864e5bSMartin Sperl 		 * by underestimating with half the requested Hz
1180d5864e5bSMartin Sperl 		 */
1181d5864e5bSMartin Sperl 		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1182b2c98153SAlexandru Ardelean 		if (!hz)
1183b2c98153SAlexandru Ardelean 			return -EINVAL;
1184d5864e5bSMartin Sperl 		delay *= DIV_ROUND_UP(1000000000, hz);
1185d5864e5bSMartin Sperl 		break;
11860ff2de8bSMartin Sperl 	default:
1187b2c98153SAlexandru Ardelean 		return -EINVAL;
1188b2c98153SAlexandru Ardelean 	}
1189b2c98153SAlexandru Ardelean 
1190b2c98153SAlexandru Ardelean 	return delay;
1191b2c98153SAlexandru Ardelean }
11923984d39bSAlexandru Ardelean EXPORT_SYMBOL_GPL(spi_delay_to_ns);
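
/*
 * Example: converting a delay specification to nanoseconds with
 * spi_delay_to_ns().  A minimal sketch; the values are illustrative only.
 *
 *	struct spi_delay d = {
 *		.value = 4,
 *		.unit  = SPI_DELAY_UNIT_SCK,
 *	};
 *	int ns = spi_delay_to_ns(&d, xfer);
 *
 * With xfer->effective_speed_hz set to 10000000 (10 MHz) one clock cycle
 * is DIV_ROUND_UP(1000000000, 10000000) = 100 ns, so four SCK cycles give
 * ns = 400.  When no effective speed is known, half of xfer->speed_hz is
 * used instead, which overestimates the cycle time (and thus the delay).
 */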
1193b2c98153SAlexandru Ardelean 
1194b2c98153SAlexandru Ardelean int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
1195b2c98153SAlexandru Ardelean {
1196b2c98153SAlexandru Ardelean 	int delay;
1197b2c98153SAlexandru Ardelean 
11988fede89fSMark Brown 	might_sleep();
11998fede89fSMark Brown 
1200b2c98153SAlexandru Ardelean 	if (!_delay)
1201b2c98153SAlexandru Ardelean 		return -EINVAL;
1202b2c98153SAlexandru Ardelean 
12033984d39bSAlexandru Ardelean 	delay = spi_delay_to_ns(_delay, xfer);
1204b2c98153SAlexandru Ardelean 	if (delay < 0)
1205b2c98153SAlexandru Ardelean 		return delay;
1206b2c98153SAlexandru Ardelean 
1207b2c98153SAlexandru Ardelean 	_spi_transfer_delay_ns(delay);
1208b2c98153SAlexandru Ardelean 
1209b2c98153SAlexandru Ardelean 	return 0;
1210b2c98153SAlexandru Ardelean }
1211b2c98153SAlexandru Ardelean EXPORT_SYMBOL_GPL(spi_delay_exec);
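
/*
 * Example: a controller driver executing a transfer's inter-word delay in
 * software via spi_delay_exec().  A minimal sketch with hypothetical
 * foo_* names and register offsets; only spi_delay_exec() and the
 * spi_transfer word_delay field come from the SPI core, and error handling
 * is omitted.  Note that spi_delay_exec() may sleep, so it must only be
 * called from process context (as transfer_one() is).
 *
 *	static void foo_tx_word(struct foo_priv *priv, struct spi_transfer *t,
 *				u32 word)
 *	{
 *		writel(word, priv->base + FOO_TXDATA);
 *		spi_delay_exec(&t->word_delay, t);
 *	}
 */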
1212b2c98153SAlexandru Ardelean 
12130ff2de8bSMartin Sperl static void _spi_transfer_cs_change_delay(struct spi_message *msg,
12140ff2de8bSMartin Sperl 					  struct spi_transfer *xfer)
12150ff2de8bSMartin Sperl {
1216329f0dacSAlexandru Ardelean 	u32 delay = xfer->cs_change_delay.value;
1217329f0dacSAlexandru Ardelean 	u32 unit = xfer->cs_change_delay.unit;
1218329f0dacSAlexandru Ardelean 	int ret;
12190ff2de8bSMartin Sperl 
12200ff2de8bSMartin Sperl 	/* return early on "fast" (zero delay) mode - for everything but USECS */
12216b3f236aSAlexandru Ardelean 	if (!delay) {
12226b3f236aSAlexandru Ardelean 		if (unit == SPI_DELAY_UNIT_USECS)
12236b3f236aSAlexandru Ardelean 			_spi_transfer_delay_ns(10000);
12240ff2de8bSMartin Sperl 		return;
12256b3f236aSAlexandru Ardelean 	}
12260ff2de8bSMartin Sperl 
1227329f0dacSAlexandru Ardelean 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1228329f0dacSAlexandru Ardelean 	if (ret) {
12290ff2de8bSMartin Sperl 		dev_err_once(&msg->spi->dev,
12300ff2de8bSMartin Sperl 			     "Use of unsupported delay unit %i, using default of 10us\n",
1231329f0dacSAlexandru Ardelean 			     unit);
1232329f0dacSAlexandru Ardelean 		_spi_transfer_delay_ns(10000);
12330ff2de8bSMartin Sperl 	}
12340ff2de8bSMartin Sperl }
12350ff2de8bSMartin Sperl 
1236b158935fSMark Brown /*
1237b158935fSMark Brown  * spi_transfer_one_message - Default implementation of transfer_one_message()
1238b158935fSMark Brown  *
1239b158935fSMark Brown  * This is a standard implementation of transfer_one_message() for
12408ba811a7SMoritz Fischer  * drivers which implement a transfer_one() operation.  It provides
1241b158935fSMark Brown  * standard handling of delays and chip select management.
1242b158935fSMark Brown  */
12438caab75fSGeert Uytterhoeven static int spi_transfer_one_message(struct spi_controller *ctlr,
1244b158935fSMark Brown 				    struct spi_message *msg)
1245b158935fSMark Brown {
1246b158935fSMark Brown 	struct spi_transfer *xfer;
1247b158935fSMark Brown 	bool keep_cs = false;
1248b158935fSMark Brown 	int ret = 0;
12498caab75fSGeert Uytterhoeven 	struct spi_statistics *statm = &ctlr->statistics;
1250eca2ebc7SMartin Sperl 	struct spi_statistics *stats = &msg->spi->statistics;
1251b158935fSMark Brown 
1252d347b4aaSDavid Bauer 	spi_set_cs(msg->spi, true, false);
1253b158935fSMark Brown 
1254eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1255eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1256eca2ebc7SMartin Sperl 
1257b158935fSMark Brown 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1258b158935fSMark Brown 		trace_spi_transfer_start(msg, xfer);
1259b158935fSMark Brown 
12608caab75fSGeert Uytterhoeven 		spi_statistics_add_transfer_stats(statm, xfer, ctlr);
12618caab75fSGeert Uytterhoeven 		spi_statistics_add_transfer_stats(stats, xfer, ctlr);
1262eca2ebc7SMartin Sperl 
1263b42faeeeSVladimir Oltean 		if (!ctlr->ptp_sts_supported) {
1264b42faeeeSVladimir Oltean 			xfer->ptp_sts_word_pre = 0;
1265b42faeeeSVladimir Oltean 			ptp_read_system_prets(xfer->ptp_sts);
1266b42faeeeSVladimir Oltean 		}
1267b42faeeeSVladimir Oltean 
1268b3063203SNicolas Saenz Julienne 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
12698caab75fSGeert Uytterhoeven 			reinit_completion(&ctlr->xfer_completion);
1270b158935fSMark Brown 
1271809b1b04SRobin Gong fallback_pio:
12728caab75fSGeert Uytterhoeven 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1273b158935fSMark Brown 			if (ret < 0) {
1274809b1b04SRobin Gong 				if (ctlr->cur_msg_mapped &&
1275809b1b04SRobin Gong 				   (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1276809b1b04SRobin Gong 					__spi_unmap_msg(ctlr, msg);
1277809b1b04SRobin Gong 					ctlr->fallback = true;
1278809b1b04SRobin Gong 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1279809b1b04SRobin Gong 					goto fallback_pio;
1280809b1b04SRobin Gong 				}
1281809b1b04SRobin Gong 
1282eca2ebc7SMartin Sperl 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1283eca2ebc7SMartin Sperl 							       errors);
1284eca2ebc7SMartin Sperl 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1285eca2ebc7SMartin Sperl 							       errors);
1286b158935fSMark Brown 				dev_err(&msg->spi->dev,
1287b158935fSMark Brown 					"SPI transfer failed: %d\n", ret);
1288b158935fSMark Brown 				goto out;
1289b158935fSMark Brown 			}
1290b158935fSMark Brown 
1291d57e7960SMark Brown 			if (ret > 0) {
1292810923f3SLubomir Rintel 				ret = spi_transfer_wait(ctlr, msg, xfer);
1293810923f3SLubomir Rintel 				if (ret < 0)
1294810923f3SLubomir Rintel 					msg->status = ret;
1295d57e7960SMark Brown 			}
129638ec10f6SMark Brown 		} else {
129738ec10f6SMark Brown 			if (xfer->len)
129838ec10f6SMark Brown 				dev_err(&msg->spi->dev,
129938ec10f6SMark Brown 					"Bufferless transfer has length %u\n",
130038ec10f6SMark Brown 					xfer->len);
130138ec10f6SMark Brown 		}
1302b158935fSMark Brown 
1303b42faeeeSVladimir Oltean 		if (!ctlr->ptp_sts_supported) {
1304b42faeeeSVladimir Oltean 			ptp_read_system_postts(xfer->ptp_sts);
1305b42faeeeSVladimir Oltean 			xfer->ptp_sts_word_post = xfer->len;
1306b42faeeeSVladimir Oltean 		}
1307b42faeeeSVladimir Oltean 
1308b158935fSMark Brown 		trace_spi_transfer_stop(msg, xfer);
1309b158935fSMark Brown 
1310b158935fSMark Brown 		if (msg->status != -EINPROGRESS)
1311b158935fSMark Brown 			goto out;
1312b158935fSMark Brown 
1313bebcfd27SAlexandru Ardelean 		spi_transfer_delay_exec(xfer);
1314b158935fSMark Brown 
1315b158935fSMark Brown 		if (xfer->cs_change) {
1316b158935fSMark Brown 			if (list_is_last(&xfer->transfer_list,
1317b158935fSMark Brown 					 &msg->transfers)) {
1318b158935fSMark Brown 				keep_cs = true;
1319b158935fSMark Brown 			} else {
1320d347b4aaSDavid Bauer 				spi_set_cs(msg->spi, false, false);
13210ff2de8bSMartin Sperl 				_spi_transfer_cs_change_delay(msg, xfer);
1322d347b4aaSDavid Bauer 				spi_set_cs(msg->spi, true, false);
1323b158935fSMark Brown 			}
1324b158935fSMark Brown 		}
1325b158935fSMark Brown 
1326b158935fSMark Brown 		msg->actual_length += xfer->len;
1327b158935fSMark Brown 	}
1328b158935fSMark Brown 
1329b158935fSMark Brown out:
1330b158935fSMark Brown 	if (ret != 0 || !keep_cs)
1331d347b4aaSDavid Bauer 		spi_set_cs(msg->spi, false, false);
1332b158935fSMark Brown 
1333b158935fSMark Brown 	if (msg->status == -EINPROGRESS)
1334b158935fSMark Brown 		msg->status = ret;
1335b158935fSMark Brown 
13368caab75fSGeert Uytterhoeven 	if (msg->status && ctlr->handle_err)
13378caab75fSGeert Uytterhoeven 		ctlr->handle_err(ctlr, msg);
1338b716c4ffSAndy Shevchenko 
13390ed56252SMark Brown 	spi_finalize_current_message(ctlr);
13400ed56252SMark Brown 
1341b158935fSMark Brown 	return ret;
1342b158935fSMark Brown }
1343b158935fSMark Brown 
1344b158935fSMark Brown /**
1345b158935fSMark Brown  * spi_finalize_current_transfer - report completion of a transfer
13468caab75fSGeert Uytterhoeven  * @ctlr: the controller reporting completion
1347b158935fSMark Brown  *
1348b158935fSMark Brown  * Called by SPI drivers using the core transfer_one_message()
1349b158935fSMark Brown  * implementation to notify it that the current interrupt driven
13509e8f4882SGeert Uytterhoeven  * transfer has finished and the next one may be scheduled.
1351b158935fSMark Brown  */
13528caab75fSGeert Uytterhoeven void spi_finalize_current_transfer(struct spi_controller *ctlr)
1353b158935fSMark Brown {
13548caab75fSGeert Uytterhoeven 	complete(&ctlr->xfer_completion);
1355b158935fSMark Brown }
1356b158935fSMark Brown EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
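
/*
 * Example: the expected interplay between a driver's transfer_one()
 * implementation and spi_finalize_current_transfer().  A minimal sketch
 * with hypothetical foo_* names; register setup and error handling are
 * omitted.
 *
 *	static int foo_transfer_one(struct spi_controller *ctlr,
 *				    struct spi_device *spi,
 *				    struct spi_transfer *xfer)
 *	{
 *		struct foo_priv *priv = spi_controller_get_devdata(ctlr);
 *
 *		foo_start_dma(priv, xfer);
 *		return 1;
 *	}
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_ack_irq(priv);
 *		spi_finalize_current_transfer(priv->ctlr);
 *		return IRQ_HANDLED;
 *	}
 *
 * A positive return value from transfer_one() makes
 * spi_transfer_one_message() block in spi_transfer_wait() until this
 * completion fires; returning 0 means the transfer already finished
 * synchronously, and a negative value reports an error.
 */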
1357b158935fSMark Brown 
1358e1268597SMark Brown static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1359e1268597SMark Brown {
1360e1268597SMark Brown 	if (ctlr->auto_runtime_pm) {
1361e1268597SMark Brown 		pm_runtime_mark_last_busy(ctlr->dev.parent);
1362e1268597SMark Brown 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1363e1268597SMark Brown 	}
1364e1268597SMark Brown }
1365e1268597SMark Brown 
1366ffbbdd21SLinus Walleij /**
1367fc9e0f71SMark Brown  * __spi_pump_messages - function which processes spi message queue
13688caab75fSGeert Uytterhoeven  * @ctlr: controller to process queue for
1369fc9e0f71SMark Brown  * @in_kthread: true if we are in the context of the message pump thread
1370ffbbdd21SLinus Walleij  *
1371ffbbdd21SLinus Walleij  * This function checks if there is any SPI message in the queue that
1372ffbbdd21SLinus Walleij  * needs processing and, if so, calls out to the driver to initialize the
1373ffbbdd21SLinus Walleij  * hardware and transfer each message.
1374ffbbdd21SLinus Walleij  *
13750461a414SMark Brown  * Note that it is called both from the kthread itself and also from
13760461a414SMark Brown  * inside spi_sync(); the queue extraction handling at the top of the
13770461a414SMark Brown  * function should deal with this safely.
1378ffbbdd21SLinus Walleij  */
13798caab75fSGeert Uytterhoeven static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1380ffbbdd21SLinus Walleij {
1381b42faeeeSVladimir Oltean 	struct spi_transfer *xfer;
1382d1c44c93SVladimir Oltean 	struct spi_message *msg;
1383ffbbdd21SLinus Walleij 	bool was_busy = false;
1384d1c44c93SVladimir Oltean 	unsigned long flags;
1385ffbbdd21SLinus Walleij 	int ret;
1386ffbbdd21SLinus Walleij 
1387983aee5dSMark Brown 	/* Lock queue */
13888caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1389983aee5dSMark Brown 
1390983aee5dSMark Brown 	/* Make sure we are not already running a message */
13918caab75fSGeert Uytterhoeven 	if (ctlr->cur_msg) {
13928caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1393983aee5dSMark Brown 		return;
1394983aee5dSMark Brown 	}
1395983aee5dSMark Brown 
1396f0125f1aSMark Brown 	/* If another context is idling the device then defer */
13978caab75fSGeert Uytterhoeven 	if (ctlr->idling) {
139860a883d1SMarek Szyprowski 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
13998caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
14000461a414SMark Brown 		return;
14010461a414SMark Brown 	}
14020461a414SMark Brown 
1403983aee5dSMark Brown 	/* Check if the queue is idle */
14048caab75fSGeert Uytterhoeven 	if (list_empty(&ctlr->queue) || !ctlr->running) {
14058caab75fSGeert Uytterhoeven 		if (!ctlr->busy) {
14068caab75fSGeert Uytterhoeven 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1407ffbbdd21SLinus Walleij 			return;
1408ffbbdd21SLinus Walleij 		}
1409fc9e0f71SMark Brown 
1410e1268597SMark Brown 		/* Defer any non-atomic teardown to the thread */
1411f0125f1aSMark Brown 		if (!in_kthread) {
1412e1268597SMark Brown 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1413e1268597SMark Brown 			    !ctlr->unprepare_transfer_hardware) {
1414e1268597SMark Brown 				spi_idle_runtime_pm(ctlr);
1415e1268597SMark Brown 				ctlr->busy = false;
1416e1268597SMark Brown 				trace_spi_controller_idle(ctlr);
1417e1268597SMark Brown 			} else {
141860a883d1SMarek Szyprowski 				kthread_queue_work(ctlr->kworker,
1419f0125f1aSMark Brown 						   &ctlr->pump_messages);
1420e1268597SMark Brown 			}
1421f0125f1aSMark Brown 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1422f0125f1aSMark Brown 			return;
1423f0125f1aSMark Brown 		}
1424f0125f1aSMark Brown 
1425f0125f1aSMark Brown 		ctlr->busy = false;
1426f0125f1aSMark Brown 		ctlr->idling = true;
1427f0125f1aSMark Brown 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1428f0125f1aSMark Brown 
1429f0125f1aSMark Brown 		kfree(ctlr->dummy_rx);
1430f0125f1aSMark Brown 		ctlr->dummy_rx = NULL;
1431f0125f1aSMark Brown 		kfree(ctlr->dummy_tx);
1432f0125f1aSMark Brown 		ctlr->dummy_tx = NULL;
1433f0125f1aSMark Brown 		if (ctlr->unprepare_transfer_hardware &&
1434f0125f1aSMark Brown 		    ctlr->unprepare_transfer_hardware(ctlr))
1435f0125f1aSMark Brown 			dev_err(&ctlr->dev,
1436f0125f1aSMark Brown 				"failed to unprepare transfer hardware\n");
1437e1268597SMark Brown 		spi_idle_runtime_pm(ctlr);
1438f0125f1aSMark Brown 		trace_spi_controller_idle(ctlr);
1439f0125f1aSMark Brown 
1440f0125f1aSMark Brown 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1441f0125f1aSMark Brown 		ctlr->idling = false;
14428caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1443ffbbdd21SLinus Walleij 		return;
1444ffbbdd21SLinus Walleij 	}
1445ffbbdd21SLinus Walleij 
1446ffbbdd21SLinus Walleij 	/* Extract head of queue */
1447d1c44c93SVladimir Oltean 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1448d1c44c93SVladimir Oltean 	ctlr->cur_msg = msg;
1449ffbbdd21SLinus Walleij 
1450d1c44c93SVladimir Oltean 	list_del_init(&msg->queue);
14518caab75fSGeert Uytterhoeven 	if (ctlr->busy)
1452ffbbdd21SLinus Walleij 		was_busy = true;
1453ffbbdd21SLinus Walleij 	else
14548caab75fSGeert Uytterhoeven 		ctlr->busy = true;
14558caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1456ffbbdd21SLinus Walleij 
14578caab75fSGeert Uytterhoeven 	mutex_lock(&ctlr->io_mutex);
1458ef4d96ecSMark Brown 
14598caab75fSGeert Uytterhoeven 	if (!was_busy && ctlr->auto_runtime_pm) {
14608caab75fSGeert Uytterhoeven 		ret = pm_runtime_get_sync(ctlr->dev.parent);
146149834de2SMark Brown 		if (ret < 0) {
14627e48e23aSTony Lindgren 			pm_runtime_put_noidle(ctlr->dev.parent);
14638caab75fSGeert Uytterhoeven 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
146449834de2SMark Brown 				ret);
14658caab75fSGeert Uytterhoeven 			mutex_unlock(&ctlr->io_mutex);
146649834de2SMark Brown 			return;
146749834de2SMark Brown 		}
146849834de2SMark Brown 	}
146949834de2SMark Brown 
147056ec1978SMark Brown 	if (!was_busy)
14718caab75fSGeert Uytterhoeven 		trace_spi_controller_busy(ctlr);
147256ec1978SMark Brown 
14738caab75fSGeert Uytterhoeven 	if (!was_busy && ctlr->prepare_transfer_hardware) {
14748caab75fSGeert Uytterhoeven 		ret = ctlr->prepare_transfer_hardware(ctlr);
1475ffbbdd21SLinus Walleij 		if (ret) {
14768caab75fSGeert Uytterhoeven 			dev_err(&ctlr->dev,
1477f3440d9aSSuper Liu 				"failed to prepare transfer hardware: %d\n",
1478f3440d9aSSuper Liu 				ret);
147949834de2SMark Brown 
14808caab75fSGeert Uytterhoeven 			if (ctlr->auto_runtime_pm)
14818caab75fSGeert Uytterhoeven 				pm_runtime_put(ctlr->dev.parent);
1482f3440d9aSSuper Liu 
1483d1c44c93SVladimir Oltean 			msg->status = ret;
1484f3440d9aSSuper Liu 			spi_finalize_current_message(ctlr);
1485f3440d9aSSuper Liu 
14868caab75fSGeert Uytterhoeven 			mutex_unlock(&ctlr->io_mutex);
1487ffbbdd21SLinus Walleij 			return;
1488ffbbdd21SLinus Walleij 		}
1489ffbbdd21SLinus Walleij 	}
1490ffbbdd21SLinus Walleij 
1491d1c44c93SVladimir Oltean 	trace_spi_message_start(msg);
149256ec1978SMark Brown 
14938caab75fSGeert Uytterhoeven 	if (ctlr->prepare_message) {
1494d1c44c93SVladimir Oltean 		ret = ctlr->prepare_message(ctlr, msg);
14952841a5fcSMark Brown 		if (ret) {
14968caab75fSGeert Uytterhoeven 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
14978caab75fSGeert Uytterhoeven 				ret);
1498d1c44c93SVladimir Oltean 			msg->status = ret;
14998caab75fSGeert Uytterhoeven 			spi_finalize_current_message(ctlr);
150049023d2eSJon Hunter 			goto out;
15012841a5fcSMark Brown 		}
15028caab75fSGeert Uytterhoeven 		ctlr->cur_msg_prepared = true;
15032841a5fcSMark Brown 	}
15042841a5fcSMark Brown 
1505d1c44c93SVladimir Oltean 	ret = spi_map_msg(ctlr, msg);
150699adef31SMark Brown 	if (ret) {
1507d1c44c93SVladimir Oltean 		msg->status = ret;
15088caab75fSGeert Uytterhoeven 		spi_finalize_current_message(ctlr);
150949023d2eSJon Hunter 		goto out;
151099adef31SMark Brown 	}
151199adef31SMark Brown 
1512b42faeeeSVladimir Oltean 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1513b42faeeeSVladimir Oltean 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1514b42faeeeSVladimir Oltean 			xfer->ptp_sts_word_pre = 0;
1515b42faeeeSVladimir Oltean 			ptp_read_system_prets(xfer->ptp_sts);
1516b42faeeeSVladimir Oltean 		}
1517b42faeeeSVladimir Oltean 	}
1518b42faeeeSVladimir Oltean 
1519d1c44c93SVladimir Oltean 	ret = ctlr->transfer_one_message(ctlr, msg);
1520ffbbdd21SLinus Walleij 	if (ret) {
15218caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev,
15221f802f82SGeert Uytterhoeven 			"failed to transfer one message from queue\n");
152349023d2eSJon Hunter 		goto out;
1524ffbbdd21SLinus Walleij 	}
152549023d2eSJon Hunter 
152649023d2eSJon Hunter out:
15278caab75fSGeert Uytterhoeven 	mutex_unlock(&ctlr->io_mutex);
152862826970SMark Brown 
152962826970SMark Brown 	/* Prod the scheduler in case transfer_one() was busy waiting */
153049023d2eSJon Hunter 	if (!ret)
153162826970SMark Brown 		cond_resched();
1532ffbbdd21SLinus Walleij }
1533ffbbdd21SLinus Walleij 
1534fc9e0f71SMark Brown /**
1535fc9e0f71SMark Brown  * spi_pump_messages - kthread work function which processes spi message queue
15368caab75fSGeert Uytterhoeven  * @work: pointer to kthread work struct contained in the controller struct
1537fc9e0f71SMark Brown  */
1538fc9e0f71SMark Brown static void spi_pump_messages(struct kthread_work *work)
1539fc9e0f71SMark Brown {
15408caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr =
15418caab75fSGeert Uytterhoeven 		container_of(work, struct spi_controller, pump_messages);
1542fc9e0f71SMark Brown 
15438caab75fSGeert Uytterhoeven 	__spi_pump_messages(ctlr, true);
1544fc9e0f71SMark Brown }
1545fc9e0f71SMark Brown 
1546924b5867SDouglas Anderson /**
1547b42faeeeSVladimir Oltean  * spi_take_timestamp_pre - helper for drivers to collect the beginning of the
1548b42faeeeSVladimir Oltean  *			    TX timestamp for the requested byte from the SPI
1549b42faeeeSVladimir Oltean  *			    transfer. The frequency with which this function
1550b42faeeeSVladimir Oltean  *			    must be called (once per word, once for the whole
1551b42faeeeSVladimir Oltean  *			    transfer, once per batch of words etc) is arbitrary
1552b42faeeeSVladimir Oltean  *			    as long as the requested word has not yet been
1553b42faeeeSVladimir Oltean  *			    sent out at the time of the
1554b42faeeeSVladimir Oltean  *			    call. The timestamp is only taken once, at the
1555b42faeeeSVladimir Oltean  *			    first such call. It is assumed that the driver
1556b42faeeeSVladimir Oltean  *			    advances @progress monotonically.
1557b42faeeeSVladimir Oltean  * @ctlr: Pointer to the spi_controller structure of the driver
1558b42faeeeSVladimir Oltean  * @xfer: Pointer to the transfer being timestamped
1559862dd2a9SVladimir Oltean  * @progress: How many words (not bytes) have been transferred so far
1560b42faeeeSVladimir Oltean  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1561b42faeeeSVladimir Oltean  *	      transfer, for less jitter in time measurement. Only compatible
1562b42faeeeSVladimir Oltean  *	      with PIO drivers. If true, must follow up with
1563b42faeeeSVladimir Oltean  *	      spi_take_timestamp_post or otherwise system will crash.
1564b42faeeeSVladimir Oltean  *	      WARNING: for fully predictable results, the CPU frequency must
1565b42faeeeSVladimir Oltean  *	      also be under control (governor).
1566b42faeeeSVladimir Oltean  */
1567b42faeeeSVladimir Oltean void spi_take_timestamp_pre(struct spi_controller *ctlr,
1568b42faeeeSVladimir Oltean 			    struct spi_transfer *xfer,
1569862dd2a9SVladimir Oltean 			    size_t progress, bool irqs_off)
1570b42faeeeSVladimir Oltean {
1571b42faeeeSVladimir Oltean 	if (!xfer->ptp_sts)
1572b42faeeeSVladimir Oltean 		return;
1573b42faeeeSVladimir Oltean 
15746a726824SVladimir Oltean 	if (xfer->timestamped)
1575b42faeeeSVladimir Oltean 		return;
1576b42faeeeSVladimir Oltean 
15776a726824SVladimir Oltean 	if (progress > xfer->ptp_sts_word_pre)
1578b42faeeeSVladimir Oltean 		return;
1579b42faeeeSVladimir Oltean 
1580b42faeeeSVladimir Oltean 	/* Capture the resolution of the timestamp */
1581862dd2a9SVladimir Oltean 	xfer->ptp_sts_word_pre = progress;
1582b42faeeeSVladimir Oltean 
1583b42faeeeSVladimir Oltean 	if (irqs_off) {
1584b42faeeeSVladimir Oltean 		local_irq_save(ctlr->irq_flags);
1585b42faeeeSVladimir Oltean 		preempt_disable();
1586b42faeeeSVladimir Oltean 	}
1587b42faeeeSVladimir Oltean 
1588b42faeeeSVladimir Oltean 	ptp_read_system_prets(xfer->ptp_sts);
1589b42faeeeSVladimir Oltean }
1590b42faeeeSVladimir Oltean EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1591b42faeeeSVladimir Oltean 
1592b42faeeeSVladimir Oltean /**
1593b42faeeeSVladimir Oltean  * spi_take_timestamp_post - helper for drivers to collect the end of the
1594b42faeeeSVladimir Oltean  *			     TX timestamp for the requested byte from the SPI
1595b42faeeeSVladimir Oltean  *			     transfer. Can be called with an arbitrary
1596b42faeeeSVladimir Oltean  *			     frequency: only the first call where @progress exceeds
1597b42faeeeSVladimir Oltean  *			     or is equal to the requested word will be
1598b42faeeeSVladimir Oltean  *			     timestamped.
1599b42faeeeSVladimir Oltean  * @ctlr: Pointer to the spi_controller structure of the driver
1600b42faeeeSVladimir Oltean  * @xfer: Pointer to the transfer being timestamped
1601862dd2a9SVladimir Oltean  * @progress: How many words (not bytes) have been transferred so far
1602b42faeeeSVladimir Oltean  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1603b42faeeeSVladimir Oltean  */
1604b42faeeeSVladimir Oltean void spi_take_timestamp_post(struct spi_controller *ctlr,
1605b42faeeeSVladimir Oltean 			     struct spi_transfer *xfer,
1606862dd2a9SVladimir Oltean 			     size_t progress, bool irqs_off)
1607b42faeeeSVladimir Oltean {
1608b42faeeeSVladimir Oltean 	if (!xfer->ptp_sts)
1609b42faeeeSVladimir Oltean 		return;
1610b42faeeeSVladimir Oltean 
16116a726824SVladimir Oltean 	if (xfer->timestamped)
1612b42faeeeSVladimir Oltean 		return;
1613b42faeeeSVladimir Oltean 
1614862dd2a9SVladimir Oltean 	if (progress < xfer->ptp_sts_word_post)
1615b42faeeeSVladimir Oltean 		return;
1616b42faeeeSVladimir Oltean 
1617b42faeeeSVladimir Oltean 	ptp_read_system_postts(xfer->ptp_sts);
1618b42faeeeSVladimir Oltean 
1619b42faeeeSVladimir Oltean 	if (irqs_off) {
1620b42faeeeSVladimir Oltean 		local_irq_restore(ctlr->irq_flags);
1621b42faeeeSVladimir Oltean 		preempt_enable();
1622b42faeeeSVladimir Oltean 	}
1623b42faeeeSVladimir Oltean 
1624b42faeeeSVladimir Oltean 	/* Capture the resolution of the timestamp */
1625862dd2a9SVladimir Oltean 	xfer->ptp_sts_word_post = progress;
1626b42faeeeSVladimir Oltean 
16276a726824SVladimir Oltean 	xfer->timestamped = true;
1628b42faeeeSVladimir Oltean }
1629b42faeeeSVladimir Oltean EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
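
/*
 * Example: a PIO driver cooperating with the core's TX timestamping.  A
 * minimal sketch with hypothetical foo_* helpers; @i counts words, not
 * bytes, as the @progress argument requires.
 *
 *	for (i = 0; i < xfer->len / priv->bytes_per_word; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, irqs_off);
 *		foo_write_word(priv, xfer, i);
 *		foo_wait_word_done(priv);
 *		spi_take_timestamp_post(ctlr, xfer, i, irqs_off);
 *	}
 *
 * spi_take_timestamp_pre() records once, at the first call whose progress
 * has not yet passed xfer->ptp_sts_word_pre, and spi_take_timestamp_post()
 * records once progress reaches xfer->ptp_sts_word_post; every other call
 * returns early, so the pair may be invoked per word, per batch or per
 * transfer.  The same irqs_off value must be passed to both calls so that
 * IRQs and preemption stay balanced.
 */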
1630b42faeeeSVladimir Oltean 
1631b42faeeeSVladimir Oltean /**
1632924b5867SDouglas Anderson  * spi_set_thread_rt - set the controller to pump at realtime priority
1633924b5867SDouglas Anderson  * @ctlr: controller to boost priority of
1634924b5867SDouglas Anderson  *
1635924b5867SDouglas Anderson  * This can be called because the controller requested realtime priority
1636924b5867SDouglas Anderson  * (by setting the ->rt value before calling spi_register_controller()) or
1637924b5867SDouglas Anderson  * because a device on the bus said that its transfers needed realtime
1638924b5867SDouglas Anderson  * priority.
1639924b5867SDouglas Anderson  *
1640924b5867SDouglas Anderson  * NOTE: at the moment if any device on a bus says it needs realtime then
1641924b5867SDouglas Anderson  * the thread will be at realtime priority for all transfers on that
1642924b5867SDouglas Anderson  * controller.  If this eventually becomes a problem we may see if we can
1643924b5867SDouglas Anderson  * find a way to boost the priority only temporarily during relevant
1644924b5867SDouglas Anderson  * transfers.
1645924b5867SDouglas Anderson  */
1646924b5867SDouglas Anderson static void spi_set_thread_rt(struct spi_controller *ctlr)
1647ffbbdd21SLinus Walleij {
1648924b5867SDouglas Anderson 	dev_info(&ctlr->dev,
1649924b5867SDouglas Anderson 		"will run message pump with realtime priority\n");
16506d2b84a4SLinus Torvalds 	sched_set_fifo(ctlr->kworker->task);
1651924b5867SDouglas Anderson }
1652924b5867SDouglas Anderson 
1653924b5867SDouglas Anderson static int spi_init_queue(struct spi_controller *ctlr)
1654924b5867SDouglas Anderson {
16558caab75fSGeert Uytterhoeven 	ctlr->running = false;
16568caab75fSGeert Uytterhoeven 	ctlr->busy = false;
1657ffbbdd21SLinus Walleij 
165860a883d1SMarek Szyprowski 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
165960a883d1SMarek Szyprowski 	if (IS_ERR(ctlr->kworker)) {
166060a883d1SMarek Szyprowski 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
166160a883d1SMarek Szyprowski 		return PTR_ERR(ctlr->kworker);
1662ffbbdd21SLinus Walleij 	}
166360a883d1SMarek Szyprowski 
16648caab75fSGeert Uytterhoeven 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
1665f0125f1aSMark Brown 
1666ffbbdd21SLinus Walleij 	/*
16678caab75fSGeert Uytterhoeven 	 * Controller config will indicate if this controller should run the
1668ffbbdd21SLinus Walleij 	 * message pump with high (realtime) priority to reduce the transfer
1669ffbbdd21SLinus Walleij 	 * latency on the bus by minimising the delay between a transfer
1670ffbbdd21SLinus Walleij 	 * request and the scheduling of the message pump thread. Without this
1671ffbbdd21SLinus Walleij 	 * setting the message pump thread will remain at default priority.
1672ffbbdd21SLinus Walleij 	 */
1673924b5867SDouglas Anderson 	if (ctlr->rt)
1674924b5867SDouglas Anderson 		spi_set_thread_rt(ctlr);
1675ffbbdd21SLinus Walleij 
1676ffbbdd21SLinus Walleij 	return 0;
1677ffbbdd21SLinus Walleij }
1678ffbbdd21SLinus Walleij 
1679ffbbdd21SLinus Walleij /**
1680ffbbdd21SLinus Walleij  * spi_get_next_queued_message() - called by driver to check for queued
1681ffbbdd21SLinus Walleij  * messages
16828caab75fSGeert Uytterhoeven  * @ctlr: the controller to check for queued messages
1683ffbbdd21SLinus Walleij  *
1684ffbbdd21SLinus Walleij  * If there are more messages in the queue, the next message is returned from
1685ffbbdd21SLinus Walleij  * this call.
168697d56dc6SJavier Martinez Canillas  *
168797d56dc6SJavier Martinez Canillas  * Return: the next message in the queue, else NULL if the queue is empty.
1688ffbbdd21SLinus Walleij  */
16898caab75fSGeert Uytterhoeven struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
1690ffbbdd21SLinus Walleij {
1691ffbbdd21SLinus Walleij 	struct spi_message *next;
1692ffbbdd21SLinus Walleij 	unsigned long flags;
1693ffbbdd21SLinus Walleij 
1694ffbbdd21SLinus Walleij 	/* get a pointer to the next message, if any */
16958caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
16968caab75fSGeert Uytterhoeven 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
16971cfd97f9SAxel Lin 					queue);
16988caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1699ffbbdd21SLinus Walleij 
1700ffbbdd21SLinus Walleij 	return next;
1701ffbbdd21SLinus Walleij }
1702ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
1703ffbbdd21SLinus Walleij 
1704ffbbdd21SLinus Walleij /**
1705ffbbdd21SLinus Walleij  * spi_finalize_current_message() - the current message is complete
17068caab75fSGeert Uytterhoeven  * @ctlr: the controller to return the message to
1707ffbbdd21SLinus Walleij  *
1708ffbbdd21SLinus Walleij  * Called by the driver to notify the core that the message in the front of the
1709ffbbdd21SLinus Walleij  * queue is complete and can be removed from the queue.
1710ffbbdd21SLinus Walleij  */
17118caab75fSGeert Uytterhoeven void spi_finalize_current_message(struct spi_controller *ctlr)
1712ffbbdd21SLinus Walleij {
1713b42faeeeSVladimir Oltean 	struct spi_transfer *xfer;
1714ffbbdd21SLinus Walleij 	struct spi_message *mesg;
1715ffbbdd21SLinus Walleij 	unsigned long flags;
17162841a5fcSMark Brown 	int ret;
1717ffbbdd21SLinus Walleij 
17188caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
17198caab75fSGeert Uytterhoeven 	mesg = ctlr->cur_msg;
17208caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1721ffbbdd21SLinus Walleij 
1722b42faeeeSVladimir Oltean 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1723b42faeeeSVladimir Oltean 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
1724b42faeeeSVladimir Oltean 			ptp_read_system_postts(xfer->ptp_sts);
1725b42faeeeSVladimir Oltean 			xfer->ptp_sts_word_post = xfer->len;
1726b42faeeeSVladimir Oltean 		}
1727b42faeeeSVladimir Oltean 	}
1728b42faeeeSVladimir Oltean 
17296a726824SVladimir Oltean 	if (unlikely(ctlr->ptp_sts_supported))
17306a726824SVladimir Oltean 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
17316a726824SVladimir Oltean 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
1732f971a207SVladimir Oltean 
17338caab75fSGeert Uytterhoeven 	spi_unmap_msg(ctlr, mesg);
173499adef31SMark Brown 
1735b59a7ca1SGustav Wiklander 	/* In the prepare_message callback the SPI bus has the opportunity
1736b59a7ca1SGustav Wiklander 	 * to split a transfer into smaller chunks.
1737b59a7ca1SGustav Wiklander 	 * Release the split transfers here, since spi_map_msg() is done on
1738b59a7ca1SGustav Wiklander 	 * the split transfers.
1739b59a7ca1SGustav Wiklander 	 */
1740b59a7ca1SGustav Wiklander 	spi_res_release(ctlr, mesg);
1741b59a7ca1SGustav Wiklander 
17428caab75fSGeert Uytterhoeven 	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
17438caab75fSGeert Uytterhoeven 		ret = ctlr->unprepare_message(ctlr, mesg);
17442841a5fcSMark Brown 		if (ret) {
17458caab75fSGeert Uytterhoeven 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
17468caab75fSGeert Uytterhoeven 				ret);
17472841a5fcSMark Brown 		}
17482841a5fcSMark Brown 	}
1749391949b6SUwe Kleine-König 
17508caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
17518caab75fSGeert Uytterhoeven 	ctlr->cur_msg = NULL;
17528caab75fSGeert Uytterhoeven 	ctlr->cur_msg_prepared = false;
1753809b1b04SRobin Gong 	ctlr->fallback = false;
175460a883d1SMarek Szyprowski 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
17558caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
17568e76ef88SMartin Sperl 
17578e76ef88SMartin Sperl 	trace_spi_message_done(mesg);
17582841a5fcSMark Brown 
1759ffbbdd21SLinus Walleij 	mesg->state = NULL;
1760ffbbdd21SLinus Walleij 	if (mesg->complete)
1761ffbbdd21SLinus Walleij 		mesg->complete(mesg->context);
1762ffbbdd21SLinus Walleij }
1763ffbbdd21SLinus Walleij EXPORT_SYMBOL_GPL(spi_finalize_current_message);
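
/*
 * Example: a driver that provides its own transfer_one_message() rather
 * than transfer_one().  A minimal sketch with a hypothetical
 * foo_do_message() helper, which is assumed to perform the transfers and
 * update msg->actual_length; the essential contract is that msg->status
 * is set and spi_finalize_current_message() is called exactly once per
 * message, just as the default spi_transfer_one_message() does.
 *
 *	static int foo_transfer_one_message(struct spi_controller *ctlr,
 *					    struct spi_message *msg)
 *	{
 *		int ret = foo_do_message(spi_controller_get_devdata(ctlr), msg);
 *
 *		msg->status = ret;
 *		spi_finalize_current_message(ctlr);
 *		return ret;
 *	}
 */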
1764ffbbdd21SLinus Walleij 
17658caab75fSGeert Uytterhoeven static int spi_start_queue(struct spi_controller *ctlr)
1766ffbbdd21SLinus Walleij {
1767ffbbdd21SLinus Walleij 	unsigned long flags;
1768ffbbdd21SLinus Walleij 
17698caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1770ffbbdd21SLinus Walleij 
17718caab75fSGeert Uytterhoeven 	if (ctlr->running || ctlr->busy) {
17728caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1773ffbbdd21SLinus Walleij 		return -EBUSY;
1774ffbbdd21SLinus Walleij 	}
1775ffbbdd21SLinus Walleij 
17768caab75fSGeert Uytterhoeven 	ctlr->running = true;
17778caab75fSGeert Uytterhoeven 	ctlr->cur_msg = NULL;
17788caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1779ffbbdd21SLinus Walleij 
178060a883d1SMarek Szyprowski 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1781ffbbdd21SLinus Walleij 
1782ffbbdd21SLinus Walleij 	return 0;
1783ffbbdd21SLinus Walleij }
1784ffbbdd21SLinus Walleij 
17858caab75fSGeert Uytterhoeven static int spi_stop_queue(struct spi_controller *ctlr)
1786ffbbdd21SLinus Walleij {
1787ffbbdd21SLinus Walleij 	unsigned long flags;
1788ffbbdd21SLinus Walleij 	unsigned limit = 500;
1789ffbbdd21SLinus Walleij 	int ret = 0;
1790ffbbdd21SLinus Walleij 
17918caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1792ffbbdd21SLinus Walleij 
1793ffbbdd21SLinus Walleij 	/*
1794ffbbdd21SLinus Walleij 	 * This is a bit lame, but is optimized for the common execution path.
17958caab75fSGeert Uytterhoeven 	 * A wait_queue on the ctlr->busy could be used, but then the common
1796ffbbdd21SLinus Walleij 	 * execution path (pump_messages) would be required to call wake_up or
1797ffbbdd21SLinus Walleij 	 * friends on every SPI message. Do this instead.
1798ffbbdd21SLinus Walleij 	 */
17998caab75fSGeert Uytterhoeven 	while ((!list_empty(&ctlr->queue) || ctlr->busy) && limit--) {
18008caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1801f97b26b0SAxel Lin 		usleep_range(10000, 11000);
18028caab75fSGeert Uytterhoeven 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1803ffbbdd21SLinus Walleij 	}
1804ffbbdd21SLinus Walleij 
18058caab75fSGeert Uytterhoeven 	if (!list_empty(&ctlr->queue) || ctlr->busy)
1806ffbbdd21SLinus Walleij 		ret = -EBUSY;
1807ffbbdd21SLinus Walleij 	else
18088caab75fSGeert Uytterhoeven 		ctlr->running = false;
1809ffbbdd21SLinus Walleij 
18108caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1811ffbbdd21SLinus Walleij 
1812ffbbdd21SLinus Walleij 	if (ret) {
18138caab75fSGeert Uytterhoeven 		dev_warn(&ctlr->dev, "could not stop message queue\n");
1814ffbbdd21SLinus Walleij 		return ret;
1815ffbbdd21SLinus Walleij 	}
1816ffbbdd21SLinus Walleij 	return ret;
1817ffbbdd21SLinus Walleij }
1818ffbbdd21SLinus Walleij 
18198caab75fSGeert Uytterhoeven static int spi_destroy_queue(struct spi_controller *ctlr)
1820ffbbdd21SLinus Walleij {
1821ffbbdd21SLinus Walleij 	int ret;
1822ffbbdd21SLinus Walleij 
18238caab75fSGeert Uytterhoeven 	ret = spi_stop_queue(ctlr);
1824ffbbdd21SLinus Walleij 
1825ffbbdd21SLinus Walleij 	/*
18263989144fSPetr Mladek 	 * kthread_flush_worker will block until all work is done.
1827ffbbdd21SLinus Walleij 	 * If the reason that stop_queue timed out is that the work will never
1828ffbbdd21SLinus Walleij 	 * finish, then it does no good to call flush/stop thread, so
1829ffbbdd21SLinus Walleij 	 * return anyway.
1830ffbbdd21SLinus Walleij 	 */
1831ffbbdd21SLinus Walleij 	if (ret) {
18328caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "problem destroying queue\n");
1833ffbbdd21SLinus Walleij 		return ret;
1834ffbbdd21SLinus Walleij 	}
1835ffbbdd21SLinus Walleij 
183660a883d1SMarek Szyprowski 	kthread_destroy_worker(ctlr->kworker);
1837ffbbdd21SLinus Walleij 
1838ffbbdd21SLinus Walleij 	return 0;
1839ffbbdd21SLinus Walleij }
1840ffbbdd21SLinus Walleij 
18410461a414SMark Brown static int __spi_queued_transfer(struct spi_device *spi,
18420461a414SMark Brown 				 struct spi_message *msg,
18430461a414SMark Brown 				 bool need_pump)
1844ffbbdd21SLinus Walleij {
18458caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
1846ffbbdd21SLinus Walleij 	unsigned long flags;
1847ffbbdd21SLinus Walleij 
18488caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1849ffbbdd21SLinus Walleij 
18508caab75fSGeert Uytterhoeven 	if (!ctlr->running) {
18518caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1852ffbbdd21SLinus Walleij 		return -ESHUTDOWN;
1853ffbbdd21SLinus Walleij 	}
1854ffbbdd21SLinus Walleij 	msg->actual_length = 0;
1855ffbbdd21SLinus Walleij 	msg->status = -EINPROGRESS;
1856ffbbdd21SLinus Walleij 
18578caab75fSGeert Uytterhoeven 	list_add_tail(&msg->queue, &ctlr->queue);
1858f0125f1aSMark Brown 	if (!ctlr->busy && need_pump)
185960a883d1SMarek Szyprowski 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1860ffbbdd21SLinus Walleij 
18618caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1862ffbbdd21SLinus Walleij 	return 0;
1863ffbbdd21SLinus Walleij }
1864ffbbdd21SLinus Walleij 
18650461a414SMark Brown /**
18660461a414SMark Brown  * spi_queued_transfer - transfer function for queued transfers
18670461a414SMark Brown  * @spi: spi device which is requesting transfer
18680461a414SMark Brown  * @msg: spi message which is to be handled and queued onto the driver queue
186997d56dc6SJavier Martinez Canillas  *
187097d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
18710461a414SMark Brown  */
18720461a414SMark Brown static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
18730461a414SMark Brown {
18740461a414SMark Brown 	return __spi_queued_transfer(spi, msg, true);
18750461a414SMark Brown }
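
/*
 * Example: the client-side path that ultimately feeds this queue.  A
 * minimal sketch in which spi and buf are assumed to exist; spi_async()
 * reaches this code through the controller's ->transfer hook, which
 * spi_controller_initialize_queue() below sets to spi_queued_transfer(),
 * and spi_sync() takes an equivalent path.
 *
 *	struct spi_transfer t = {
 *		.tx_buf = buf,
 *		.len = sizeof(buf),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	ret = spi_sync(spi, &m);
 */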
18760461a414SMark Brown 
18778caab75fSGeert Uytterhoeven static int spi_controller_initialize_queue(struct spi_controller *ctlr)
1878ffbbdd21SLinus Walleij {
1879ffbbdd21SLinus Walleij 	int ret;
1880ffbbdd21SLinus Walleij 
18818caab75fSGeert Uytterhoeven 	ctlr->transfer = spi_queued_transfer;
18828caab75fSGeert Uytterhoeven 	if (!ctlr->transfer_one_message)
18838caab75fSGeert Uytterhoeven 		ctlr->transfer_one_message = spi_transfer_one_message;
1884ffbbdd21SLinus Walleij 
1885ffbbdd21SLinus Walleij 	/* Initialize and start queue */
18868caab75fSGeert Uytterhoeven 	ret = spi_init_queue(ctlr);
1887ffbbdd21SLinus Walleij 	if (ret) {
18888caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "problem initializing queue\n");
1889ffbbdd21SLinus Walleij 		goto err_init_queue;
1890ffbbdd21SLinus Walleij 	}
18918caab75fSGeert Uytterhoeven 	ctlr->queued = true;
18928caab75fSGeert Uytterhoeven 	ret = spi_start_queue(ctlr);
1893ffbbdd21SLinus Walleij 	if (ret) {
18948caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "problem starting queue\n");
1895ffbbdd21SLinus Walleij 		goto err_start_queue;
1896ffbbdd21SLinus Walleij 	}
1897ffbbdd21SLinus Walleij 
1898ffbbdd21SLinus Walleij 	return 0;
1899ffbbdd21SLinus Walleij 
1900ffbbdd21SLinus Walleij err_start_queue:
19018caab75fSGeert Uytterhoeven 	spi_destroy_queue(ctlr);
1902c3676d5cSMark Brown err_init_queue:
1903ffbbdd21SLinus Walleij 	return ret;
1904ffbbdd21SLinus Walleij }
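
/*
 * Example: how a controller driver typically ends up on this queued path.
 * A minimal sketch with hypothetical foo_* callbacks, assuming the usual
 * registration flow in which spi_register_controller() calls
 * spi_controller_initialize_queue() for drivers that supply transfer_one()
 * (or transfer_one_message()) instead of a legacy ->transfer method.
 *
 *	ctlr = devm_spi_alloc_master(dev, sizeof(struct foo_priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *	ctlr->set_cs = foo_set_cs;
 *	ctlr->transfer_one = foo_transfer_one;
 *	ctlr->prepare_message = foo_prepare_message;
 *	ret = devm_spi_register_controller(dev, ctlr);
 */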
1905ffbbdd21SLinus Walleij 
1906988f259bSBoris Brezillon /**
1907988f259bSBoris Brezillon  * spi_flush_queue - Send all pending messages in the queue from the caller's
1908988f259bSBoris Brezillon  *		     context
1909988f259bSBoris Brezillon  * @ctlr: controller to process queue for
1910988f259bSBoris Brezillon  *
1911988f259bSBoris Brezillon  * This should be used when one wants to ensure all pending messages have been
1912988f259bSBoris Brezillon  * sent before doing something. It is used by the spi-mem code to make sure SPI
1913988f259bSBoris Brezillon  * memory operations do not preempt regular SPI transfers that have been queued
1914988f259bSBoris Brezillon  * before the spi-mem operation.
1915988f259bSBoris Brezillon  */
1916988f259bSBoris Brezillon void spi_flush_queue(struct spi_controller *ctlr)
1917988f259bSBoris Brezillon {
1918988f259bSBoris Brezillon 	if (ctlr->transfer == spi_queued_transfer)
1919988f259bSBoris Brezillon 		__spi_pump_messages(ctlr, false);
1920988f259bSBoris Brezillon }
1921988f259bSBoris Brezillon 
1922ffbbdd21SLinus Walleij /*-------------------------------------------------------------------------*/
1923ffbbdd21SLinus Walleij 
19247cb94361SAndreas Larsson #if defined(CONFIG_OF)
19258caab75fSGeert Uytterhoeven static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1926c2e51ac3SGeert Uytterhoeven 			   struct device_node *nc)
1927d57a4282SGrant Likely {
192889da4293STrent Piepho 	u32 value;
1929c2e51ac3SGeert Uytterhoeven 	int rc;
1930d57a4282SGrant Likely 
1931d57a4282SGrant Likely 	/* Mode (clock phase/polarity/etc.) */
1932e0bcb680SSergei Shtylyov 	if (of_property_read_bool(nc, "spi-cpha"))
1933d57a4282SGrant Likely 		spi->mode |= SPI_CPHA;
1934e0bcb680SSergei Shtylyov 	if (of_property_read_bool(nc, "spi-cpol"))
1935d57a4282SGrant Likely 		spi->mode |= SPI_CPOL;
1936e0bcb680SSergei Shtylyov 	if (of_property_read_bool(nc, "spi-3wire"))
1937c20151dfSLars-Peter Clausen 		spi->mode |= SPI_3WIRE;
1938e0bcb680SSergei Shtylyov 	if (of_property_read_bool(nc, "spi-lsb-first"))
1939cd6339e6SZhao Qiang 		spi->mode |= SPI_LSB_FIRST;
19403e5ec1dbSGregory CLEMENT 	if (of_property_read_bool(nc, "spi-cs-high"))
1941f3186dd8SLinus Walleij 		spi->mode |= SPI_CS_HIGH;
1942f3186dd8SLinus Walleij 
1943f477b7fbSwangyuhang 	/* Device DUAL/QUAD mode */
194489da4293STrent Piepho 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
194589da4293STrent Piepho 		switch (value) {
1946d962608cSDragos Bogdan 		case 0:
1947d962608cSDragos Bogdan 			spi->mode |= SPI_NO_TX;
1948d962608cSDragos Bogdan 			break;
194989da4293STrent Piepho 		case 1:
1950f477b7fbSwangyuhang 			break;
195189da4293STrent Piepho 		case 2:
1952f477b7fbSwangyuhang 			spi->mode |= SPI_TX_DUAL;
1953f477b7fbSwangyuhang 			break;
195489da4293STrent Piepho 		case 4:
1955f477b7fbSwangyuhang 			spi->mode |= SPI_TX_QUAD;
1956f477b7fbSwangyuhang 			break;
19576b03061fSYogesh Narayan Gaur 		case 8:
19586b03061fSYogesh Narayan Gaur 			spi->mode |= SPI_TX_OCTAL;
19596b03061fSYogesh Narayan Gaur 			break;
1960f477b7fbSwangyuhang 		default:
19618caab75fSGeert Uytterhoeven 			dev_warn(&ctlr->dev,
1962a110f93dSwangyuhang 				"spi-tx-bus-width %d not supported\n",
196389da4293STrent Piepho 				value);
196480874d8cSGeert Uytterhoeven 			break;
1965f477b7fbSwangyuhang 		}
1966a822e99cSMark Brown 	}
1967f477b7fbSwangyuhang 
196889da4293STrent Piepho 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
196989da4293STrent Piepho 		switch (value) {
1970d962608cSDragos Bogdan 		case 0:
1971d962608cSDragos Bogdan 			spi->mode |= SPI_NO_RX;
1972d962608cSDragos Bogdan 			break;
197389da4293STrent Piepho 		case 1:
1974f477b7fbSwangyuhang 			break;
197589da4293STrent Piepho 		case 2:
1976f477b7fbSwangyuhang 			spi->mode |= SPI_RX_DUAL;
1977f477b7fbSwangyuhang 			break;
197889da4293STrent Piepho 		case 4:
1979f477b7fbSwangyuhang 			spi->mode |= SPI_RX_QUAD;
1980f477b7fbSwangyuhang 			break;
19816b03061fSYogesh Narayan Gaur 		case 8:
19826b03061fSYogesh Narayan Gaur 			spi->mode |= SPI_RX_OCTAL;
19836b03061fSYogesh Narayan Gaur 			break;
1984f477b7fbSwangyuhang 		default:
19858caab75fSGeert Uytterhoeven 			dev_warn(&ctlr->dev,
1986a110f93dSwangyuhang 				"spi-rx-bus-width %d not supported\n",
198789da4293STrent Piepho 				value);
198880874d8cSGeert Uytterhoeven 			break;
1989f477b7fbSwangyuhang 		}
1990a822e99cSMark Brown 	}
1991f477b7fbSwangyuhang 
19928caab75fSGeert Uytterhoeven 	if (spi_controller_is_slave(ctlr)) {
1993194276b0SRob Herring 		if (!of_node_name_eq(nc, "slave")) {
199425c56c88SRob Herring 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
199525c56c88SRob Herring 				nc);
19966c364062SGeert Uytterhoeven 			return -EINVAL;
19976c364062SGeert Uytterhoeven 		}
19986c364062SGeert Uytterhoeven 		return 0;
19996c364062SGeert Uytterhoeven 	}
20006c364062SGeert Uytterhoeven 
20016c364062SGeert Uytterhoeven 	/* Device address */
20026c364062SGeert Uytterhoeven 	rc = of_property_read_u32(nc, "reg", &value);
20036c364062SGeert Uytterhoeven 	if (rc) {
200425c56c88SRob Herring 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
200525c56c88SRob Herring 			nc, rc);
20066c364062SGeert Uytterhoeven 		return rc;
20076c364062SGeert Uytterhoeven 	}
20086c364062SGeert Uytterhoeven 	spi->chip_select = value;
20096c364062SGeert Uytterhoeven 
2010d57a4282SGrant Likely 	/* Device speed */
2011671c3bf5SChuanhong Guo 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
201289da4293STrent Piepho 		spi->max_speed_hz = value;
2013d57a4282SGrant Likely 
2014c2e51ac3SGeert Uytterhoeven 	return 0;
2015c2e51ac3SGeert Uytterhoeven }
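
/*
 * Example: a device tree child node exercising the properties parsed
 * above.  Illustrative only - the controller label, compatible string and
 * values are made up.
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <25000000>;
 *			spi-tx-bus-width = <4>;
 *			spi-rx-bus-width = <4>;
 *		};
 *	};
 *
 * Here "reg" selects chip select 0, "spi-max-frequency" becomes
 * spi->max_speed_hz, and the bus-width properties set SPI_TX_QUAD and
 * SPI_RX_QUAD in spi->mode.
 */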
2016c2e51ac3SGeert Uytterhoeven 
2017c2e51ac3SGeert Uytterhoeven static struct spi_device *
20188caab75fSGeert Uytterhoeven of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2019c2e51ac3SGeert Uytterhoeven {
2020c2e51ac3SGeert Uytterhoeven 	struct spi_device *spi;
2021c2e51ac3SGeert Uytterhoeven 	int rc;
2022c2e51ac3SGeert Uytterhoeven 
2023c2e51ac3SGeert Uytterhoeven 	/* Alloc an spi_device */
20248caab75fSGeert Uytterhoeven 	spi = spi_alloc_device(ctlr);
2025c2e51ac3SGeert Uytterhoeven 	if (!spi) {
202625c56c88SRob Herring 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2027c2e51ac3SGeert Uytterhoeven 		rc = -ENOMEM;
2028c2e51ac3SGeert Uytterhoeven 		goto err_out;
2029c2e51ac3SGeert Uytterhoeven 	}
2030c2e51ac3SGeert Uytterhoeven 
2031c2e51ac3SGeert Uytterhoeven 	/* Select device driver */
2032c2e51ac3SGeert Uytterhoeven 	rc = of_modalias_node(nc, spi->modalias,
2033c2e51ac3SGeert Uytterhoeven 				sizeof(spi->modalias));
2034c2e51ac3SGeert Uytterhoeven 	if (rc < 0) {
203525c56c88SRob Herring 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2036c2e51ac3SGeert Uytterhoeven 		goto err_out;
2037c2e51ac3SGeert Uytterhoeven 	}
2038c2e51ac3SGeert Uytterhoeven 
20398caab75fSGeert Uytterhoeven 	rc = of_spi_parse_dt(ctlr, spi, nc);
2040c2e51ac3SGeert Uytterhoeven 	if (rc)
2041c2e51ac3SGeert Uytterhoeven 		goto err_out;
2042c2e51ac3SGeert Uytterhoeven 
2043d57a4282SGrant Likely 	/* Store a pointer to the node in the device structure */
2044d57a4282SGrant Likely 	of_node_get(nc);
2045d57a4282SGrant Likely 	spi->dev.of_node = nc;
2046d57a4282SGrant Likely 
2047d57a4282SGrant Likely 	/* Register the new device */
2048d57a4282SGrant Likely 	rc = spi_add_device(spi);
2049d57a4282SGrant Likely 	if (rc) {
205025c56c88SRob Herring 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
20518324147fSJohan Hovold 		goto err_of_node_put;
2052d57a4282SGrant Likely 	}
2053d57a4282SGrant Likely 
2054aff5e3f8SPantelis Antoniou 	return spi;
2055aff5e3f8SPantelis Antoniou 
20568324147fSJohan Hovold err_of_node_put:
20578324147fSJohan Hovold 	of_node_put(nc);
2058aff5e3f8SPantelis Antoniou err_out:
2059aff5e3f8SPantelis Antoniou 	spi_dev_put(spi);
2060aff5e3f8SPantelis Antoniou 	return ERR_PTR(rc);
2061aff5e3f8SPantelis Antoniou }
2062aff5e3f8SPantelis Antoniou 
2063aff5e3f8SPantelis Antoniou /**
2064aff5e3f8SPantelis Antoniou  * of_register_spi_devices() - Register child devices onto the SPI bus
20658caab75fSGeert Uytterhoeven  * @ctlr:	Pointer to spi_controller device
2066aff5e3f8SPantelis Antoniou  *
20676c364062SGeert Uytterhoeven  * Registers an spi_device for each child node of the controller node that
20686c364062SGeert Uytterhoeven  * represents a valid SPI slave.
2069aff5e3f8SPantelis Antoniou  */
20708caab75fSGeert Uytterhoeven static void of_register_spi_devices(struct spi_controller *ctlr)
2071aff5e3f8SPantelis Antoniou {
2072aff5e3f8SPantelis Antoniou 	struct spi_device *spi;
2073aff5e3f8SPantelis Antoniou 	struct device_node *nc;
2074aff5e3f8SPantelis Antoniou 
20758caab75fSGeert Uytterhoeven 	if (!ctlr->dev.of_node)
2076aff5e3f8SPantelis Antoniou 		return;
2077aff5e3f8SPantelis Antoniou 
20788caab75fSGeert Uytterhoeven 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2079bd6c1644SGeert Uytterhoeven 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2080bd6c1644SGeert Uytterhoeven 			continue;
20818caab75fSGeert Uytterhoeven 		spi = of_register_spi_device(ctlr, nc);
2082e0af98a7SRalf Ramsauer 		if (IS_ERR(spi)) {
20838caab75fSGeert Uytterhoeven 			dev_warn(&ctlr->dev,
208425c56c88SRob Herring 				 "Failed to create SPI device for %pOF\n", nc);
2085e0af98a7SRalf Ramsauer 			of_node_clear_flag(nc, OF_POPULATED);
2086e0af98a7SRalf Ramsauer 		}
2087d57a4282SGrant Likely 	}
2088d57a4282SGrant Likely }
2089d57a4282SGrant Likely #else
20908caab75fSGeert Uytterhoeven static void of_register_spi_devices(struct spi_controller *ctlr) { }
2091d57a4282SGrant Likely #endif
2092d57a4282SGrant Likely 
209364bee4d2SMika Westerberg #ifdef CONFIG_ACPI
20944c3c5954SArd Biesheuvel struct acpi_spi_lookup {
20954c3c5954SArd Biesheuvel 	struct spi_controller 	*ctlr;
20964c3c5954SArd Biesheuvel 	u32			max_speed_hz;
20974c3c5954SArd Biesheuvel 	u32			mode;
20984c3c5954SArd Biesheuvel 	int			irq;
20994c3c5954SArd Biesheuvel 	u8			bits_per_word;
21004c3c5954SArd Biesheuvel 	u8			chip_select;
21014c3c5954SArd Biesheuvel };
21024c3c5954SArd Biesheuvel 
21034c3c5954SArd Biesheuvel static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
21044c3c5954SArd Biesheuvel 					    struct acpi_spi_lookup *lookup)
21058a2e487eSLukas Wunner {
21068a2e487eSLukas Wunner 	const union acpi_object *obj;
21078a2e487eSLukas Wunner 
21088a2e487eSLukas Wunner 	if (!x86_apple_machine)
21098a2e487eSLukas Wunner 		return;
21108a2e487eSLukas Wunner 
21118a2e487eSLukas Wunner 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
21128a2e487eSLukas Wunner 	    && obj->buffer.length >= 4)
21134c3c5954SArd Biesheuvel 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
21148a2e487eSLukas Wunner 
21158a2e487eSLukas Wunner 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
21168a2e487eSLukas Wunner 	    && obj->buffer.length == 8)
21174c3c5954SArd Biesheuvel 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
21188a2e487eSLukas Wunner 
21198a2e487eSLukas Wunner 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
21208a2e487eSLukas Wunner 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
21214c3c5954SArd Biesheuvel 		lookup->mode |= SPI_LSB_FIRST;
21228a2e487eSLukas Wunner 
21238a2e487eSLukas Wunner 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
21248a2e487eSLukas Wunner 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
21254c3c5954SArd Biesheuvel 		lookup->mode |= SPI_CPOL;
21268a2e487eSLukas Wunner 
21278a2e487eSLukas Wunner 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
21288a2e487eSLukas Wunner 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
21294c3c5954SArd Biesheuvel 		lookup->mode |= SPI_CPHA;
21308a2e487eSLukas Wunner }
21318a2e487eSLukas Wunner 
213264bee4d2SMika Westerberg static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
213364bee4d2SMika Westerberg {
21344c3c5954SArd Biesheuvel 	struct acpi_spi_lookup *lookup = data;
21354c3c5954SArd Biesheuvel 	struct spi_controller *ctlr = lookup->ctlr;
213664bee4d2SMika Westerberg 
213764bee4d2SMika Westerberg 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
213864bee4d2SMika Westerberg 		struct acpi_resource_spi_serialbus *sb;
21394c3c5954SArd Biesheuvel 		acpi_handle parent_handle;
21404c3c5954SArd Biesheuvel 		acpi_status status;
214164bee4d2SMika Westerberg 
214264bee4d2SMika Westerberg 		sb = &ares->data.spi_serial_bus;
214364bee4d2SMika Westerberg 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
21454c3c5954SArd Biesheuvel 			status = acpi_get_handle(NULL,
21464c3c5954SArd Biesheuvel 						 sb->resource_source.string_ptr,
21474c3c5954SArd Biesheuvel 						 &parent_handle);
21484c3c5954SArd Biesheuvel 
2149b5e3cf41SArd Biesheuvel 			if (ACPI_FAILURE(status) ||
21504c3c5954SArd Biesheuvel 			    ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
21514c3c5954SArd Biesheuvel 				return -ENODEV;
21524c3c5954SArd Biesheuvel 
2153a0a90718SMika Westerberg 			/*
2154a0a90718SMika Westerberg 			 * ACPI DeviceSelection numbering is handled by the
2155a0a90718SMika Westerberg 			 * host controller driver in Windows and can vary
2156a0a90718SMika Westerberg 			 * from driver to driver. In Linux we always expect
2157a0a90718SMika Westerberg 			 * 0 .. max - 1 so we need to ask the driver to
2158a0a90718SMika Westerberg 			 * translate between the two schemes.
2159a0a90718SMika Westerberg 			 */
21608caab75fSGeert Uytterhoeven 			if (ctlr->fw_translate_cs) {
21618caab75fSGeert Uytterhoeven 				int cs = ctlr->fw_translate_cs(ctlr,
2162a0a90718SMika Westerberg 						sb->device_selection);
2163a0a90718SMika Westerberg 				if (cs < 0)
2164a0a90718SMika Westerberg 					return cs;
21654c3c5954SArd Biesheuvel 				lookup->chip_select = cs;
2166a0a90718SMika Westerberg 			} else {
21674c3c5954SArd Biesheuvel 				lookup->chip_select = sb->device_selection;
2168a0a90718SMika Westerberg 			}
2169a0a90718SMika Westerberg 
21704c3c5954SArd Biesheuvel 			lookup->max_speed_hz = sb->connection_speed;
21710dadde34SAndy Shevchenko 			lookup->bits_per_word = sb->data_bit_length;
217264bee4d2SMika Westerberg 
217364bee4d2SMika Westerberg 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
21744c3c5954SArd Biesheuvel 				lookup->mode |= SPI_CPHA;
217564bee4d2SMika Westerberg 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
21764c3c5954SArd Biesheuvel 				lookup->mode |= SPI_CPOL;
217764bee4d2SMika Westerberg 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
21784c3c5954SArd Biesheuvel 				lookup->mode |= SPI_CS_HIGH;
217964bee4d2SMika Westerberg 		}
21804c3c5954SArd Biesheuvel 	} else if (lookup->irq < 0) {
218164bee4d2SMika Westerberg 		struct resource r;
218264bee4d2SMika Westerberg 
218364bee4d2SMika Westerberg 		if (acpi_dev_resource_interrupt(ares, 0, &r))
21844c3c5954SArd Biesheuvel 			lookup->irq = r.start;
218564bee4d2SMika Westerberg 	}
218664bee4d2SMika Westerberg 
218764bee4d2SMika Westerberg 	/* Always tell the ACPI core to skip this resource */
218864bee4d2SMika Westerberg 	return 1;
218964bee4d2SMika Westerberg }
219064bee4d2SMika Westerberg 
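/*
 * Example (illustrative sketch, not an in-tree driver): a controller whose
 * firmware numbers chip selects differently from Linux can provide the
 * optional ->fw_translate_cs hook consulted by acpi_spi_add_resource()
 * above.  The 1-based firmware numbering and the foo_ name are assumptions
 * made for the sketch.
 */
static int foo_spi_fw_translate_cs(struct spi_controller *ctlr, unsigned cs)
{
	/* firmware counts chip selects from 1, Linux expects 0 .. max - 1 */
	if (cs < 1 || cs > ctlr->num_chipselect)
		return -EINVAL;

	return cs - 1;
}
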
21918caab75fSGeert Uytterhoeven static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
21927f24467fSOctavian Purdila 					    struct acpi_device *adev)
219364bee4d2SMika Westerberg {
21944c3c5954SArd Biesheuvel 	acpi_handle parent_handle = NULL;
219564bee4d2SMika Westerberg 	struct list_head resource_list;
2196b28944c6SArd Biesheuvel 	struct acpi_spi_lookup lookup = {};
219764bee4d2SMika Westerberg 	struct spi_device *spi;
219864bee4d2SMika Westerberg 	int ret;
219964bee4d2SMika Westerberg 
22007f24467fSOctavian Purdila 	if (acpi_bus_get_status(adev) || !adev->status.present ||
22017f24467fSOctavian Purdila 	    acpi_device_enumerated(adev))
220264bee4d2SMika Westerberg 		return AE_OK;
220364bee4d2SMika Westerberg 
22044c3c5954SArd Biesheuvel 	lookup.ctlr		= ctlr;
22054c3c5954SArd Biesheuvel 	lookup.irq		= -1;
22064c3c5954SArd Biesheuvel 
22074c3c5954SArd Biesheuvel 	INIT_LIST_HEAD(&resource_list);
22084c3c5954SArd Biesheuvel 	ret = acpi_dev_get_resources(adev, &resource_list,
22094c3c5954SArd Biesheuvel 				     acpi_spi_add_resource, &lookup);
22104c3c5954SArd Biesheuvel 	acpi_dev_free_resource_list(&resource_list);
22114c3c5954SArd Biesheuvel 
22124c3c5954SArd Biesheuvel 	if (ret < 0)
22134c3c5954SArd Biesheuvel 		/* found SPI in _CRS but it points to another controller */
22144c3c5954SArd Biesheuvel 		return AE_OK;
22154c3c5954SArd Biesheuvel 
22164c3c5954SArd Biesheuvel 	if (!lookup.max_speed_hz &&
221710e92724SBjorn Helgaas 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
22184c3c5954SArd Biesheuvel 	    ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
22194c3c5954SArd Biesheuvel 		/* Apple does not use _CRS but nested devices for SPI slaves */
22204c3c5954SArd Biesheuvel 		acpi_spi_parse_apple_properties(adev, &lookup);
22214c3c5954SArd Biesheuvel 	}
22224c3c5954SArd Biesheuvel 
22234c3c5954SArd Biesheuvel 	if (!lookup.max_speed_hz)
22244c3c5954SArd Biesheuvel 		return AE_OK;
22254c3c5954SArd Biesheuvel 
22268caab75fSGeert Uytterhoeven 	spi = spi_alloc_device(ctlr);
222764bee4d2SMika Westerberg 	if (!spi) {
22288caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
222964bee4d2SMika Westerberg 			dev_name(&adev->dev));
223064bee4d2SMika Westerberg 		return AE_NO_MEMORY;
223164bee4d2SMika Westerberg 	}
223264bee4d2SMika Westerberg 
22347b199811SRafael J. Wysocki 	ACPI_COMPANION_SET(&spi->dev, adev);
22354c3c5954SArd Biesheuvel 	spi->max_speed_hz	= lookup.max_speed_hz;
2236ea235786SJohn Garry 	spi->mode		|= lookup.mode;
22374c3c5954SArd Biesheuvel 	spi->irq		= lookup.irq;
22384c3c5954SArd Biesheuvel 	spi->bits_per_word	= lookup.bits_per_word;
22394c3c5954SArd Biesheuvel 	spi->chip_select	= lookup.chip_select;
224064bee4d2SMika Westerberg 
22410c6543f6SDan O'Donovan 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
22420c6543f6SDan O'Donovan 			  sizeof(spi->modalias));
22430c6543f6SDan O'Donovan 
224433ada67dSChristophe RICARD 	if (spi->irq < 0)
224533ada67dSChristophe RICARD 		spi->irq = acpi_dev_gpio_irq_get(adev, 0);
224633ada67dSChristophe RICARD 
22477f24467fSOctavian Purdila 	acpi_device_set_enumerated(adev);
22487f24467fSOctavian Purdila 
224933cf00e5SMika Westerberg 	adev->power.flags.ignore_parent = true;
225064bee4d2SMika Westerberg 	if (spi_add_device(spi)) {
225133cf00e5SMika Westerberg 		adev->power.flags.ignore_parent = false;
22528caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
225364bee4d2SMika Westerberg 			dev_name(&adev->dev));
225464bee4d2SMika Westerberg 		spi_dev_put(spi);
225564bee4d2SMika Westerberg 	}
225664bee4d2SMika Westerberg 
225764bee4d2SMika Westerberg 	return AE_OK;
225864bee4d2SMika Westerberg }
225964bee4d2SMika Westerberg 
22607f24467fSOctavian Purdila static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
22617f24467fSOctavian Purdila 				       void *data, void **return_value)
22627f24467fSOctavian Purdila {
22638caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = data;
22647f24467fSOctavian Purdila 	struct acpi_device *adev;
22657f24467fSOctavian Purdila 
22667f24467fSOctavian Purdila 	if (acpi_bus_get_device(handle, &adev))
22677f24467fSOctavian Purdila 		return AE_OK;
22687f24467fSOctavian Purdila 
22698caab75fSGeert Uytterhoeven 	return acpi_register_spi_device(ctlr, adev);
22707f24467fSOctavian Purdila }
22717f24467fSOctavian Purdila 
22724c3c5954SArd Biesheuvel #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
22734c3c5954SArd Biesheuvel 
22748caab75fSGeert Uytterhoeven static void acpi_register_spi_devices(struct spi_controller *ctlr)
227564bee4d2SMika Westerberg {
227664bee4d2SMika Westerberg 	acpi_status status;
227764bee4d2SMika Westerberg 	acpi_handle handle;
227864bee4d2SMika Westerberg 
22798caab75fSGeert Uytterhoeven 	handle = ACPI_HANDLE(ctlr->dev.parent);
228064bee4d2SMika Westerberg 	if (!handle)
228164bee4d2SMika Westerberg 		return;
228264bee4d2SMika Westerberg 
22834c3c5954SArd Biesheuvel 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
22844c3c5954SArd Biesheuvel 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
22858caab75fSGeert Uytterhoeven 				     acpi_spi_add_device, NULL, ctlr, NULL);
228664bee4d2SMika Westerberg 	if (ACPI_FAILURE(status))
22878caab75fSGeert Uytterhoeven 		dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
228864bee4d2SMika Westerberg }
228964bee4d2SMika Westerberg #else
22908caab75fSGeert Uytterhoeven static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
229164bee4d2SMika Westerberg #endif /* CONFIG_ACPI */
229264bee4d2SMika Westerberg 
22938caab75fSGeert Uytterhoeven static void spi_controller_release(struct device *dev)
22948ae12a0dSDavid Brownell {
22958caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr;
22968ae12a0dSDavid Brownell 
22978caab75fSGeert Uytterhoeven 	ctlr = container_of(dev, struct spi_controller, dev);
22988caab75fSGeert Uytterhoeven 	kfree(ctlr);
22998ae12a0dSDavid Brownell }
23008ae12a0dSDavid Brownell 
23018ae12a0dSDavid Brownell static struct class spi_master_class = {
23028ae12a0dSDavid Brownell 	.name		= "spi_master",
23038ae12a0dSDavid Brownell 	.owner		= THIS_MODULE,
23048caab75fSGeert Uytterhoeven 	.dev_release	= spi_controller_release,
2305eca2ebc7SMartin Sperl 	.dev_groups	= spi_master_groups,
23068ae12a0dSDavid Brownell };
23078ae12a0dSDavid Brownell 
23086c364062SGeert Uytterhoeven #ifdef CONFIG_SPI_SLAVE
23096c364062SGeert Uytterhoeven /**
23106c364062SGeert Uytterhoeven  * spi_slave_abort - abort the ongoing transfer request on an SPI slave
23116c364062SGeert Uytterhoeven  *		     controller
23126c364062SGeert Uytterhoeven  * @spi: device used for the current transfer
23136c364062SGeert Uytterhoeven  */
23146c364062SGeert Uytterhoeven int spi_slave_abort(struct spi_device *spi)
23156c364062SGeert Uytterhoeven {
23168caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
23176c364062SGeert Uytterhoeven 
23188caab75fSGeert Uytterhoeven 	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
23198caab75fSGeert Uytterhoeven 		return ctlr->slave_abort(ctlr);
23206c364062SGeert Uytterhoeven 
23216c364062SGeert Uytterhoeven 	return -ENOTSUPP;
23226c364062SGeert Uytterhoeven }
23236c364062SGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_slave_abort);
23246c364062SGeert Uytterhoeven 
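/*
 * Example (illustrative sketch, not an in-tree driver): a slave protocol
 * driver typically calls spi_slave_abort() when the remote SPI master never
 * clocks out a request it queued with spi_async().  It is assumed that the
 * message's ->complete callback signals @done; the 500 ms timeout and the
 * foo_ name are arbitrary choices for the sketch.
 */
static int foo_slave_wait_for_master(struct spi_device *spi,
				     struct spi_message *msg,
				     struct completion *done)
{
	if (!wait_for_completion_timeout(done, msecs_to_jiffies(500))) {
		spi_slave_abort(spi);	/* cancel the queued transfer */
		return -ETIMEDOUT;
	}

	return msg->status;
}
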
23256c364062SGeert Uytterhoeven static int match_true(struct device *dev, void *data)
23266c364062SGeert Uytterhoeven {
23276c364062SGeert Uytterhoeven 	return 1;
23286c364062SGeert Uytterhoeven }
23296c364062SGeert Uytterhoeven 
2330cc8b4659SGeert Uytterhoeven static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2331cc8b4659SGeert Uytterhoeven 			  char *buf)
23326c364062SGeert Uytterhoeven {
23338caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
23348caab75fSGeert Uytterhoeven 						   dev);
23356c364062SGeert Uytterhoeven 	struct device *child;
23366c364062SGeert Uytterhoeven 
23376c364062SGeert Uytterhoeven 	child = device_find_child(&ctlr->dev, NULL, match_true);
23386c364062SGeert Uytterhoeven 	return sprintf(buf, "%s\n",
23396c364062SGeert Uytterhoeven 		       child ? to_spi_device(child)->modalias : NULL);
23406c364062SGeert Uytterhoeven }
23416c364062SGeert Uytterhoeven 
2342cc8b4659SGeert Uytterhoeven static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2343cc8b4659SGeert Uytterhoeven 			   const char *buf, size_t count)
23446c364062SGeert Uytterhoeven {
23458caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
23468caab75fSGeert Uytterhoeven 						   dev);
23476c364062SGeert Uytterhoeven 	struct spi_device *spi;
23486c364062SGeert Uytterhoeven 	struct device *child;
23496c364062SGeert Uytterhoeven 	char name[32];
23506c364062SGeert Uytterhoeven 	int rc;
23516c364062SGeert Uytterhoeven 
23526c364062SGeert Uytterhoeven 	rc = sscanf(buf, "%31s", name);
23536c364062SGeert Uytterhoeven 	if (rc != 1 || !name[0])
23546c364062SGeert Uytterhoeven 		return -EINVAL;
23556c364062SGeert Uytterhoeven 
23566c364062SGeert Uytterhoeven 	child = device_find_child(&ctlr->dev, NULL, match_true);
23576c364062SGeert Uytterhoeven 	if (child) {
23586c364062SGeert Uytterhoeven 		/* Remove registered slave */
23596c364062SGeert Uytterhoeven 		device_unregister(child);
23606c364062SGeert Uytterhoeven 		put_device(child);
23616c364062SGeert Uytterhoeven 	}
23626c364062SGeert Uytterhoeven 
23636c364062SGeert Uytterhoeven 	if (strcmp(name, "(null)")) {
23646c364062SGeert Uytterhoeven 		/* Register new slave */
23656c364062SGeert Uytterhoeven 		spi = spi_alloc_device(ctlr);
23666c364062SGeert Uytterhoeven 		if (!spi)
23676c364062SGeert Uytterhoeven 			return -ENOMEM;
23686c364062SGeert Uytterhoeven 
23696c364062SGeert Uytterhoeven 		strlcpy(spi->modalias, name, sizeof(spi->modalias));
23706c364062SGeert Uytterhoeven 
23716c364062SGeert Uytterhoeven 		rc = spi_add_device(spi);
23726c364062SGeert Uytterhoeven 		if (rc) {
23736c364062SGeert Uytterhoeven 			spi_dev_put(spi);
23746c364062SGeert Uytterhoeven 			return rc;
23756c364062SGeert Uytterhoeven 		}
23766c364062SGeert Uytterhoeven 	}
23776c364062SGeert Uytterhoeven 
23786c364062SGeert Uytterhoeven 	return count;
23796c364062SGeert Uytterhoeven }
23806c364062SGeert Uytterhoeven 
2381cc8b4659SGeert Uytterhoeven static DEVICE_ATTR_RW(slave);
23826c364062SGeert Uytterhoeven 
23836c364062SGeert Uytterhoeven static struct attribute *spi_slave_attrs[] = {
23846c364062SGeert Uytterhoeven 	&dev_attr_slave.attr,
23856c364062SGeert Uytterhoeven 	NULL,
23866c364062SGeert Uytterhoeven };
23876c364062SGeert Uytterhoeven 
23886c364062SGeert Uytterhoeven static const struct attribute_group spi_slave_group = {
23896c364062SGeert Uytterhoeven 	.attrs = spi_slave_attrs,
23906c364062SGeert Uytterhoeven };
23916c364062SGeert Uytterhoeven 
23926c364062SGeert Uytterhoeven static const struct attribute_group *spi_slave_groups[] = {
23938caab75fSGeert Uytterhoeven 	&spi_controller_statistics_group,
23946c364062SGeert Uytterhoeven 	&spi_slave_group,
23956c364062SGeert Uytterhoeven 	NULL,
23966c364062SGeert Uytterhoeven };
23976c364062SGeert Uytterhoeven 
23986c364062SGeert Uytterhoeven static struct class spi_slave_class = {
23996c364062SGeert Uytterhoeven 	.name		= "spi_slave",
24006c364062SGeert Uytterhoeven 	.owner		= THIS_MODULE,
24018caab75fSGeert Uytterhoeven 	.dev_release	= spi_controller_release,
24026c364062SGeert Uytterhoeven 	.dev_groups	= spi_slave_groups,
24036c364062SGeert Uytterhoeven };
24046c364062SGeert Uytterhoeven #else
24056c364062SGeert Uytterhoeven extern struct class spi_slave_class;	/* dummy */
24066c364062SGeert Uytterhoeven #endif
24078ae12a0dSDavid Brownell 
24088ae12a0dSDavid Brownell /**
24096c364062SGeert Uytterhoeven  * __spi_alloc_controller - allocate an SPI master or slave controller
24108ae12a0dSDavid Brownell  * @dev: the controller, possibly using the platform_bus
241133e34dc6SDavid Brownell  * @size: how much zeroed driver-private data to allocate; the pointer to this
2412229e6af1SLukas Wunner  *	memory is in the driver_data field of the returned device, accessible
2413229e6af1SLukas Wunner  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
2414229e6af1SLukas Wunner  *	drivers granting DMA access to portions of their private data need to
2415229e6af1SLukas Wunner  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
24166c364062SGeert Uytterhoeven  * @slave: flag indicating whether to allocate an SPI master (false) or SPI
24176c364062SGeert Uytterhoeven  *	slave (true) controller
241833e34dc6SDavid Brownell  * Context: can sleep
24198ae12a0dSDavid Brownell  *
24206c364062SGeert Uytterhoeven  * This call is used only by SPI controller drivers, which are the
24218ae12a0dSDavid Brownell  * only ones directly touching chip registers.  It's how they allocate
24228caab75fSGeert Uytterhoeven  * an spi_controller structure, prior to calling spi_register_controller().
24238ae12a0dSDavid Brownell  *
242497d56dc6SJavier Martinez Canillas  * This must be called from context that can sleep.
24258ae12a0dSDavid Brownell  *
24266c364062SGeert Uytterhoeven  * The caller is responsible for assigning the bus number and initializing the
24278caab75fSGeert Uytterhoeven  * controller's methods before calling spi_register_controller(), and (after
24288caab75fSGeert Uytterhoeven  * errors adding the device) for calling spi_controller_put() to prevent a
24298caab75fSGeert Uytterhoeven  * memory leak.
243097d56dc6SJavier Martinez Canillas  *
24316c364062SGeert Uytterhoeven  * Return: the SPI controller structure on success, else NULL.
24328ae12a0dSDavid Brownell  */
24338caab75fSGeert Uytterhoeven struct spi_controller *__spi_alloc_controller(struct device *dev,
24346c364062SGeert Uytterhoeven 					      unsigned int size, bool slave)
24358ae12a0dSDavid Brownell {
24368caab75fSGeert Uytterhoeven 	struct spi_controller	*ctlr;
2437229e6af1SLukas Wunner 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
24388ae12a0dSDavid Brownell 
24390c868461SDavid Brownell 	if (!dev)
24400c868461SDavid Brownell 		return NULL;
24410c868461SDavid Brownell 
2442229e6af1SLukas Wunner 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
24438caab75fSGeert Uytterhoeven 	if (!ctlr)
24448ae12a0dSDavid Brownell 		return NULL;
24458ae12a0dSDavid Brownell 
24468caab75fSGeert Uytterhoeven 	device_initialize(&ctlr->dev);
24478caab75fSGeert Uytterhoeven 	ctlr->bus_num = -1;
24488caab75fSGeert Uytterhoeven 	ctlr->num_chipselect = 1;
24498caab75fSGeert Uytterhoeven 	ctlr->slave = slave;
24506c364062SGeert Uytterhoeven 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && slave)
24518caab75fSGeert Uytterhoeven 		ctlr->dev.class = &spi_slave_class;
24526c364062SGeert Uytterhoeven 	else
24538caab75fSGeert Uytterhoeven 		ctlr->dev.class = &spi_master_class;
24548caab75fSGeert Uytterhoeven 	ctlr->dev.parent = dev;
24558caab75fSGeert Uytterhoeven 	pm_suspend_ignore_children(&ctlr->dev, true);
2456229e6af1SLukas Wunner 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
24578ae12a0dSDavid Brownell 
24588caab75fSGeert Uytterhoeven 	return ctlr;
24598ae12a0dSDavid Brownell }
24606c364062SGeert Uytterhoeven EXPORT_SYMBOL_GPL(__spi_alloc_controller);
24618ae12a0dSDavid Brownell 
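/*
 * Example (illustrative sketch, not an in-tree driver): the usual pairing of
 * spi_alloc_master() - a wrapper around __spi_alloc_controller() - with
 * spi_register_controller() in a platform driver probe().  The foo_ names,
 * the chip-select count and the empty transfer_one body are assumptions made
 * for the sketch.
 */
struct foo_spi {
	void __iomem *base;
};

static int foo_spi_transfer_one(struct spi_controller *ctlr,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	/* shift xfer->tx_buf out and xfer->rx_buf in through the hardware */
	return 0;	/* 0 means the transfer is already finished */
}

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	struct foo_spi *fs;
	int ret;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*fs));
	if (!ctlr)
		return -ENOMEM;

	fs = spi_controller_get_devdata(ctlr);	/* the zeroed private area */
	ctlr->dev.of_node = pdev->dev.of_node;
	ctlr->num_chipselect = 4;
	ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
	ctlr->transfer_one = foo_spi_transfer_one;
	platform_set_drvdata(pdev, ctlr);

	ret = spi_register_controller(ctlr);
	if (ret)
		spi_controller_put(ctlr);	/* as required by the kerneldoc above */

	return ret;
}
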
24625e844cc3SLukas Wunner static void devm_spi_release_controller(struct device *dev, void *ctlr)
24635e844cc3SLukas Wunner {
24645e844cc3SLukas Wunner 	spi_controller_put(*(struct spi_controller **)ctlr);
24655e844cc3SLukas Wunner }
24665e844cc3SLukas Wunner 
24675e844cc3SLukas Wunner /**
24685e844cc3SLukas Wunner  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
24695e844cc3SLukas Wunner  * @dev: physical device of SPI controller
24705e844cc3SLukas Wunner  * @size: how much zeroed driver-private data to allocate
24715e844cc3SLukas Wunner  * @slave: whether to allocate an SPI master (false) or SPI slave (true)
24725e844cc3SLukas Wunner  * Context: can sleep
24735e844cc3SLukas Wunner  *
24745e844cc3SLukas Wunner  * Allocate an SPI controller and automatically release a reference on it
24755e844cc3SLukas Wunner  * when @dev is unbound from its driver.  Drivers are thus relieved from
24765e844cc3SLukas Wunner  * having to call spi_controller_put().
24775e844cc3SLukas Wunner  *
24785e844cc3SLukas Wunner  * The arguments to this function are identical to __spi_alloc_controller().
24795e844cc3SLukas Wunner  *
24805e844cc3SLukas Wunner  * Return: the SPI controller structure on success, else NULL.
24815e844cc3SLukas Wunner  */
24825e844cc3SLukas Wunner struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
24835e844cc3SLukas Wunner 						   unsigned int size,
24845e844cc3SLukas Wunner 						   bool slave)
24855e844cc3SLukas Wunner {
24865e844cc3SLukas Wunner 	struct spi_controller **ptr, *ctlr;
24875e844cc3SLukas Wunner 
24885e844cc3SLukas Wunner 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
24895e844cc3SLukas Wunner 			   GFP_KERNEL);
24905e844cc3SLukas Wunner 	if (!ptr)
24915e844cc3SLukas Wunner 		return NULL;
24925e844cc3SLukas Wunner 
24935e844cc3SLukas Wunner 	ctlr = __spi_alloc_controller(dev, size, slave);
24945e844cc3SLukas Wunner 	if (ctlr) {
2495794aaf01SWilliam A. Kennington III 		ctlr->devm_allocated = true;
24965e844cc3SLukas Wunner 		*ptr = ctlr;
24975e844cc3SLukas Wunner 		devres_add(dev, ptr);
24985e844cc3SLukas Wunner 	} else {
24995e844cc3SLukas Wunner 		devres_free(ptr);
25005e844cc3SLukas Wunner 	}
25015e844cc3SLukas Wunner 
25025e844cc3SLukas Wunner 	return ctlr;
25035e844cc3SLukas Wunner }
25045e844cc3SLukas Wunner EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
25055e844cc3SLukas Wunner 
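/*
 * Example (illustrative sketch, not an in-tree driver): with the devm_
 * variant the error path needs no spi_controller_put(), and pairing it with
 * devm_spi_register_controller() (further below) leaves nothing for the
 * driver to undo in remove().  The bar_ names are assumptions made for the
 * sketch.
 */
struct bar_spi {
	void __iomem *base;
};

static int bar_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct bar_spi));
	if (!ctlr)
		return -ENOMEM;

	/* ... fill in ctlr->num_chipselect, ->mode_bits, ->transfer_one ... */

	return devm_spi_register_controller(&pdev->dev, ctlr);
}
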
250674317984SJean-Christophe PLAGNIOL-VILLARD #ifdef CONFIG_OF
250743004f31SLinus Walleij static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
250874317984SJean-Christophe PLAGNIOL-VILLARD {
2509e80beb27SGrant Likely 	int nb, i, *cs;
25108caab75fSGeert Uytterhoeven 	struct device_node *np = ctlr->dev.of_node;
251174317984SJean-Christophe PLAGNIOL-VILLARD 
251274317984SJean-Christophe PLAGNIOL-VILLARD 	if (!np)
251374317984SJean-Christophe PLAGNIOL-VILLARD 		return 0;
251474317984SJean-Christophe PLAGNIOL-VILLARD 
251574317984SJean-Christophe PLAGNIOL-VILLARD 	nb = of_gpio_named_count(np, "cs-gpios");
25168caab75fSGeert Uytterhoeven 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
251774317984SJean-Christophe PLAGNIOL-VILLARD 
25188ec5d84eSAndreas Larsson 	/* Return error only for an incorrectly formed cs-gpios property */
25198ec5d84eSAndreas Larsson 	if (nb == 0 || nb == -ENOENT)
252074317984SJean-Christophe PLAGNIOL-VILLARD 		return 0;
25218ec5d84eSAndreas Larsson 	else if (nb < 0)
25228ec5d84eSAndreas Larsson 		return nb;
252374317984SJean-Christophe PLAGNIOL-VILLARD 
2524a86854d0SKees Cook 	cs = devm_kcalloc(&ctlr->dev, ctlr->num_chipselect, sizeof(int),
252574317984SJean-Christophe PLAGNIOL-VILLARD 			  GFP_KERNEL);
25268caab75fSGeert Uytterhoeven 	ctlr->cs_gpios = cs;
252774317984SJean-Christophe PLAGNIOL-VILLARD 
25288caab75fSGeert Uytterhoeven 	if (!ctlr->cs_gpios)
252974317984SJean-Christophe PLAGNIOL-VILLARD 		return -ENOMEM;
253074317984SJean-Christophe PLAGNIOL-VILLARD 
25318caab75fSGeert Uytterhoeven 	for (i = 0; i < ctlr->num_chipselect; i++)
2532446411e1SAndreas Larsson 		cs[i] = -ENOENT;
253374317984SJean-Christophe PLAGNIOL-VILLARD 
253474317984SJean-Christophe PLAGNIOL-VILLARD 	for (i = 0; i < nb; i++)
253574317984SJean-Christophe PLAGNIOL-VILLARD 		cs[i] = of_get_named_gpio(np, "cs-gpios", i);
253674317984SJean-Christophe PLAGNIOL-VILLARD 
253774317984SJean-Christophe PLAGNIOL-VILLARD 	return 0;
253874317984SJean-Christophe PLAGNIOL-VILLARD }
253974317984SJean-Christophe PLAGNIOL-VILLARD #else
254043004f31SLinus Walleij static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
254174317984SJean-Christophe PLAGNIOL-VILLARD {
254274317984SJean-Christophe PLAGNIOL-VILLARD 	return 0;
254374317984SJean-Christophe PLAGNIOL-VILLARD }
254474317984SJean-Christophe PLAGNIOL-VILLARD #endif
254574317984SJean-Christophe PLAGNIOL-VILLARD 
2546f3186dd8SLinus Walleij /**
2547f3186dd8SLinus Walleij  * spi_get_gpio_descs() - grab chip select GPIOs for the master
2548f3186dd8SLinus Walleij  * @ctlr: The SPI master to grab GPIO descriptors for
2549f3186dd8SLinus Walleij  */
2550f3186dd8SLinus Walleij static int spi_get_gpio_descs(struct spi_controller *ctlr)
2551f3186dd8SLinus Walleij {
2552f3186dd8SLinus Walleij 	int nb, i;
2553f3186dd8SLinus Walleij 	struct gpio_desc **cs;
2554f3186dd8SLinus Walleij 	struct device *dev = &ctlr->dev;
25557d93aecdSGeert Uytterhoeven 	unsigned long native_cs_mask = 0;
25567d93aecdSGeert Uytterhoeven 	unsigned int num_cs_gpios = 0;
2557f3186dd8SLinus Walleij 
2558f3186dd8SLinus Walleij 	nb = gpiod_count(dev, "cs");
255931ed8ebcSAndy Shevchenko 	if (nb < 0) {
2560f3186dd8SLinus Walleij 		/* No GPIOs at all is fine, else return the error */
256131ed8ebcSAndy Shevchenko 		if (nb == -ENOENT)
2562f3186dd8SLinus Walleij 			return 0;
2563f3186dd8SLinus Walleij 		return nb;
256431ed8ebcSAndy Shevchenko 	}
256531ed8ebcSAndy Shevchenko 
256631ed8ebcSAndy Shevchenko 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
2567f3186dd8SLinus Walleij 
2568f3186dd8SLinus Walleij 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
2569f3186dd8SLinus Walleij 			  GFP_KERNEL);
2570f3186dd8SLinus Walleij 	if (!cs)
2571f3186dd8SLinus Walleij 		return -ENOMEM;
2572f3186dd8SLinus Walleij 	ctlr->cs_gpiods = cs;
2573f3186dd8SLinus Walleij 
2574f3186dd8SLinus Walleij 	for (i = 0; i < nb; i++) {
2575f3186dd8SLinus Walleij 		/*
2576f3186dd8SLinus Walleij 		 * Most chipselects are active low; the inverted
2577f3186dd8SLinus Walleij 		 * semantics are handled by special quirks in gpiolib, so
2578f3186dd8SLinus Walleij 		 * initializing them to GPIOD_OUT_LOW here means
2579f3186dd8SLinus Walleij 		 * "unasserted", and in most cases this will drive the
2580f3186dd8SLinus Walleij 		 * physical line high.
2581f3186dd8SLinus Walleij 		 */
2582f3186dd8SLinus Walleij 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
2583f3186dd8SLinus Walleij 						      GPIOD_OUT_LOW);
25841723fdecSGeert Uytterhoeven 		if (IS_ERR(cs[i]))
25851723fdecSGeert Uytterhoeven 			return PTR_ERR(cs[i]);
2586f3186dd8SLinus Walleij 
2587f3186dd8SLinus Walleij 		if (cs[i]) {
2588f3186dd8SLinus Walleij 			/*
2589f3186dd8SLinus Walleij 			 * If we find a CS GPIO, name it after the device and
2590f3186dd8SLinus Walleij 			 * chip select line.
2591f3186dd8SLinus Walleij 			 */
2592f3186dd8SLinus Walleij 			char *gpioname;
2593f3186dd8SLinus Walleij 
2594f3186dd8SLinus Walleij 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
2595f3186dd8SLinus Walleij 						  dev_name(dev), i);
2596f3186dd8SLinus Walleij 			if (!gpioname)
2597f3186dd8SLinus Walleij 				return -ENOMEM;
2598f3186dd8SLinus Walleij 			gpiod_set_consumer_name(cs[i], gpioname);
25997d93aecdSGeert Uytterhoeven 			num_cs_gpios++;
26007d93aecdSGeert Uytterhoeven 			continue;
2601f3186dd8SLinus Walleij 		}
26027d93aecdSGeert Uytterhoeven 
26037d93aecdSGeert Uytterhoeven 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
26047d93aecdSGeert Uytterhoeven 			dev_err(dev, "Invalid native chip select %d\n", i);
26057d93aecdSGeert Uytterhoeven 			return -EINVAL;
26067d93aecdSGeert Uytterhoeven 		}
26077d93aecdSGeert Uytterhoeven 		native_cs_mask |= BIT(i);
26087d93aecdSGeert Uytterhoeven 	}
26097d93aecdSGeert Uytterhoeven 
26107d93aecdSGeert Uytterhoeven 	ctlr->unused_native_cs = ffz(native_cs_mask);
26117d93aecdSGeert Uytterhoeven 	if (num_cs_gpios && ctlr->max_native_cs &&
26127d93aecdSGeert Uytterhoeven 	    ctlr->unused_native_cs >= ctlr->max_native_cs) {
26137d93aecdSGeert Uytterhoeven 		dev_err(dev, "No unused native chip select available\n");
26147d93aecdSGeert Uytterhoeven 		return -EINVAL;
2615f3186dd8SLinus Walleij 	}
2616f3186dd8SLinus Walleij 
2617f3186dd8SLinus Walleij 	return 0;
2618f3186dd8SLinus Walleij }
2619f3186dd8SLinus Walleij 
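/*
 * Example (illustrative sketch, not an in-tree driver): spi_get_gpio_descs()
 * only runs from spi_register_controller() when the controller driver opts
 * in beforehand, as below.  The core then picks up "cs-gpios" from firmware,
 * extends num_chipselect to cover them and records one unused native chip
 * select in ctlr->unused_native_cs for GPIO-driven transfers.  The limit of
 * four native chip selects is an assumption made for the sketch.
 */
static void foo_spi_enable_cs_gpios(struct spi_controller *ctlr)
{
	/* let the core request "cs-gpios" as GPIO descriptors ... */
	ctlr->use_gpio_descriptors = true;
	/* ... and tell it how many native chip selects the IP really has */
	ctlr->max_native_cs = 4;
}
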
2620bdf3a3b5SBoris Brezillon static int spi_controller_check_ops(struct spi_controller *ctlr)
2621bdf3a3b5SBoris Brezillon {
2622bdf3a3b5SBoris Brezillon 	/*
2623b5932f5cSBoris Brezillon 	 * The controller may implement only the high-level SPI-memory-like
2624b5932f5cSBoris Brezillon 	 * operations if it does not support regular SPI transfers, and this is
2625b5932f5cSBoris Brezillon 	 * a valid use case.
2626b5932f5cSBoris Brezillon 	 * If ->mem_ops is NULL, we require that at least one of the
2627b5932f5cSBoris Brezillon 	 * ->transfer_xxx() methods be implemented.
2628bdf3a3b5SBoris Brezillon 	 */
2629b5932f5cSBoris Brezillon 	if (ctlr->mem_ops) {
2630b5932f5cSBoris Brezillon 		if (!ctlr->mem_ops->exec_op)
2631bdf3a3b5SBoris Brezillon 			return -EINVAL;
2632b5932f5cSBoris Brezillon 	} else if (!ctlr->transfer && !ctlr->transfer_one &&
2633b5932f5cSBoris Brezillon 		   !ctlr->transfer_one_message) {
2634b5932f5cSBoris Brezillon 		return -EINVAL;
2635b5932f5cSBoris Brezillon 	}
2636bdf3a3b5SBoris Brezillon 
2637bdf3a3b5SBoris Brezillon 	return 0;
2638bdf3a3b5SBoris Brezillon }
2639bdf3a3b5SBoris Brezillon 
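/*
 * Example (illustrative sketch, not an in-tree driver): a controller that
 * only accelerates SPI memory operations passes spi_controller_check_ops()
 * by supplying mem_ops with ->exec_op and no ->transfer* hook at all.  The
 * baz_ names and the empty exec_op body are assumptions made for the sketch.
 */
static int baz_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	/* program the opcode, address, dummy and data phases into hardware */
	return 0;
}

static const struct spi_controller_mem_ops baz_spi_mem_ops = {
	.exec_op = baz_spi_exec_op,
};

/* in probe(): ctlr->mem_ops = &baz_spi_mem_ops; no transfer_one is needed */
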
26408ae12a0dSDavid Brownell /**
26418caab75fSGeert Uytterhoeven  * spi_register_controller - register SPI master or slave controller
26428caab75fSGeert Uytterhoeven  * @ctlr: initialized master, originally from spi_alloc_master() or
26438caab75fSGeert Uytterhoeven  *	spi_alloc_slave()
264433e34dc6SDavid Brownell  * Context: can sleep
26458ae12a0dSDavid Brownell  *
26468caab75fSGeert Uytterhoeven  * SPI controllers connect to their drivers using some non-SPI bus,
26478ae12a0dSDavid Brownell  * such as the platform bus.  The final stage of probe() in that code
26488caab75fSGeert Uytterhoeven  * includes calling spi_register_controller() to hook up to this SPI bus glue.
26498ae12a0dSDavid Brownell  *
26508ae12a0dSDavid Brownell  * SPI controllers use board specific (often SOC specific) bus numbers,
26518ae12a0dSDavid Brownell  * and board-specific addressing for SPI devices combines those numbers
26528ae12a0dSDavid Brownell  * with chip select numbers.  Since SPI does not directly support dynamic
26538ae12a0dSDavid Brownell  * device identification, boards need configuration tables telling which
26548ae12a0dSDavid Brownell  * chip is at which address.
26558ae12a0dSDavid Brownell  *
26568ae12a0dSDavid Brownell  * This must be called from context that can sleep.  It returns zero on
26578caab75fSGeert Uytterhoeven  * success, else a negative error code (dropping the controller's refcount).
26580c868461SDavid Brownell  * After a successful return, the caller is responsible for calling
26598caab75fSGeert Uytterhoeven  * spi_unregister_controller().
266097d56dc6SJavier Martinez Canillas  *
266197d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
26628ae12a0dSDavid Brownell  */
26638caab75fSGeert Uytterhoeven int spi_register_controller(struct spi_controller *ctlr)
26648ae12a0dSDavid Brownell {
26658caab75fSGeert Uytterhoeven 	struct device		*dev = ctlr->dev.parent;
26662b9603a0SFeng Tang 	struct boardinfo	*bi;
2667b93318a2SSergei Shtylyov 	int			status;
266842bdd706SLucas Stach 	int			id, first_dynamic;
26698ae12a0dSDavid Brownell 
26700c868461SDavid Brownell 	if (!dev)
26710c868461SDavid Brownell 		return -ENODEV;
26720c868461SDavid Brownell 
2673bdf3a3b5SBoris Brezillon 	/*
2674bdf3a3b5SBoris Brezillon 	 * Make sure all necessary hooks are implemented before registering
2675bdf3a3b5SBoris Brezillon 	 * the SPI controller.
2676bdf3a3b5SBoris Brezillon 	 */
2677bdf3a3b5SBoris Brezillon 	status = spi_controller_check_ops(ctlr);
2678bdf3a3b5SBoris Brezillon 	if (status)
2679bdf3a3b5SBoris Brezillon 		return status;
2680bdf3a3b5SBoris Brezillon 
268104b2d03aSGeert Uytterhoeven 	if (ctlr->bus_num >= 0) {
268204b2d03aSGeert Uytterhoeven 		/* devices with a fixed bus num must check-in with the num */
268304b2d03aSGeert Uytterhoeven 		mutex_lock(&board_lock);
268404b2d03aSGeert Uytterhoeven 		id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
268504b2d03aSGeert Uytterhoeven 			ctlr->bus_num + 1, GFP_KERNEL);
268604b2d03aSGeert Uytterhoeven 		mutex_unlock(&board_lock);
268704b2d03aSGeert Uytterhoeven 		if (WARN(id < 0, "couldn't get idr"))
268804b2d03aSGeert Uytterhoeven 			return id == -ENOSPC ? -EBUSY : id;
268904b2d03aSGeert Uytterhoeven 		ctlr->bus_num = id;
269004b2d03aSGeert Uytterhoeven 	} else if (ctlr->dev.of_node) {
26919b61e302SSuniel Mahesh 		/* use the bus number from the "spi" DT alias, if one is set */
26929b61e302SSuniel Mahesh 		id = of_alias_get_id(ctlr->dev.of_node, "spi");
26939b61e302SSuniel Mahesh 		if (id >= 0) {
26949b61e302SSuniel Mahesh 			ctlr->bus_num = id;
26959b61e302SSuniel Mahesh 			mutex_lock(&board_lock);
26969b61e302SSuniel Mahesh 			id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
26979b61e302SSuniel Mahesh 				       ctlr->bus_num + 1, GFP_KERNEL);
26989b61e302SSuniel Mahesh 			mutex_unlock(&board_lock);
26999b61e302SSuniel Mahesh 			if (WARN(id < 0, "couldn't get idr"))
27009b61e302SSuniel Mahesh 				return id == -ENOSPC ? -EBUSY : id;
27019b61e302SSuniel Mahesh 		}
27029b61e302SSuniel Mahesh 	}
27038caab75fSGeert Uytterhoeven 	if (ctlr->bus_num < 0) {
270442bdd706SLucas Stach 		first_dynamic = of_alias_get_highest_id("spi");
270542bdd706SLucas Stach 		if (first_dynamic < 0)
270642bdd706SLucas Stach 			first_dynamic = 0;
270742bdd706SLucas Stach 		else
270842bdd706SLucas Stach 			first_dynamic++;
270942bdd706SLucas Stach 
27109b61e302SSuniel Mahesh 		mutex_lock(&board_lock);
271142bdd706SLucas Stach 		id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
271242bdd706SLucas Stach 			       0, GFP_KERNEL);
27139b61e302SSuniel Mahesh 		mutex_unlock(&board_lock);
27149b61e302SSuniel Mahesh 		if (WARN(id < 0, "couldn't get idr"))
27159b61e302SSuniel Mahesh 			return id;
27169b61e302SSuniel Mahesh 		ctlr->bus_num = id;
27178ae12a0dSDavid Brownell 	}
27188caab75fSGeert Uytterhoeven 	INIT_LIST_HEAD(&ctlr->queue);
27198caab75fSGeert Uytterhoeven 	spin_lock_init(&ctlr->queue_lock);
27208caab75fSGeert Uytterhoeven 	spin_lock_init(&ctlr->bus_lock_spinlock);
27218caab75fSGeert Uytterhoeven 	mutex_init(&ctlr->bus_lock_mutex);
27228caab75fSGeert Uytterhoeven 	mutex_init(&ctlr->io_mutex);
27238caab75fSGeert Uytterhoeven 	ctlr->bus_lock_flag = 0;
27248caab75fSGeert Uytterhoeven 	init_completion(&ctlr->xfer_completion);
27258caab75fSGeert Uytterhoeven 	if (!ctlr->max_dma_len)
27268caab75fSGeert Uytterhoeven 		ctlr->max_dma_len = INT_MAX;
2727cf32b71eSErnst Schwab 
27288ae12a0dSDavid Brownell 	/* register the device, then userspace will see it.
27298ae12a0dSDavid Brownell 	 * registration fails if the bus ID is in use.
27308ae12a0dSDavid Brownell 	 */
27318caab75fSGeert Uytterhoeven 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
27320a919ae4SAndrey Smirnov 
27330a919ae4SAndrey Smirnov 	if (!spi_controller_is_slave(ctlr)) {
27340a919ae4SAndrey Smirnov 		if (ctlr->use_gpio_descriptors) {
27350a919ae4SAndrey Smirnov 			status = spi_get_gpio_descs(ctlr);
27360a919ae4SAndrey Smirnov 			if (status)
2737f9981d4fSAaro Koskinen 				goto free_bus_id;
27380a919ae4SAndrey Smirnov 			/*
27390a919ae4SAndrey Smirnov 			 * A controller using GPIO descriptors always
27400a919ae4SAndrey Smirnov 			 * supports SPI_CS_HIGH if need be.
27410a919ae4SAndrey Smirnov 			 */
27420a919ae4SAndrey Smirnov 			ctlr->mode_bits |= SPI_CS_HIGH;
27430a919ae4SAndrey Smirnov 		} else {
27440a919ae4SAndrey Smirnov 			/* Legacy code path for GPIOs from DT */
274543004f31SLinus Walleij 			status = of_spi_get_gpio_numbers(ctlr);
27460a919ae4SAndrey Smirnov 			if (status)
2747f9981d4fSAaro Koskinen 				goto free_bus_id;
27480a919ae4SAndrey Smirnov 		}
27490a919ae4SAndrey Smirnov 	}
27500a919ae4SAndrey Smirnov 
2751f9481b08STudor Ambarus 	/*
2752f9481b08STudor Ambarus 	 * Even if it's just one always-selected device, there must
2753f9481b08STudor Ambarus 	 * be at least one chipselect.
2754f9481b08STudor Ambarus 	 */
2755f9981d4fSAaro Koskinen 	if (!ctlr->num_chipselect) {
2756f9981d4fSAaro Koskinen 		status = -EINVAL;
2757f9981d4fSAaro Koskinen 		goto free_bus_id;
2758f9981d4fSAaro Koskinen 	}
2759f9481b08STudor Ambarus 
27608caab75fSGeert Uytterhoeven 	status = device_add(&ctlr->dev);
2761f9981d4fSAaro Koskinen 	if (status < 0)
2762f9981d4fSAaro Koskinen 		goto free_bus_id;
27639b61e302SSuniel Mahesh 	dev_dbg(dev, "registered %s %s\n",
27648caab75fSGeert Uytterhoeven 			spi_controller_is_slave(ctlr) ? "slave" : "master",
27659b61e302SSuniel Mahesh 			dev_name(&ctlr->dev));
27668ae12a0dSDavid Brownell 
2767b5932f5cSBoris Brezillon 	/*
2768b5932f5cSBoris Brezillon 	 * If we're using a queued driver, start the queue. Note that we don't
2769b5932f5cSBoris Brezillon 	 * need the queueing logic if the driver is only supporting high-level
2770b5932f5cSBoris Brezillon 	 * memory operations.
2771b5932f5cSBoris Brezillon 	 */
2772b5932f5cSBoris Brezillon 	if (ctlr->transfer) {
27738caab75fSGeert Uytterhoeven 		dev_info(dev, "controller is unqueued, this is deprecated\n");
2774b5932f5cSBoris Brezillon 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
27758caab75fSGeert Uytterhoeven 		status = spi_controller_initialize_queue(ctlr);
2776ffbbdd21SLinus Walleij 		if (status) {
27778caab75fSGeert Uytterhoeven 			device_del(&ctlr->dev);
2778f9981d4fSAaro Koskinen 			goto free_bus_id;
2779ffbbdd21SLinus Walleij 		}
2780ffbbdd21SLinus Walleij 	}
2781eca2ebc7SMartin Sperl 	/* add statistics */
27828caab75fSGeert Uytterhoeven 	spin_lock_init(&ctlr->statistics.lock);
2783ffbbdd21SLinus Walleij 
27842b9603a0SFeng Tang 	mutex_lock(&board_lock);
27858caab75fSGeert Uytterhoeven 	list_add_tail(&ctlr->list, &spi_controller_list);
27862b9603a0SFeng Tang 	list_for_each_entry(bi, &board_list, list)
27878caab75fSGeert Uytterhoeven 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
27882b9603a0SFeng Tang 	mutex_unlock(&board_lock);
27892b9603a0SFeng Tang 
279064bee4d2SMika Westerberg 	/* Register devices from the device tree and ACPI */
27918caab75fSGeert Uytterhoeven 	of_register_spi_devices(ctlr);
27928caab75fSGeert Uytterhoeven 	acpi_register_spi_devices(ctlr);
2793f9981d4fSAaro Koskinen 	return status;
2794f9981d4fSAaro Koskinen 
2795f9981d4fSAaro Koskinen free_bus_id:
2796f9981d4fSAaro Koskinen 	mutex_lock(&board_lock);
2797f9981d4fSAaro Koskinen 	idr_remove(&spi_master_idr, ctlr->bus_num);
2798f9981d4fSAaro Koskinen 	mutex_unlock(&board_lock);
27998ae12a0dSDavid Brownell 	return status;
28008ae12a0dSDavid Brownell }
28018caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_register_controller);
28028ae12a0dSDavid Brownell 
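/*
 * Example (illustrative sketch): the "configuration tables" mentioned in the
 * kerneldoc above are spi_board_info entries registered from board code;
 * spi_match_controller_to_boardinfo() matches them by bus number when the
 * controller registers.  The EEPROM chip, speed and numbering below are
 * assumptions made for the sketch.
 */
static const struct spi_board_info foo_board_spi_devices[] = {
	{
		.modalias	= "at25",		/* SPI EEPROM */
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

/*
 * Board init code would then call:
 *	spi_register_board_info(foo_board_spi_devices,
 *				ARRAY_SIZE(foo_board_spi_devices));
 */
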
280359ebbe40STian Tao static void devm_spi_unregister(void *ctlr)
2804666d5b4cSMark Brown {
280559ebbe40STian Tao 	spi_unregister_controller(ctlr);
2806666d5b4cSMark Brown }
2807666d5b4cSMark Brown 
2808666d5b4cSMark Brown /**
28098caab75fSGeert Uytterhoeven  * devm_spi_register_controller - register managed SPI master or slave
28108caab75fSGeert Uytterhoeven  *	controller
28118caab75fSGeert Uytterhoeven  * @dev:    device managing SPI controller
28128caab75fSGeert Uytterhoeven  * @ctlr: initialized controller, originally from spi_alloc_master() or
28138caab75fSGeert Uytterhoeven  *	spi_alloc_slave()
2814666d5b4cSMark Brown  * Context: can sleep
2815666d5b4cSMark Brown  *
28168caab75fSGeert Uytterhoeven  * Register an SPI controller as with spi_register_controller(); it will
281768b892f1SJohan Hovold  * automatically be unregistered and freed when @dev is unbound.
281897d56dc6SJavier Martinez Canillas  *
281997d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
2820666d5b4cSMark Brown  */
28218caab75fSGeert Uytterhoeven int devm_spi_register_controller(struct device *dev,
28228caab75fSGeert Uytterhoeven 				 struct spi_controller *ctlr)
2823666d5b4cSMark Brown {
2824666d5b4cSMark Brown 	int ret;
2825666d5b4cSMark Brown 
28268caab75fSGeert Uytterhoeven 	ret = spi_register_controller(ctlr);
282759ebbe40STian Tao 	if (ret)
2828666d5b4cSMark Brown 		return ret;
282959ebbe40STian Tao 
283059ebbe40STian Tao 	return devm_add_action_or_reset(dev, devm_spi_unregister, ctlr);
2831666d5b4cSMark Brown }
28328caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(devm_spi_register_controller);
2833666d5b4cSMark Brown 
283434860089SDavid Lamparter static int __unregister(struct device *dev, void *null)
28358ae12a0dSDavid Brownell {
28360c868461SDavid Brownell 	spi_unregister_device(to_spi_device(dev));
28378ae12a0dSDavid Brownell 	return 0;
28388ae12a0dSDavid Brownell }
28398ae12a0dSDavid Brownell 
28408ae12a0dSDavid Brownell /**
28418caab75fSGeert Uytterhoeven  * spi_unregister_controller - unregister SPI master or slave controller
28428caab75fSGeert Uytterhoeven  * @ctlr: the controller being unregistered
284333e34dc6SDavid Brownell  * Context: can sleep
28448ae12a0dSDavid Brownell  *
28458caab75fSGeert Uytterhoeven  * This call is used only by SPI controller drivers, which are the
28468ae12a0dSDavid Brownell  * only ones directly touching chip registers.
28478ae12a0dSDavid Brownell  *
28488ae12a0dSDavid Brownell  * This must be called from context that can sleep.
284968b892f1SJohan Hovold  *
285068b892f1SJohan Hovold  * Note that this function also drops a reference to the controller.
28518ae12a0dSDavid Brownell  */
28528caab75fSGeert Uytterhoeven void spi_unregister_controller(struct spi_controller *ctlr)
28538ae12a0dSDavid Brownell {
28549b61e302SSuniel Mahesh 	struct spi_controller *found;
285567f7b278SJohan Hovold 	int id = ctlr->bus_num;
285689fc9a1aSJeff Garzik 
2857ddf75be4SLukas Wunner 	/* Prevent addition of new devices, unregister existing ones */
2858ddf75be4SLukas Wunner 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2859ddf75be4SLukas Wunner 		mutex_lock(&spi_add_lock);
2860ddf75be4SLukas Wunner 
286184855678SLukas Wunner 	device_for_each_child(&ctlr->dev, NULL, __unregister);
286284855678SLukas Wunner 
28639b61e302SSuniel Mahesh 	/* First make sure that this controller was ever added */
28649b61e302SSuniel Mahesh 	mutex_lock(&board_lock);
286567f7b278SJohan Hovold 	found = idr_find(&spi_master_idr, id);
28669b61e302SSuniel Mahesh 	mutex_unlock(&board_lock);
28678caab75fSGeert Uytterhoeven 	if (ctlr->queued) {
28688caab75fSGeert Uytterhoeven 		if (spi_destroy_queue(ctlr))
28698caab75fSGeert Uytterhoeven 			dev_err(&ctlr->dev, "queue remove failed\n");
2870ffbbdd21SLinus Walleij 	}
28712b9603a0SFeng Tang 	mutex_lock(&board_lock);
28728caab75fSGeert Uytterhoeven 	list_del(&ctlr->list);
28732b9603a0SFeng Tang 	mutex_unlock(&board_lock);
28742b9603a0SFeng Tang 
28755e844cc3SLukas Wunner 	device_del(&ctlr->dev);
28765e844cc3SLukas Wunner 
28775e844cc3SLukas Wunner 	/* Release the last reference on the controller if its driver
28785e844cc3SLukas Wunner 	 * has not yet been converted to devm_spi_alloc_master/slave().
28795e844cc3SLukas Wunner 	 */
2880794aaf01SWilliam A. Kennington III 	if (!ctlr->devm_allocated)
28815e844cc3SLukas Wunner 		put_device(&ctlr->dev);
28825e844cc3SLukas Wunner 
28839b61e302SSuniel Mahesh 	/* free bus id */
28849b61e302SSuniel Mahesh 	mutex_lock(&board_lock);
2885613bd1eaSJarkko Nikula 	if (found == ctlr)
288667f7b278SJohan Hovold 		idr_remove(&spi_master_idr, id);
28879b61e302SSuniel Mahesh 	mutex_unlock(&board_lock);
2888ddf75be4SLukas Wunner 
2889ddf75be4SLukas Wunner 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
2890ddf75be4SLukas Wunner 		mutex_unlock(&spi_add_lock);
28918ae12a0dSDavid Brownell }
28928caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_unregister_controller);
28938ae12a0dSDavid Brownell 
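/*
 * Example (illustrative sketch, not an in-tree driver): the remove()
 * counterpart to a probe() that used spi_alloc_master() +
 * spi_register_controller().  Since spi_unregister_controller() drops a
 * reference itself (see the note above), no extra spi_controller_put() is
 * needed for a non-devm controller.  The foo_ name is an assumption made
 * for the sketch.
 */
static int foo_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);

	spi_unregister_controller(ctlr);

	return 0;
}
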
28948caab75fSGeert Uytterhoeven int spi_controller_suspend(struct spi_controller *ctlr)
2895ffbbdd21SLinus Walleij {
2896ffbbdd21SLinus Walleij 	int ret;
2897ffbbdd21SLinus Walleij 
28988caab75fSGeert Uytterhoeven 	/* Basically no-ops for non-queued controllers */
28998caab75fSGeert Uytterhoeven 	if (!ctlr->queued)
2900ffbbdd21SLinus Walleij 		return 0;
2901ffbbdd21SLinus Walleij 
29028caab75fSGeert Uytterhoeven 	ret = spi_stop_queue(ctlr);
2903ffbbdd21SLinus Walleij 	if (ret)
29048caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "queue stop failed\n");
2905ffbbdd21SLinus Walleij 
2906ffbbdd21SLinus Walleij 	return ret;
2907ffbbdd21SLinus Walleij }
29088caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_controller_suspend);
2909ffbbdd21SLinus Walleij 
29108caab75fSGeert Uytterhoeven int spi_controller_resume(struct spi_controller *ctlr)
2911ffbbdd21SLinus Walleij {
2912ffbbdd21SLinus Walleij 	int ret;
2913ffbbdd21SLinus Walleij 
29148caab75fSGeert Uytterhoeven 	if (!ctlr->queued)
2915ffbbdd21SLinus Walleij 		return 0;
2916ffbbdd21SLinus Walleij 
29178caab75fSGeert Uytterhoeven 	ret = spi_start_queue(ctlr);
2918ffbbdd21SLinus Walleij 	if (ret)
29198caab75fSGeert Uytterhoeven 		dev_err(&ctlr->dev, "queue restart failed\n");
2920ffbbdd21SLinus Walleij 
2921ffbbdd21SLinus Walleij 	return ret;
2922ffbbdd21SLinus Walleij }
29238caab75fSGeert Uytterhoeven EXPORT_SYMBOL_GPL(spi_controller_resume);
2924ffbbdd21SLinus Walleij 
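/*
 * Example (illustrative sketch, not an in-tree driver): system sleep hooks
 * of a controller driver usually bracket their own clock and register
 * handling with these two helpers, so the message queue is quiesced first
 * on suspend and restarted last on resume.  The foo_ names are assumptions
 * made for the sketch.
 */
static int __maybe_unused foo_spi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(ctlr);	/* stop the queue */
	if (ret)
		return ret;

	/* ... gate clocks, save controller registers ... */
	return 0;
}

static int __maybe_unused foo_spi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	/* ... ungate clocks, restore controller registers ... */

	return spi_controller_resume(ctlr);	/* restart the queue */
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
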
29258caab75fSGeert Uytterhoeven static int __spi_controller_match(struct device *dev, const void *data)
29265ed2c832SDave Young {
29278caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr;
29289f3b795aSMichał Mirosław 	const u16 *bus_num = data;
29295ed2c832SDave Young 
29308caab75fSGeert Uytterhoeven 	ctlr = container_of(dev, struct spi_controller, dev);
29318caab75fSGeert Uytterhoeven 	return ctlr->bus_num == *bus_num;
29325ed2c832SDave Young }
29335ed2c832SDave Young 
29348ae12a0dSDavid Brownell /**
29358ae12a0dSDavid Brownell  * spi_busnum_to_master - look up master associated with bus_num
29368ae12a0dSDavid Brownell  * @bus_num: the master's bus number
293733e34dc6SDavid Brownell  * Context: can sleep
29388ae12a0dSDavid Brownell  *
29398ae12a0dSDavid Brownell  * This call may be used with devices that are registered after
29408ae12a0dSDavid Brownell  * arch init time.  It returns a refcounted pointer to the relevant
29418caab75fSGeert Uytterhoeven  * spi_controller (which the caller must release), or NULL if there is
29428ae12a0dSDavid Brownell  * no such master registered.
294397d56dc6SJavier Martinez Canillas  *
294497d56dc6SJavier Martinez Canillas  * Return: the SPI master structure on success, else NULL.
29458ae12a0dSDavid Brownell  */
29468caab75fSGeert Uytterhoeven struct spi_controller *spi_busnum_to_master(u16 bus_num)
29478ae12a0dSDavid Brownell {
294849dce689STony Jones 	struct device		*dev;
29498caab75fSGeert Uytterhoeven 	struct spi_controller	*ctlr = NULL;
29508ae12a0dSDavid Brownell 
2951695794aeSGreg Kroah-Hartman 	dev = class_find_device(&spi_master_class, NULL, &bus_num,
29528caab75fSGeert Uytterhoeven 				__spi_controller_match);
29535ed2c832SDave Young 	if (dev)
29548caab75fSGeert Uytterhoeven 		ctlr = container_of(dev, struct spi_controller, dev);
29555ed2c832SDave Young 	/* reference got in class_find_device */
29568caab75fSGeert Uytterhoeven 	return ctlr;
29578ae12a0dSDavid Brownell }
29588ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_busnum_to_master);
29598ae12a0dSDavid Brownell 
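/*
 * Example (illustrative sketch): late board code can look up an already
 * registered controller by bus number and hot-add a device to it; the
 * reference taken by the lookup must be released afterwards.  The bus
 * number and the chip described below are assumptions made for the sketch.
 */
static int foo_add_late_spi_device(void)
{
	struct spi_board_info chip = {
		.modalias	= "spidev",
		.max_speed_hz	= 1 * 1000 * 1000,
		.chip_select	= 0,
	};
	struct spi_controller *ctlr;
	struct spi_device *spi;

	ctlr = spi_busnum_to_master(1);
	if (!ctlr)
		return -ENODEV;

	spi = spi_new_device(ctlr, &chip);
	spi_controller_put(ctlr);	/* drop the lookup reference */

	return spi ? 0 : -ENODEV;
}
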
2960d780c371SMartin Sperl /*-------------------------------------------------------------------------*/
2961d780c371SMartin Sperl 
2962d780c371SMartin Sperl /* Core methods for SPI resource management */
2963d780c371SMartin Sperl 
2964d780c371SMartin Sperl /**
2965d780c371SMartin Sperl  * spi_res_alloc - allocate a spi resource that is life-cycle managed
2966d780c371SMartin Sperl  *                 during the processing of a spi_message while using
2967d780c371SMartin Sperl  *                 spi_transfer_one
2968d780c371SMartin Sperl  * @spi:     the spi device for which we allocate memory
2969d780c371SMartin Sperl  * @release: the release code to execute for this resource
2970d780c371SMartin Sperl  * @size:    size to alloc and return
2971d780c371SMartin Sperl  * @gfp:     GFP allocation flags
2972d780c371SMartin Sperl  *
2973d780c371SMartin Sperl  * Return: the pointer to the allocated data
2974d780c371SMartin Sperl  *
2975d780c371SMartin Sperl  * This may get enhanced in the future to allocate from a memory pool
29768caab75fSGeert Uytterhoeven  * of the @spi_device or @spi_controller to avoid repeated allocations.
2977d780c371SMartin Sperl  */
2978d780c371SMartin Sperl void *spi_res_alloc(struct spi_device *spi,
2979d780c371SMartin Sperl 		    spi_res_release_t release,
2980d780c371SMartin Sperl 		    size_t size, gfp_t gfp)
2981d780c371SMartin Sperl {
2982d780c371SMartin Sperl 	struct spi_res *sres;
2983d780c371SMartin Sperl 
2984d780c371SMartin Sperl 	sres = kzalloc(sizeof(*sres) + size, gfp);
2985d780c371SMartin Sperl 	if (!sres)
2986d780c371SMartin Sperl 		return NULL;
2987d780c371SMartin Sperl 
2988d780c371SMartin Sperl 	INIT_LIST_HEAD(&sres->entry);
2989d780c371SMartin Sperl 	sres->release = release;
2990d780c371SMartin Sperl 
2991d780c371SMartin Sperl 	return sres->data;
2992d780c371SMartin Sperl }
2993d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_alloc);
2994d780c371SMartin Sperl 
2995d780c371SMartin Sperl /**
2996d780c371SMartin Sperl  * spi_res_free - free an spi resource
2997d780c371SMartin Sperl  * @res: pointer to the custom data of a resource
2998d780c371SMartin Sperl  *
2999d780c371SMartin Sperl  */
3000d780c371SMartin Sperl void spi_res_free(void *res)
3001d780c371SMartin Sperl {
3002d780c371SMartin Sperl 	struct spi_res *sres = container_of(res, struct spi_res, data);
3003d780c371SMartin Sperl 
3004d780c371SMartin Sperl 	if (!res)
3005d780c371SMartin Sperl 		return;
3006d780c371SMartin Sperl 
3007d780c371SMartin Sperl 	WARN_ON(!list_empty(&sres->entry));
3008d780c371SMartin Sperl 	kfree(sres);
3009d780c371SMartin Sperl }
3010d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_free);
3011d780c371SMartin Sperl 
3012d780c371SMartin Sperl /**
3013d780c371SMartin Sperl  * spi_res_add - add a spi_res to the spi_message
3014d780c371SMartin Sperl  * @message: the spi message
3015d780c371SMartin Sperl  * @res:     the spi_resource
3016d780c371SMartin Sperl  */
3017d780c371SMartin Sperl void spi_res_add(struct spi_message *message, void *res)
3018d780c371SMartin Sperl {
3019d780c371SMartin Sperl 	struct spi_res *sres = container_of(res, struct spi_res, data);
3020d780c371SMartin Sperl 
3021d780c371SMartin Sperl 	WARN_ON(!list_empty(&sres->entry));
3022d780c371SMartin Sperl 	list_add_tail(&sres->entry, &message->resources);
3023d780c371SMartin Sperl }
3024d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_add);
3025d780c371SMartin Sperl 
3026d780c371SMartin Sperl /**
3027d780c371SMartin Sperl  * spi_res_release - release all spi resources for this message
30288caab75fSGeert Uytterhoeven  * @ctlr:  the @spi_controller
3029d780c371SMartin Sperl  * @message: the @spi_message
3030d780c371SMartin Sperl  */
30318caab75fSGeert Uytterhoeven void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
3032d780c371SMartin Sperl {
3033f5694369SVladimir Zapolskiy 	struct spi_res *res, *tmp;
3034d780c371SMartin Sperl 
3035f5694369SVladimir Zapolskiy 	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
3036d780c371SMartin Sperl 		if (res->release)
30378caab75fSGeert Uytterhoeven 			res->release(ctlr, message, res->data);
3038d780c371SMartin Sperl 
3039d780c371SMartin Sperl 		list_del(&res->entry);
3040d780c371SMartin Sperl 
3041d780c371SMartin Sperl 		kfree(res);
3042d780c371SMartin Sperl 	}
3043d780c371SMartin Sperl }
3044d780c371SMartin Sperl EXPORT_SYMBOL_GPL(spi_res_release);
30458ae12a0dSDavid Brownell 
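/*
 * Example (illustrative sketch): tying a scratch buffer to the life cycle of
 * a message with the spi_res API above; the memory is freed together with
 * the message when the core calls spi_res_release().  The foo_ names and the
 * empty release callback are assumptions made for the sketch.
 */
static void foo_res_release(struct spi_controller *ctlr,
			    struct spi_message *msg, void *res)
{
	/* nothing to do beyond the kfree() performed by spi_res_release() */
}

static int foo_attach_scratch(struct spi_device *spi, struct spi_message *msg,
			      size_t len)
{
	void *scratch;

	scratch = spi_res_alloc(spi, foo_res_release, len, GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	spi_res_add(msg, scratch);	/* released with the message */

	return 0;
}
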
30468ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
30478ae12a0dSDavid Brownell 
3048523baf5aSMartin Sperl /* Core methods for spi_message alterations */
3049523baf5aSMartin Sperl 
30508caab75fSGeert Uytterhoeven static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3051523baf5aSMartin Sperl 					    struct spi_message *msg,
3052523baf5aSMartin Sperl 					    void *res)
3053523baf5aSMartin Sperl {
3054523baf5aSMartin Sperl 	struct spi_replaced_transfers *rxfer = res;
3055523baf5aSMartin Sperl 	size_t i;
3056523baf5aSMartin Sperl 
3057523baf5aSMartin Sperl 	/* call extra callback if requested */
3058523baf5aSMartin Sperl 	if (rxfer->release)
30598caab75fSGeert Uytterhoeven 		rxfer->release(ctlr, msg, res);
3060523baf5aSMartin Sperl 
3061523baf5aSMartin Sperl 	/* insert replaced transfers back into the message */
3062523baf5aSMartin Sperl 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3063523baf5aSMartin Sperl 
3064523baf5aSMartin Sperl 	/* remove the formerly inserted entries */
3065523baf5aSMartin Sperl 	for (i = 0; i < rxfer->inserted; i++)
3066523baf5aSMartin Sperl 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3067523baf5aSMartin Sperl }
3068523baf5aSMartin Sperl 
3069523baf5aSMartin Sperl /**
3070523baf5aSMartin Sperl  * spi_replace_transfers - replace transfers with several transfers
3071523baf5aSMartin Sperl  *                         and register change with spi_message.resources
3072523baf5aSMartin Sperl  * @msg:           the spi_message we work upon
3073523baf5aSMartin Sperl  * @xfer_first:    the first spi_transfer we want to replace
3074523baf5aSMartin Sperl  * @remove:        number of transfers to remove
3075523baf5aSMartin Sperl  * @insert:        the number of transfers we want to insert instead
3076523baf5aSMartin Sperl  * @release:       extra release code necessary in some circumstances
3077523baf5aSMartin Sperl  * @extradatasize: extra data to allocate (with alignment guarantees
3078523baf5aSMartin Sperl  *                 of struct @spi_transfer)
307905885397SMartin Sperl  * @gfp:           gfp flags
3080523baf5aSMartin Sperl  *
3081523baf5aSMartin Sperl  * Returns: pointer to @spi_replaced_transfers,
3082523baf5aSMartin Sperl  *          PTR_ERR(...) in case of errors.
3083523baf5aSMartin Sperl  */
3084523baf5aSMartin Sperl struct spi_replaced_transfers *spi_replace_transfers(
3085523baf5aSMartin Sperl 	struct spi_message *msg,
3086523baf5aSMartin Sperl 	struct spi_transfer *xfer_first,
3087523baf5aSMartin Sperl 	size_t remove,
3088523baf5aSMartin Sperl 	size_t insert,
3089523baf5aSMartin Sperl 	spi_replaced_release_t release,
3090523baf5aSMartin Sperl 	size_t extradatasize,
3091523baf5aSMartin Sperl 	gfp_t gfp)
3092523baf5aSMartin Sperl {
3093523baf5aSMartin Sperl 	struct spi_replaced_transfers *rxfer;
3094523baf5aSMartin Sperl 	struct spi_transfer *xfer;
3095523baf5aSMartin Sperl 	size_t i;
3096523baf5aSMartin Sperl 
3097523baf5aSMartin Sperl 	/* allocate the structure using spi_res */
3098523baf5aSMartin Sperl 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3099aef97522SGustavo A. R. Silva 			      struct_size(rxfer, inserted_transfers, insert)
3100523baf5aSMartin Sperl 			      + extradatasize,
3101523baf5aSMartin Sperl 			      gfp);
3102523baf5aSMartin Sperl 	if (!rxfer)
3103523baf5aSMartin Sperl 		return ERR_PTR(-ENOMEM);
3104523baf5aSMartin Sperl 
3105523baf5aSMartin Sperl 	/* the release code to invoke before running the generic release */
3106523baf5aSMartin Sperl 	rxfer->release = release;
3107523baf5aSMartin Sperl 
3108523baf5aSMartin Sperl 	/* assign extradata */
3109523baf5aSMartin Sperl 	if (extradatasize)
3110523baf5aSMartin Sperl 		rxfer->extradata =
3111523baf5aSMartin Sperl 			&rxfer->inserted_transfers[insert];
3112523baf5aSMartin Sperl 
3113523baf5aSMartin Sperl 	/* init the replaced_transfers list */
3114523baf5aSMartin Sperl 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3115523baf5aSMartin Sperl 
3116523baf5aSMartin Sperl 	/* assign the list_entry after which we should reinsert
3117523baf5aSMartin Sperl 	 * the @replaced_transfers - it may be spi_message.transfers!
3118523baf5aSMartin Sperl 	 */
3119523baf5aSMartin Sperl 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3120523baf5aSMartin Sperl 
3121523baf5aSMartin Sperl 	/* remove the requested number of transfers */
3122523baf5aSMartin Sperl 	for (i = 0; i < remove; i++) {
3123523baf5aSMartin Sperl 		/* if the entry after replaced_after is msg->transfers
3124523baf5aSMartin Sperl 		 * then we have been requested to remove more transfers
3125523baf5aSMartin Sperl 		 * than are in the list
3126523baf5aSMartin Sperl 		 */
3127523baf5aSMartin Sperl 		if (rxfer->replaced_after->next == &msg->transfers) {
3128523baf5aSMartin Sperl 			dev_err(&msg->spi->dev,
3129523baf5aSMartin Sperl 				"requested to remove more spi_transfers than are available\n");
3130523baf5aSMartin Sperl 			/* insert replaced transfers back into the message */
3131523baf5aSMartin Sperl 			list_splice(&rxfer->replaced_transfers,
3132523baf5aSMartin Sperl 				    rxfer->replaced_after);
3133523baf5aSMartin Sperl 
3134523baf5aSMartin Sperl 			/* free the spi_replace_transfer structure */
3135523baf5aSMartin Sperl 			spi_res_free(rxfer);
3136523baf5aSMartin Sperl 
3137523baf5aSMartin Sperl 			/* and return with an error */
3138523baf5aSMartin Sperl 			return ERR_PTR(-EINVAL);
3139523baf5aSMartin Sperl 		}
3140523baf5aSMartin Sperl 
3141523baf5aSMartin Sperl 		/* remove the entry after replaced_after from list of
3142523baf5aSMartin Sperl 		 * transfers and add it to list of replaced_transfers
3143523baf5aSMartin Sperl 		 */
3144523baf5aSMartin Sperl 		list_move_tail(rxfer->replaced_after->next,
3145523baf5aSMartin Sperl 			       &rxfer->replaced_transfers);
3146523baf5aSMartin Sperl 	}
3147523baf5aSMartin Sperl 
3148523baf5aSMartin Sperl 	/* create copies of the given xfer with identical settings,
3149523baf5aSMartin Sperl 	 * based on the first transfer to get removed
3150523baf5aSMartin Sperl 	 */
3151523baf5aSMartin Sperl 	for (i = 0; i < insert; i++) {
3152523baf5aSMartin Sperl 		/* we need to run in reverse order */
3153523baf5aSMartin Sperl 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3154523baf5aSMartin Sperl 
3155523baf5aSMartin Sperl 		/* copy all spi_transfer data */
3156523baf5aSMartin Sperl 		memcpy(xfer, xfer_first, sizeof(*xfer));
3157523baf5aSMartin Sperl 
3158523baf5aSMartin Sperl 		/* add to list */
3159523baf5aSMartin Sperl 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3160523baf5aSMartin Sperl 
3161bebcfd27SAlexandru Ardelean 		/* clear cs_change and delay for all but the last */
3162523baf5aSMartin Sperl 		if (i) {
3163523baf5aSMartin Sperl 			xfer->cs_change = false;
3164bebcfd27SAlexandru Ardelean 			xfer->delay.value = 0;
3165523baf5aSMartin Sperl 		}
3166523baf5aSMartin Sperl 	}
3167523baf5aSMartin Sperl 
3168523baf5aSMartin Sperl 	/* set up inserted */
3169523baf5aSMartin Sperl 	rxfer->inserted = insert;
3170523baf5aSMartin Sperl 
3171523baf5aSMartin Sperl 	/* and register it with spi_res/spi_message */
3172523baf5aSMartin Sperl 	spi_res_add(msg, rxfer);
3173523baf5aSMartin Sperl 
3174523baf5aSMartin Sperl 	return rxfer;
3175523baf5aSMartin Sperl }
3176523baf5aSMartin Sperl EXPORT_SYMBOL_GPL(spi_replace_transfers);
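
/*
 * Illustrative sketch, not part of the driver: a core helper could use
 * spi_replace_transfers() to swap one transfer for two copies and then
 * patch the copies up.  The split point "first_len" is an assumption made
 * up for this example; callers derive it from their own constraints.
 *
 *	struct spi_replaced_transfers *srt;
 *
 *	srt = spi_replace_transfers(msg, xfer, 1, 2, NULL, 0, GFP_KERNEL);
 *	if (IS_ERR(srt))
 *		return PTR_ERR(srt);
 *
 *	srt->inserted_transfers[0].len = first_len;
 *	srt->inserted_transfers[1].len = xfer->len - first_len;
 *	if (srt->inserted_transfers[1].tx_buf)
 *		srt->inserted_transfers[1].tx_buf += first_len;
 *	if (srt->inserted_transfers[1].rx_buf)
 *		srt->inserted_transfers[1].rx_buf += first_len;
 *
 * __spi_split_transfer_maxsize() below is the in-tree user of this pattern.
 */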
3177523baf5aSMartin Sperl 
31788caab75fSGeert Uytterhoeven static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3179d9f12122SMartin Sperl 					struct spi_message *msg,
3180d9f12122SMartin Sperl 					struct spi_transfer **xferp,
3181d9f12122SMartin Sperl 					size_t maxsize,
3182d9f12122SMartin Sperl 					gfp_t gfp)
3183d9f12122SMartin Sperl {
3184d9f12122SMartin Sperl 	struct spi_transfer *xfer = *xferp, *xfers;
3185d9f12122SMartin Sperl 	struct spi_replaced_transfers *srt;
3186d9f12122SMartin Sperl 	size_t offset;
3187d9f12122SMartin Sperl 	size_t count, i;
3188d9f12122SMartin Sperl 
3189d9f12122SMartin Sperl 	/* calculate how many we have to replace */
3190d9f12122SMartin Sperl 	count = DIV_ROUND_UP(xfer->len, maxsize);
3191d9f12122SMartin Sperl 
3192d9f12122SMartin Sperl 	/* create replacement */
3193d9f12122SMartin Sperl 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
3194657d32efSDan Carpenter 	if (IS_ERR(srt))
3195657d32efSDan Carpenter 		return PTR_ERR(srt);
3196d9f12122SMartin Sperl 	xfers = srt->inserted_transfers;
3197d9f12122SMartin Sperl 
3198d9f12122SMartin Sperl 	/* now handle each of those newly inserted spi_transfers
3199d9f12122SMartin Sperl 	 * note that the replacement spi_transfers are all preset
3200d9f12122SMartin Sperl 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3201d9f12122SMartin Sperl 	 * are all identical (as well as most others)
3202d9f12122SMartin Sperl 	 * so we just have to fix up len and the pointers.
3203d9f12122SMartin Sperl 	 *
3204d9f12122SMartin Sperl 	 * this also includes support for the deprecated
3205d9f12122SMartin Sperl 	 * spi_message.is_dma_mapped interface
3206d9f12122SMartin Sperl 	 */
3207d9f12122SMartin Sperl 
3208d9f12122SMartin Sperl 	/* the first transfer just needs the length modified, so we
3209d9f12122SMartin Sperl 	 * run it outside the loop
3210d9f12122SMartin Sperl 	 */
3211c8dab77aSFabio Estevam 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3212d9f12122SMartin Sperl 
3213d9f12122SMartin Sperl 	/* all the others need rx_buf/tx_buf also set */
3214d9f12122SMartin Sperl 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3215d9f12122SMartin Sperl 		/* update rx_buf, tx_buf and dma */
3216d9f12122SMartin Sperl 		if (xfers[i].rx_buf)
3217d9f12122SMartin Sperl 			xfers[i].rx_buf += offset;
3218d9f12122SMartin Sperl 		if (xfers[i].rx_dma)
3219d9f12122SMartin Sperl 			xfers[i].rx_dma += offset;
3220d9f12122SMartin Sperl 		if (xfers[i].tx_buf)
3221d9f12122SMartin Sperl 			xfers[i].tx_buf += offset;
3222d9f12122SMartin Sperl 		if (xfers[i].tx_dma)
3223d9f12122SMartin Sperl 			xfers[i].tx_dma += offset;
3224d9f12122SMartin Sperl 
3225d9f12122SMartin Sperl 		/* update length */
3226d9f12122SMartin Sperl 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3227d9f12122SMartin Sperl 	}
3228d9f12122SMartin Sperl 
3229d9f12122SMartin Sperl 	/* we set up xferp to the last entry we have inserted,
3230d9f12122SMartin Sperl 	 * so that we skip those already split transfers
3231d9f12122SMartin Sperl 	 */
3232d9f12122SMartin Sperl 	*xferp = &xfers[count - 1];
3233d9f12122SMartin Sperl 
3234d9f12122SMartin Sperl 	/* increment statistics counters */
32358caab75fSGeert Uytterhoeven 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3236d9f12122SMartin Sperl 				       transfers_split_maxsize);
3237d9f12122SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
3238d9f12122SMartin Sperl 				       transfers_split_maxsize);
3239d9f12122SMartin Sperl 
3240d9f12122SMartin Sperl 	return 0;
3241d9f12122SMartin Sperl }
3242d9f12122SMartin Sperl 
3243d9f12122SMartin Sperl /**
3244ce2424d7SMauro Carvalho Chehab  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3245d9f12122SMartin Sperl  *                               when an individual transfer exceeds a
3246d9f12122SMartin Sperl  *                               certain size
32478caab75fSGeert Uytterhoeven  * @ctlr:    the @spi_controller for this transfer
32483700ce95SMasanari Iida  * @msg:   the @spi_message to transform
32493700ce95SMasanari Iida  * @maxsize:  the maximum length a transfer may have before it is split
325010f11a22SJavier Martinez Canillas  * @gfp: GFP allocation flags
3251d9f12122SMartin Sperl  *
3252d9f12122SMartin Sperl  * Return: status of transformation
3253d9f12122SMartin Sperl  */
32548caab75fSGeert Uytterhoeven int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3255d9f12122SMartin Sperl 				struct spi_message *msg,
3256d9f12122SMartin Sperl 				size_t maxsize,
3257d9f12122SMartin Sperl 				gfp_t gfp)
3258d9f12122SMartin Sperl {
3259d9f12122SMartin Sperl 	struct spi_transfer *xfer;
3260d9f12122SMartin Sperl 	int ret;
3261d9f12122SMartin Sperl 
3262d9f12122SMartin Sperl 	/* iterate over the transfer_list,
3263d9f12122SMartin Sperl 	 * but note that xfer is advanced to the last transfer inserted
3264d9f12122SMartin Sperl 	 * to avoid checking sizes again unnecessarily (also xfer may
3265d9f12122SMartin Sperl 	 * potentially belong to a different list by the time the
3266d9f12122SMartin Sperl 	 * replacement has happened)
3267d9f12122SMartin Sperl 	 */
3268d9f12122SMartin Sperl 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3269d9f12122SMartin Sperl 		if (xfer->len > maxsize) {
32708caab75fSGeert Uytterhoeven 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
32718caab75fSGeert Uytterhoeven 							   maxsize, gfp);
3272d9f12122SMartin Sperl 			if (ret)
3273d9f12122SMartin Sperl 				return ret;
3274d9f12122SMartin Sperl 		}
3275d9f12122SMartin Sperl 	}
3276d9f12122SMartin Sperl 
3277d9f12122SMartin Sperl 	return 0;
3278d9f12122SMartin Sperl }
3279d9f12122SMartin Sperl EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
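
/*
 * Illustrative sketch, not part of the driver: a controller driver whose
 * hardware can only move a bounded number of bytes per transfer might call
 * spi_split_transfers_maxsize() from its ->prepare_message() hook.  The
 * foo_ name and the 64-byte limit are assumptions made up for this example.
 *
 *	static int foo_prepare_message(struct spi_controller *ctlr,
 *				       struct spi_message *msg)
 *	{
 *		return spi_split_transfers_maxsize(ctlr, msg, 64, GFP_KERNEL);
 *	}
 */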
32808ae12a0dSDavid Brownell 
32818ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
32828ae12a0dSDavid Brownell 
32838caab75fSGeert Uytterhoeven /* Core methods for SPI controller protocol drivers.  Some of the
32847d077197SDavid Brownell  * other core methods are currently defined as inline functions.
32857d077197SDavid Brownell  */
32867d077197SDavid Brownell 
32878caab75fSGeert Uytterhoeven static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
32888caab75fSGeert Uytterhoeven 					u8 bits_per_word)
328963ab645fSStefan Brüns {
32908caab75fSGeert Uytterhoeven 	if (ctlr->bits_per_word_mask) {
329163ab645fSStefan Brüns 		/* Only 32 bits fit in the mask */
329263ab645fSStefan Brüns 		if (bits_per_word > 32)
329363ab645fSStefan Brüns 			return -EINVAL;
32948caab75fSGeert Uytterhoeven 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
329563ab645fSStefan Brüns 			return -EINVAL;
329663ab645fSStefan Brüns 	}
329763ab645fSStefan Brüns 
329863ab645fSStefan Brüns 	return 0;
329963ab645fSStefan Brüns }
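
/*
 * The bits_per_word_mask checked above is filled in by controller drivers
 * at registration time, typically with the SPI_BPW_MASK()/
 * SPI_BPW_RANGE_MASK() helpers from <linux/spi/spi.h>.  For instance (the
 * 4..16 range is an assumption made up for this example):
 *
 *	ctlr->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
 *
 * would let word sizes of 4 to 16 bits pass the check above.
 */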
330063ab645fSStefan Brüns 
33017d077197SDavid Brownell /**
33027d077197SDavid Brownell  * spi_setup - setup SPI mode and clock rate
33037d077197SDavid Brownell  * @spi: the device whose settings are being modified
33047d077197SDavid Brownell  * Context: can sleep, and no requests are queued to the device
33057d077197SDavid Brownell  *
33067d077197SDavid Brownell  * SPI protocol drivers may need to update the transfer mode if the
33077d077197SDavid Brownell  * device doesn't work with its default.  They may likewise need
33087d077197SDavid Brownell  * to update clock rates or word sizes from initial values.  This function
33097d077197SDavid Brownell  * changes those settings, and must be called from a context that can sleep.
33107d077197SDavid Brownell  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
33117d077197SDavid Brownell  * effect the next time the device is selected and data is transferred to
33127d077197SDavid Brownell  * or from it.  When this function returns, the spi device is deselected.
33137d077197SDavid Brownell  *
33147d077197SDavid Brownell  * Note that this call will fail if the protocol driver specifies an option
33157d077197SDavid Brownell  * that the underlying controller or its driver does not support.  For
33167d077197SDavid Brownell  * example, not all hardware supports wire transfers using nine bit words,
33177d077197SDavid Brownell  * LSB-first wire encoding, or active-high chipselects.
331897d56dc6SJavier Martinez Canillas  *
331997d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
33207d077197SDavid Brownell  */
33217d077197SDavid Brownell int spi_setup(struct spi_device *spi)
33227d077197SDavid Brownell {
332383596fbeSGeert Uytterhoeven 	unsigned	bad_bits, ugly_bits;
33245ab8d262SAndy Shevchenko 	int		status;
33257d077197SDavid Brownell 
3326d962608cSDragos Bogdan 	/*
3327d962608cSDragos Bogdan 	 * check mode to ensure that no two of DUAL, QUAD and NO_MOSI/MISO
3328d962608cSDragos Bogdan 	 * are set at the same time
3329f477b7fbSwangyuhang 	 */
3330d962608cSDragos Bogdan 	if ((hweight_long(spi->mode &
3331d962608cSDragos Bogdan 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3332d962608cSDragos Bogdan 	    (hweight_long(spi->mode &
3333d962608cSDragos Bogdan 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3334f477b7fbSwangyuhang 		dev_err(&spi->dev,
3335d962608cSDragos Bogdan 		"setup: can not select any two of dual, quad and no-rx/tx at the same time\n");
3336f477b7fbSwangyuhang 		return -EINVAL;
3337f477b7fbSwangyuhang 	}
3338f477b7fbSwangyuhang 	/* in SPI_3WIRE mode, DUAL and QUAD should be forbidden
3339f477b7fbSwangyuhang 	 */
3340f477b7fbSwangyuhang 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
33416b03061fSYogesh Narayan Gaur 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
33426b03061fSYogesh Narayan Gaur 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3343f477b7fbSwangyuhang 		return -EINVAL;
3344e7db06b5SDavid Brownell 	/* help drivers fail *cleanly* when they need options
33458caab75fSGeert Uytterhoeven 	 * that aren't supported with their current controller.
3346cbaa62e0SDavid Lechner 	 * SPI_CS_WORD has a fallback software implementation,
3347cbaa62e0SDavid Lechner 	 * so it is ignored here.
3348e7db06b5SDavid Brownell 	 */
3349d962608cSDragos Bogdan 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3350d962608cSDragos Bogdan 				 SPI_NO_TX | SPI_NO_RX);
3351d61ad23cSSerge Semin 	/* nothing prevents us from working with active-high CS if it
3352d61ad23cSSerge Semin 	 * is driven by a GPIO.
3353d61ad23cSSerge Semin 	 */
3354d61ad23cSSerge Semin 	if (gpio_is_valid(spi->cs_gpio))
3355d61ad23cSSerge Semin 		bad_bits &= ~SPI_CS_HIGH;
335683596fbeSGeert Uytterhoeven 	ugly_bits = bad_bits &
33576b03061fSYogesh Narayan Gaur 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
33586b03061fSYogesh Narayan Gaur 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
335983596fbeSGeert Uytterhoeven 	if (ugly_bits) {
336083596fbeSGeert Uytterhoeven 		dev_warn(&spi->dev,
336183596fbeSGeert Uytterhoeven 			 "setup: ignoring unsupported mode bits %x\n",
336283596fbeSGeert Uytterhoeven 			 ugly_bits);
336383596fbeSGeert Uytterhoeven 		spi->mode &= ~ugly_bits;
336483596fbeSGeert Uytterhoeven 		bad_bits &= ~ugly_bits;
336583596fbeSGeert Uytterhoeven 	}
3366e7db06b5SDavid Brownell 	if (bad_bits) {
3367eb288a1fSLinus Walleij 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3368e7db06b5SDavid Brownell 			bad_bits);
3369e7db06b5SDavid Brownell 		return -EINVAL;
3370e7db06b5SDavid Brownell 	}
3371e7db06b5SDavid Brownell 
33727d077197SDavid Brownell 	if (!spi->bits_per_word)
33737d077197SDavid Brownell 		spi->bits_per_word = 8;
33747d077197SDavid Brownell 
33758caab75fSGeert Uytterhoeven 	status = __spi_validate_bits_per_word(spi->controller,
33768caab75fSGeert Uytterhoeven 					      spi->bits_per_word);
33775ab8d262SAndy Shevchenko 	if (status)
33785ab8d262SAndy Shevchenko 		return status;
337963ab645fSStefan Brüns 
33806820e812STudor Ambarus 	if (spi->controller->max_speed_hz &&
33816820e812STudor Ambarus 	    (!spi->max_speed_hz ||
33826820e812STudor Ambarus 	     spi->max_speed_hz > spi->controller->max_speed_hz))
33838caab75fSGeert Uytterhoeven 		spi->max_speed_hz = spi->controller->max_speed_hz;
3384052eb2d4SAxel Lin 
33854fae3a58SSerge Semin 	mutex_lock(&spi->controller->io_mutex);
33864fae3a58SSerge Semin 
3387c914dbf8SJoe Burmeister 	if (spi->controller->setup) {
33888caab75fSGeert Uytterhoeven 		status = spi->controller->setup(spi);
3389c914dbf8SJoe Burmeister 		if (status) {
3390c914dbf8SJoe Burmeister 			mutex_unlock(&spi->controller->io_mutex);
3391c914dbf8SJoe Burmeister 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3392c914dbf8SJoe Burmeister 				status);
3393c914dbf8SJoe Burmeister 			return status;
3394c914dbf8SJoe Burmeister 		}
3395c914dbf8SJoe Burmeister 	}
33967d077197SDavid Brownell 
3397d948e6caSLuhua Xu 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3398d948e6caSLuhua Xu 		status = pm_runtime_get_sync(spi->controller->dev.parent);
3399d948e6caSLuhua Xu 		if (status < 0) {
34004fae3a58SSerge Semin 			mutex_unlock(&spi->controller->io_mutex);
3401d948e6caSLuhua Xu 			pm_runtime_put_noidle(spi->controller->dev.parent);
3402d948e6caSLuhua Xu 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3403d948e6caSLuhua Xu 				status);
3404d948e6caSLuhua Xu 			return status;
3405d948e6caSLuhua Xu 		}
340657a94607STony Lindgren 
340757a94607STony Lindgren 		/*
340857a94607STony Lindgren 		 * We do not want to return positive value from pm_runtime_get,
340957a94607STony Lindgren 		 * there are many instances of devices calling spi_setup() and
341057a94607STony Lindgren 		 * checking for a non-zero return value instead of a negative
341157a94607STony Lindgren 		 * return value.
341257a94607STony Lindgren 		 */
341357a94607STony Lindgren 		status = 0;
341457a94607STony Lindgren 
3415d347b4aaSDavid Bauer 		spi_set_cs(spi, false, true);
3416d948e6caSLuhua Xu 		pm_runtime_mark_last_busy(spi->controller->dev.parent);
3417d948e6caSLuhua Xu 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
3418d948e6caSLuhua Xu 	} else {
3419d347b4aaSDavid Bauer 		spi_set_cs(spi, false, true);
3420d948e6caSLuhua Xu 	}
3421abeedb01SFranklin S Cooper Jr 
34224fae3a58SSerge Semin 	mutex_unlock(&spi->controller->io_mutex);
34234fae3a58SSerge Semin 
3424924b5867SDouglas Anderson 	if (spi->rt && !spi->controller->rt) {
3425924b5867SDouglas Anderson 		spi->controller->rt = true;
3426924b5867SDouglas Anderson 		spi_set_thread_rt(spi->controller);
3427924b5867SDouglas Anderson 	}
3428924b5867SDouglas Anderson 
34295fe5f05eSJingoo Han 	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
34307d077197SDavid Brownell 			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
34317d077197SDavid Brownell 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
34327d077197SDavid Brownell 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
34337d077197SDavid Brownell 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
34347d077197SDavid Brownell 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
34357d077197SDavid Brownell 			spi->bits_per_word, spi->max_speed_hz,
34367d077197SDavid Brownell 			status);
34377d077197SDavid Brownell 
34387d077197SDavid Brownell 	return status;
34397d077197SDavid Brownell }
34407d077197SDavid Brownell EXPORT_SYMBOL_GPL(spi_setup);
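
/*
 * Illustrative sketch, not part of the driver: a protocol driver typically
 * adjusts only the fields it cares about in probe() and then calls
 * spi_setup().  The mode, word size and clock rate below are assumptions
 * made up for this example.
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 1000000;
 *	ret = spi_setup(spi);
 *	if (ret)
 *		return ret;
 */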
34417d077197SDavid Brownell 
3442f1ca9992SSowjanya Komatineni /**
3443f1ca9992SSowjanya Komatineni  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3444f1ca9992SSowjanya Komatineni  * @spi: the device that requires specific CS timing configuration
344581059366SAlexandru Ardelean  * @setup: CS setup time specified via @spi_delay
344681059366SAlexandru Ardelean  * @hold: CS hold time specified via @spi_delay
344781059366SAlexandru Ardelean  * @inactive: CS inactive delay between transfers specified via @spi_delay
344881059366SAlexandru Ardelean  *
344981059366SAlexandru Ardelean  * Return: zero on success, else a negative error code.
3450f1ca9992SSowjanya Komatineni  */
345181059366SAlexandru Ardelean int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
345281059366SAlexandru Ardelean 		      struct spi_delay *hold, struct spi_delay *inactive)
3453f1ca9992SSowjanya Komatineni {
34544cea6b8cSleilk.liu 	struct device *parent = spi->controller->dev.parent;
345525093bdeSAlexandru Ardelean 	size_t len;
34564cea6b8cSleilk.liu 	int status;
345725093bdeSAlexandru Ardelean 
34580486d9f9Sleilk.liu 	if (spi->controller->set_cs_timing &&
34590486d9f9Sleilk.liu 	    !(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
34604cea6b8cSleilk.liu 		if (spi->controller->auto_runtime_pm) {
34614cea6b8cSleilk.liu 			status = pm_runtime_get_sync(parent);
34624cea6b8cSleilk.liu 			if (status < 0) {
34634cea6b8cSleilk.liu 				pm_runtime_put_noidle(parent);
34644cea6b8cSleilk.liu 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
34654cea6b8cSleilk.liu 					status);
34664cea6b8cSleilk.liu 				return status;
34674cea6b8cSleilk.liu 			}
34684cea6b8cSleilk.liu 
34694cea6b8cSleilk.liu 			status = spi->controller->set_cs_timing(spi, setup,
34704cea6b8cSleilk.liu 								hold, inactive);
34714cea6b8cSleilk.liu 			pm_runtime_mark_last_busy(parent);
34724cea6b8cSleilk.liu 			pm_runtime_put_autosuspend(parent);
34734cea6b8cSleilk.liu 			return status;
34744cea6b8cSleilk.liu 		} else {
347581059366SAlexandru Ardelean 			return spi->controller->set_cs_timing(spi, setup, hold,
347681059366SAlexandru Ardelean 							      inactive);
34774cea6b8cSleilk.liu 		}
34784cea6b8cSleilk.liu 	}
347925093bdeSAlexandru Ardelean 
348025093bdeSAlexandru Ardelean 	if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
348125093bdeSAlexandru Ardelean 	    (hold && hold->unit == SPI_DELAY_UNIT_SCK) ||
348225093bdeSAlexandru Ardelean 	    (inactive && inactive->unit == SPI_DELAY_UNIT_SCK)) {
348325093bdeSAlexandru Ardelean 		dev_err(&spi->dev,
348425093bdeSAlexandru Ardelean 			"Clock-cycle delays for CS not supported in SW mode\n");
348581059366SAlexandru Ardelean 		return -ENOTSUPP;
3486f1ca9992SSowjanya Komatineni 	}
348725093bdeSAlexandru Ardelean 
348825093bdeSAlexandru Ardelean 	len = sizeof(struct spi_delay);
348925093bdeSAlexandru Ardelean 
349025093bdeSAlexandru Ardelean 	/* copy delays to controller */
349125093bdeSAlexandru Ardelean 	if (setup)
349225093bdeSAlexandru Ardelean 		memcpy(&spi->controller->cs_setup, setup, len);
349325093bdeSAlexandru Ardelean 	else
349425093bdeSAlexandru Ardelean 		memset(&spi->controller->cs_setup, 0, len);
349525093bdeSAlexandru Ardelean 
349625093bdeSAlexandru Ardelean 	if (hold)
349725093bdeSAlexandru Ardelean 		memcpy(&spi->controller->cs_hold, hold, len);
349825093bdeSAlexandru Ardelean 	else
349925093bdeSAlexandru Ardelean 		memset(&spi->controller->cs_hold, 0, len);
350025093bdeSAlexandru Ardelean 
350125093bdeSAlexandru Ardelean 	if (inactive)
350225093bdeSAlexandru Ardelean 		memcpy(&spi->controller->cs_inactive, inactive, len);
350325093bdeSAlexandru Ardelean 	else
350425093bdeSAlexandru Ardelean 		memset(&spi->controller->cs_inactive, 0, len);
350525093bdeSAlexandru Ardelean 
350625093bdeSAlexandru Ardelean 	return 0;
3507f1ca9992SSowjanya Komatineni }
3508f1ca9992SSowjanya Komatineni EXPORT_SYMBOL_GPL(spi_set_cs_timing);
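
/*
 * Illustrative sketch, not part of the driver: a peripheral driver that
 * needs extra chip-select setup and hold time could pass spi_delay values
 * like these; the 5us/10us figures are assumptions made up for this example.
 *
 *	struct spi_delay setup = { .value = 5, .unit = SPI_DELAY_UNIT_USECS };
 *	struct spi_delay hold = { .value = 10, .unit = SPI_DELAY_UNIT_USECS };
 *	int ret;
 *
 *	ret = spi_set_cs_timing(spi, &setup, &hold, NULL);
 */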
3509f1ca9992SSowjanya Komatineni 
35106c613f68SAlexandru Ardelean static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
35116c613f68SAlexandru Ardelean 				       struct spi_device *spi)
35126c613f68SAlexandru Ardelean {
35136c613f68SAlexandru Ardelean 	int delay1, delay2;
35146c613f68SAlexandru Ardelean 
35153984d39bSAlexandru Ardelean 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
35166c613f68SAlexandru Ardelean 	if (delay1 < 0)
35176c613f68SAlexandru Ardelean 		return delay1;
35186c613f68SAlexandru Ardelean 
35193984d39bSAlexandru Ardelean 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
35206c613f68SAlexandru Ardelean 	if (delay2 < 0)
35216c613f68SAlexandru Ardelean 		return delay2;
35226c613f68SAlexandru Ardelean 
35236c613f68SAlexandru Ardelean 	if (delay1 < delay2)
35246c613f68SAlexandru Ardelean 		memcpy(&xfer->word_delay, &spi->word_delay,
35256c613f68SAlexandru Ardelean 		       sizeof(xfer->word_delay));
35266c613f68SAlexandru Ardelean 
35276c613f68SAlexandru Ardelean 	return 0;
35286c613f68SAlexandru Ardelean }
35296c613f68SAlexandru Ardelean 
353090808738SMark Brown static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3531cf32b71eSErnst Schwab {
35328caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
3533e6811d1dSLaxman Dewangan 	struct spi_transfer *xfer;
35346ea31293SAtsushi Nemoto 	int w_size;
3535cf32b71eSErnst Schwab 
353624a0013aSMark Brown 	if (list_empty(&message->transfers))
353724a0013aSMark Brown 		return -EINVAL;
353824a0013aSMark Brown 
3539cbaa62e0SDavid Lechner 	/* If an SPI controller does not support toggling the CS line on each
354071388b21SDavid Lechner 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
354171388b21SDavid Lechner 	 * for the CS line, we can emulate the CS-per-word hardware function by
3542cbaa62e0SDavid Lechner 	 * splitting transfers into one-word transfers and ensuring that
3543cbaa62e0SDavid Lechner 	 * cs_change is set for each transfer.
3544cbaa62e0SDavid Lechner 	 */
354571388b21SDavid Lechner 	if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
3546f3186dd8SLinus Walleij 					  spi->cs_gpiod ||
354771388b21SDavid Lechner 					  gpio_is_valid(spi->cs_gpio))) {
3548cbaa62e0SDavid Lechner 		size_t maxsize;
3549cbaa62e0SDavid Lechner 		int ret;
3550cbaa62e0SDavid Lechner 
3551cbaa62e0SDavid Lechner 		maxsize = (spi->bits_per_word + 7) / 8;
3552cbaa62e0SDavid Lechner 
3553cbaa62e0SDavid Lechner 		/* spi_split_transfers_maxsize() requires message->spi */
3554cbaa62e0SDavid Lechner 		message->spi = spi;
3555cbaa62e0SDavid Lechner 
3556cbaa62e0SDavid Lechner 		ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
3557cbaa62e0SDavid Lechner 						  GFP_KERNEL);
3558cbaa62e0SDavid Lechner 		if (ret)
3559cbaa62e0SDavid Lechner 			return ret;
3560cbaa62e0SDavid Lechner 
3561cbaa62e0SDavid Lechner 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
3562cbaa62e0SDavid Lechner 			/* don't change cs_change on the last entry in the list */
3563cbaa62e0SDavid Lechner 			if (list_is_last(&xfer->transfer_list, &message->transfers))
3564cbaa62e0SDavid Lechner 				break;
3565cbaa62e0SDavid Lechner 			xfer->cs_change = 1;
3566cbaa62e0SDavid Lechner 		}
3567cbaa62e0SDavid Lechner 	}
3568cbaa62e0SDavid Lechner 
3569cf32b71eSErnst Schwab 	/* Half-duplex links include original MicroWire, and ones with
3570cf32b71eSErnst Schwab 	 * only one data pin like SPI_3WIRE (switches direction) or where
3571cf32b71eSErnst Schwab 	 * either MOSI or MISO is missing.  They can also be caused by
3572cf32b71eSErnst Schwab 	 * software limitations.
3573cf32b71eSErnst Schwab 	 */
35748caab75fSGeert Uytterhoeven 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
35758caab75fSGeert Uytterhoeven 	    (spi->mode & SPI_3WIRE)) {
35768caab75fSGeert Uytterhoeven 		unsigned flags = ctlr->flags;
3577cf32b71eSErnst Schwab 
3578cf32b71eSErnst Schwab 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
3579cf32b71eSErnst Schwab 			if (xfer->rx_buf && xfer->tx_buf)
3580cf32b71eSErnst Schwab 				return -EINVAL;
35818caab75fSGeert Uytterhoeven 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
3582cf32b71eSErnst Schwab 				return -EINVAL;
35838caab75fSGeert Uytterhoeven 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
3584cf32b71eSErnst Schwab 				return -EINVAL;
3585cf32b71eSErnst Schwab 		}
3586cf32b71eSErnst Schwab 	}
3587cf32b71eSErnst Schwab 
3588e6811d1dSLaxman Dewangan 	/*
3589059b8ffeSLaxman Dewangan 	 * Set transfer bits_per_word and max speed as spi device default if
3590059b8ffeSLaxman Dewangan 	 * they are not set for this transfer.
3591f477b7fbSwangyuhang 	 * Set transfer tx_nbits and rx_nbits as single transfer default
3592f477b7fbSwangyuhang 	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
3593b7bb367aSJonas Bonn 	 * Ensure transfer word_delay is at least as long as that required by
3594b7bb367aSJonas Bonn 	 * device itself.
3595e6811d1dSLaxman Dewangan 	 */
359677e80588SMartin Sperl 	message->frame_length = 0;
3597e6811d1dSLaxman Dewangan 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
35985d7e2b5eSMartin Sperl 		xfer->effective_speed_hz = 0;
3599078726ceSSourav Poddar 		message->frame_length += xfer->len;
3600e6811d1dSLaxman Dewangan 		if (!xfer->bits_per_word)
3601e6811d1dSLaxman Dewangan 			xfer->bits_per_word = spi->bits_per_word;
3602a6f87fadSAxel Lin 
3603a6f87fadSAxel Lin 		if (!xfer->speed_hz)
3604059b8ffeSLaxman Dewangan 			xfer->speed_hz = spi->max_speed_hz;
3605a6f87fadSAxel Lin 
36068caab75fSGeert Uytterhoeven 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
36078caab75fSGeert Uytterhoeven 			xfer->speed_hz = ctlr->max_speed_hz;
360856ede94aSGabor Juhos 
36098caab75fSGeert Uytterhoeven 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
3610543bb255SStephen Warren 			return -EINVAL;
3611a2fd4f9fSMark Brown 
36124d94bd21SIvan T. Ivanov 		/*
36134d94bd21SIvan T. Ivanov 		 * SPI transfer length should be a multiple of the SPI word size,
36144d94bd21SIvan T. Ivanov 		 * where the SPI word size is rounded up to a power-of-two number of bytes
36154d94bd21SIvan T. Ivanov 		 */
36164d94bd21SIvan T. Ivanov 		if (xfer->bits_per_word <= 8)
36174d94bd21SIvan T. Ivanov 			w_size = 1;
36184d94bd21SIvan T. Ivanov 		else if (xfer->bits_per_word <= 16)
36194d94bd21SIvan T. Ivanov 			w_size = 2;
36204d94bd21SIvan T. Ivanov 		else
36214d94bd21SIvan T. Ivanov 			w_size = 4;
36224d94bd21SIvan T. Ivanov 
36234d94bd21SIvan T. Ivanov 		/* No partial transfers accepted */
36246ea31293SAtsushi Nemoto 		if (xfer->len % w_size)
36254d94bd21SIvan T. Ivanov 			return -EINVAL;
36264d94bd21SIvan T. Ivanov 
36278caab75fSGeert Uytterhoeven 		if (xfer->speed_hz && ctlr->min_speed_hz &&
36288caab75fSGeert Uytterhoeven 		    xfer->speed_hz < ctlr->min_speed_hz)
3629a2fd4f9fSMark Brown 			return -EINVAL;
3630f477b7fbSwangyuhang 
3631f477b7fbSwangyuhang 		if (xfer->tx_buf && !xfer->tx_nbits)
3632f477b7fbSwangyuhang 			xfer->tx_nbits = SPI_NBITS_SINGLE;
3633f477b7fbSwangyuhang 		if (xfer->rx_buf && !xfer->rx_nbits)
3634f477b7fbSwangyuhang 			xfer->rx_nbits = SPI_NBITS_SINGLE;
3635f477b7fbSwangyuhang 		/* check transfer tx/rx_nbits:
36361afd9989SGeert Uytterhoeven 		 * 1. check the value matches one of single, dual and quad
36371afd9989SGeert Uytterhoeven 		 * 2. check tx/rx_nbits match the mode in spi_device
3638f477b7fbSwangyuhang 		 */
3639db90a441SSourav Poddar 		if (xfer->tx_buf) {
3640d962608cSDragos Bogdan 			if (spi->mode & SPI_NO_TX)
3641d962608cSDragos Bogdan 				return -EINVAL;
3642f477b7fbSwangyuhang 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
3643f477b7fbSwangyuhang 				xfer->tx_nbits != SPI_NBITS_DUAL &&
3644f477b7fbSwangyuhang 				xfer->tx_nbits != SPI_NBITS_QUAD)
3645a2fd4f9fSMark Brown 				return -EINVAL;
3646f477b7fbSwangyuhang 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
3647f477b7fbSwangyuhang 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
3648f477b7fbSwangyuhang 				return -EINVAL;
3649f477b7fbSwangyuhang 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
3650f477b7fbSwangyuhang 				!(spi->mode & SPI_TX_QUAD))
3651f477b7fbSwangyuhang 				return -EINVAL;
3652db90a441SSourav Poddar 		}
3653f477b7fbSwangyuhang 		/* check transfer rx_nbits */
3654db90a441SSourav Poddar 		if (xfer->rx_buf) {
3655d962608cSDragos Bogdan 			if (spi->mode & SPI_NO_RX)
3656d962608cSDragos Bogdan 				return -EINVAL;
3657f477b7fbSwangyuhang 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
3658f477b7fbSwangyuhang 				xfer->rx_nbits != SPI_NBITS_DUAL &&
3659f477b7fbSwangyuhang 				xfer->rx_nbits != SPI_NBITS_QUAD)
3660f477b7fbSwangyuhang 				return -EINVAL;
3661f477b7fbSwangyuhang 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
3662f477b7fbSwangyuhang 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
3663f477b7fbSwangyuhang 				return -EINVAL;
3664f477b7fbSwangyuhang 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
3665f477b7fbSwangyuhang 				!(spi->mode & SPI_RX_QUAD))
3666f477b7fbSwangyuhang 				return -EINVAL;
3667e6811d1dSLaxman Dewangan 		}
3668b7bb367aSJonas Bonn 
36696c613f68SAlexandru Ardelean 		if (_spi_xfer_word_delay_update(xfer, spi))
36706c613f68SAlexandru Ardelean 			return -EINVAL;
3671e6811d1dSLaxman Dewangan 	}
3672e6811d1dSLaxman Dewangan 
3673cf32b71eSErnst Schwab 	message->status = -EINPROGRESS;
367490808738SMark Brown 
367590808738SMark Brown 	return 0;
367690808738SMark Brown }
367790808738SMark Brown 
367890808738SMark Brown static int __spi_async(struct spi_device *spi, struct spi_message *message)
367990808738SMark Brown {
36808caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
3681b42faeeeSVladimir Oltean 	struct spi_transfer *xfer;
368290808738SMark Brown 
3683b5932f5cSBoris Brezillon 	/*
3684b5932f5cSBoris Brezillon 	 * Some controllers do not support doing regular SPI transfers. Return
3685b5932f5cSBoris Brezillon 	 * ENOTSUPP when this is the case.
3686b5932f5cSBoris Brezillon 	 */
3687b5932f5cSBoris Brezillon 	if (!ctlr->transfer)
3688b5932f5cSBoris Brezillon 		return -ENOTSUPP;
3689b5932f5cSBoris Brezillon 
369090808738SMark Brown 	message->spi = spi;
369190808738SMark Brown 
36928caab75fSGeert Uytterhoeven 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
3693eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_async);
3694eca2ebc7SMartin Sperl 
369590808738SMark Brown 	trace_spi_message_submit(message);
369690808738SMark Brown 
3697b42faeeeSVladimir Oltean 	if (!ctlr->ptp_sts_supported) {
3698b42faeeeSVladimir Oltean 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
3699b42faeeeSVladimir Oltean 			xfer->ptp_sts_word_pre = 0;
3700b42faeeeSVladimir Oltean 			ptp_read_system_prets(xfer->ptp_sts);
3701b42faeeeSVladimir Oltean 		}
3702b42faeeeSVladimir Oltean 	}
3703b42faeeeSVladimir Oltean 
37048caab75fSGeert Uytterhoeven 	return ctlr->transfer(spi, message);
3705cf32b71eSErnst Schwab }
3706cf32b71eSErnst Schwab 
3707568d0697SDavid Brownell /**
3708568d0697SDavid Brownell  * spi_async - asynchronous SPI transfer
3709568d0697SDavid Brownell  * @spi: device with which data will be exchanged
3710568d0697SDavid Brownell  * @message: describes the data transfers, including completion callback
3711568d0697SDavid Brownell  * Context: any (irqs may be blocked, etc)
3712568d0697SDavid Brownell  *
3713568d0697SDavid Brownell  * This call may be used in_irq and other contexts which can't sleep,
3714568d0697SDavid Brownell  * as well as from task contexts which can sleep.
3715568d0697SDavid Brownell  *
3716568d0697SDavid Brownell  * The completion callback is invoked in a context which can't sleep.
3717568d0697SDavid Brownell  * Before that invocation, the value of message->status is undefined.
3718568d0697SDavid Brownell  * When the callback is issued, message->status holds either zero (to
3719568d0697SDavid Brownell  * indicate complete success) or a negative error code.  After that
3720568d0697SDavid Brownell  * callback returns, the driver which issued the transfer request may
3721568d0697SDavid Brownell  * deallocate the associated memory; it's no longer in use by any SPI
3722568d0697SDavid Brownell  * core or controller driver code.
3723568d0697SDavid Brownell  *
3724568d0697SDavid Brownell  * Note that although all messages to a spi_device are handled in
3725568d0697SDavid Brownell  * FIFO order, messages may go to different devices in other orders.
3726568d0697SDavid Brownell  * Some device might be higher priority, or have various "hard" access
3727568d0697SDavid Brownell  * time requirements, for example.
3728568d0697SDavid Brownell  *
3729568d0697SDavid Brownell  * On detection of any fault during the transfer, processing of
3730568d0697SDavid Brownell  * the entire message is aborted, and the device is deselected.
3731568d0697SDavid Brownell  * Until returning from the associated message completion callback,
3732568d0697SDavid Brownell  * no other spi_message queued to that device will be processed.
3733568d0697SDavid Brownell  * (This rule applies equally to all the synchronous transfer calls,
3734568d0697SDavid Brownell  * which are wrappers around this core asynchronous primitive.)
373597d56dc6SJavier Martinez Canillas  *
373697d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
3737568d0697SDavid Brownell  */
3738568d0697SDavid Brownell int spi_async(struct spi_device *spi, struct spi_message *message)
3739568d0697SDavid Brownell {
37408caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
3741cf32b71eSErnst Schwab 	int ret;
3742cf32b71eSErnst Schwab 	unsigned long flags;
3743568d0697SDavid Brownell 
374490808738SMark Brown 	ret = __spi_validate(spi, message);
374590808738SMark Brown 	if (ret != 0)
374690808738SMark Brown 		return ret;
374790808738SMark Brown 
37488caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3749568d0697SDavid Brownell 
37508caab75fSGeert Uytterhoeven 	if (ctlr->bus_lock_flag)
3751cf32b71eSErnst Schwab 		ret = -EBUSY;
3752cf32b71eSErnst Schwab 	else
3753cf32b71eSErnst Schwab 		ret = __spi_async(spi, message);
3754568d0697SDavid Brownell 
37558caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3756cf32b71eSErnst Schwab 
3757cf32b71eSErnst Schwab 	return ret;
3758568d0697SDavid Brownell }
3759568d0697SDavid Brownell EXPORT_SYMBOL_GPL(spi_async);
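
/*
 * Illustrative sketch, not part of the driver: a typical spi_async() caller
 * provides a completion callback that runs in a context which must not
 * sleep.  The foo_ names are assumptions made up for this example.
 *
 *	static void foo_complete(void *context)
 *	{
 *		struct foo_dev *foo = context;
 *
 *		complete(&foo->done);
 *	}
 *
 *	...
 *	spi_message_init_with_transfers(&foo->msg, &foo->xfer, 1);
 *	foo->msg.complete = foo_complete;
 *	foo->msg.context = foo;
 *	ret = spi_async(spi, &foo->msg);
 */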
3760568d0697SDavid Brownell 
3761cf32b71eSErnst Schwab /**
3762cf32b71eSErnst Schwab  * spi_async_locked - version of spi_async with exclusive bus usage
3763cf32b71eSErnst Schwab  * @spi: device with which data will be exchanged
3764cf32b71eSErnst Schwab  * @message: describes the data transfers, including completion callback
3765cf32b71eSErnst Schwab  * Context: any (irqs may be blocked, etc)
3766cf32b71eSErnst Schwab  *
3767cf32b71eSErnst Schwab  * This call may be used in_irq and other contexts which can't sleep,
3768cf32b71eSErnst Schwab  * as well as from task contexts which can sleep.
3769cf32b71eSErnst Schwab  *
3770cf32b71eSErnst Schwab  * The completion callback is invoked in a context which can't sleep.
3771cf32b71eSErnst Schwab  * Before that invocation, the value of message->status is undefined.
3772cf32b71eSErnst Schwab  * When the callback is issued, message->status holds either zero (to
3773cf32b71eSErnst Schwab  * indicate complete success) or a negative error code.  After that
3774cf32b71eSErnst Schwab  * callback returns, the driver which issued the transfer request may
3775cf32b71eSErnst Schwab  * deallocate the associated memory; it's no longer in use by any SPI
3776cf32b71eSErnst Schwab  * core or controller driver code.
3777cf32b71eSErnst Schwab  *
3778cf32b71eSErnst Schwab  * Note that although all messages to a spi_device are handled in
3779cf32b71eSErnst Schwab  * FIFO order, messages may go to different devices in other orders.
3780cf32b71eSErnst Schwab  * Some device might be higher priority, or have various "hard" access
3781cf32b71eSErnst Schwab  * time requirements, for example.
3782cf32b71eSErnst Schwab  *
3783cf32b71eSErnst Schwab  * On detection of any fault during the transfer, processing of
3784cf32b71eSErnst Schwab  * the entire message is aborted, and the device is deselected.
3785cf32b71eSErnst Schwab  * Until returning from the associated message completion callback,
3786cf32b71eSErnst Schwab  * no other spi_message queued to that device will be processed.
3787cf32b71eSErnst Schwab  * (This rule applies equally to all the synchronous transfer calls,
3788cf32b71eSErnst Schwab  * which are wrappers around this core asynchronous primitive.)
378997d56dc6SJavier Martinez Canillas  *
379097d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
3791cf32b71eSErnst Schwab  */
3792cf32b71eSErnst Schwab int spi_async_locked(struct spi_device *spi, struct spi_message *message)
3793cf32b71eSErnst Schwab {
37948caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
3795cf32b71eSErnst Schwab 	int ret;
3796cf32b71eSErnst Schwab 	unsigned long flags;
3797cf32b71eSErnst Schwab 
379890808738SMark Brown 	ret = __spi_validate(spi, message);
379990808738SMark Brown 	if (ret != 0)
380090808738SMark Brown 		return ret;
380190808738SMark Brown 
38028caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
3803cf32b71eSErnst Schwab 
3804cf32b71eSErnst Schwab 	ret = __spi_async(spi, message);
3805cf32b71eSErnst Schwab 
38068caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3807cf32b71eSErnst Schwab 
3808cf32b71eSErnst Schwab 	return ret;
3809cf32b71eSErnst Schwab 
3810cf32b71eSErnst Schwab }
3811cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_async_locked);
3812cf32b71eSErnst Schwab 
38137d077197SDavid Brownell /*-------------------------------------------------------------------------*/
38147d077197SDavid Brownell 
38158caab75fSGeert Uytterhoeven /* Utility methods for SPI protocol drivers, layered on
38167d077197SDavid Brownell  * top of the core.  Some other utility methods are defined as
38177d077197SDavid Brownell  * inline functions.
38187d077197SDavid Brownell  */
38197d077197SDavid Brownell 
38205d870c8eSAndrew Morton static void spi_complete(void *arg)
38215d870c8eSAndrew Morton {
38225d870c8eSAndrew Morton 	complete(arg);
38235d870c8eSAndrew Morton }
38245d870c8eSAndrew Morton 
3825ef4d96ecSMark Brown static int __spi_sync(struct spi_device *spi, struct spi_message *message)
3826cf32b71eSErnst Schwab {
3827cf32b71eSErnst Schwab 	DECLARE_COMPLETION_ONSTACK(done);
3828cf32b71eSErnst Schwab 	int status;
38298caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr = spi->controller;
38300461a414SMark Brown 	unsigned long flags;
38310461a414SMark Brown 
38320461a414SMark Brown 	status = __spi_validate(spi, message);
38330461a414SMark Brown 	if (status != 0)
38340461a414SMark Brown 		return status;
3835cf32b71eSErnst Schwab 
3836cf32b71eSErnst Schwab 	message->complete = spi_complete;
3837cf32b71eSErnst Schwab 	message->context = &done;
38380461a414SMark Brown 	message->spi = spi;
3839cf32b71eSErnst Schwab 
38408caab75fSGeert Uytterhoeven 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_sync);
3841eca2ebc7SMartin Sperl 	SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, spi_sync);
3842eca2ebc7SMartin Sperl 
38430461a414SMark Brown 	/* If we're not using the legacy transfer method then we will
38440461a414SMark Brown 	 * try to transfer in the calling context, so special-case that.
38450461a414SMark Brown 	 * This code would be less tricky if we could remove the
38460461a414SMark Brown 	 * support for driver implemented message queues.
38470461a414SMark Brown 	 */
38488caab75fSGeert Uytterhoeven 	if (ctlr->transfer == spi_queued_transfer) {
38498caab75fSGeert Uytterhoeven 		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
38500461a414SMark Brown 
38510461a414SMark Brown 		trace_spi_message_submit(message);
38520461a414SMark Brown 
38530461a414SMark Brown 		status = __spi_queued_transfer(spi, message, false);
38540461a414SMark Brown 
38558caab75fSGeert Uytterhoeven 		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
38560461a414SMark Brown 	} else {
3857cf32b71eSErnst Schwab 		status = spi_async_locked(spi, message);
38580461a414SMark Brown 	}
3859cf32b71eSErnst Schwab 
3860cf32b71eSErnst Schwab 	if (status == 0) {
38610461a414SMark Brown 		/* Push out the messages in the calling context if we
38620461a414SMark Brown 		 * can.
38630461a414SMark Brown 		 */
38648caab75fSGeert Uytterhoeven 		if (ctlr->transfer == spi_queued_transfer) {
38658caab75fSGeert Uytterhoeven 			SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics,
3866eca2ebc7SMartin Sperl 						       spi_sync_immediate);
3867eca2ebc7SMartin Sperl 			SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
3868eca2ebc7SMartin Sperl 						       spi_sync_immediate);
38698caab75fSGeert Uytterhoeven 			__spi_pump_messages(ctlr, false);
3870eca2ebc7SMartin Sperl 		}
38710461a414SMark Brown 
3872cf32b71eSErnst Schwab 		wait_for_completion(&done);
3873cf32b71eSErnst Schwab 		status = message->status;
3874cf32b71eSErnst Schwab 	}
3875cf32b71eSErnst Schwab 	message->context = NULL;
3876cf32b71eSErnst Schwab 	return status;
3877cf32b71eSErnst Schwab }
3878cf32b71eSErnst Schwab 
38798ae12a0dSDavid Brownell /**
38808ae12a0dSDavid Brownell  * spi_sync - blocking/synchronous SPI data transfers
38818ae12a0dSDavid Brownell  * @spi: device with which data will be exchanged
38828ae12a0dSDavid Brownell  * @message: describes the data transfers
388333e34dc6SDavid Brownell  * Context: can sleep
38848ae12a0dSDavid Brownell  *
38858ae12a0dSDavid Brownell  * This call may only be used from a context that may sleep.  The sleep
38868ae12a0dSDavid Brownell  * is non-interruptible, and has no timeout.  Low-overhead controller
38878ae12a0dSDavid Brownell  * drivers may DMA directly into and out of the message buffers.
38888ae12a0dSDavid Brownell  *
38898ae12a0dSDavid Brownell  * Note that the SPI device's chip select is active during the message,
38908ae12a0dSDavid Brownell  * and then is normally disabled between messages.  Drivers for some
38918ae12a0dSDavid Brownell  * frequently-used devices may want to minimize costs of selecting a chip,
38928ae12a0dSDavid Brownell  * by leaving it selected in anticipation that the next message will go
38938ae12a0dSDavid Brownell  * to the same chip.  (That may increase power usage.)
38948ae12a0dSDavid Brownell  *
38950c868461SDavid Brownell  * Also, the caller is guaranteeing that the memory associated with the
38960c868461SDavid Brownell  * message will not be freed before this call returns.
38970c868461SDavid Brownell  *
389897d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
38998ae12a0dSDavid Brownell  */
39008ae12a0dSDavid Brownell int spi_sync(struct spi_device *spi, struct spi_message *message)
39018ae12a0dSDavid Brownell {
3902ef4d96ecSMark Brown 	int ret;
3903ef4d96ecSMark Brown 
39048caab75fSGeert Uytterhoeven 	mutex_lock(&spi->controller->bus_lock_mutex);
3905ef4d96ecSMark Brown 	ret = __spi_sync(spi, message);
39068caab75fSGeert Uytterhoeven 	mutex_unlock(&spi->controller->bus_lock_mutex);
3907ef4d96ecSMark Brown 
3908ef4d96ecSMark Brown 	return ret;
39098ae12a0dSDavid Brownell }
39108ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_sync);
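
/*
 * Illustrative sketch, not part of the driver: the common spi_sync() pattern
 * is to build a message from one or more transfers on the stack.  The tx/rx
 * buffers (equal-sized, caller-provided) are assumptions made up for this
 * example.
 *
 *	struct spi_transfer xfer = {
 *		.tx_buf = tx,
 *		.rx_buf = rx,
 *		.len = sizeof(tx),
 *	};
 *	struct spi_message msg;
 *
 *	spi_message_init(&msg);
 *	spi_message_add_tail(&xfer, &msg);
 *	ret = spi_sync(spi, &msg);
 *
 * The spi_sync_transfer() helper in <linux/spi/spi.h> wraps this sequence
 * for an array of transfers.
 */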
39118ae12a0dSDavid Brownell 
3912cf32b71eSErnst Schwab /**
3913cf32b71eSErnst Schwab  * spi_sync_locked - version of spi_sync with exclusive bus usage
3914cf32b71eSErnst Schwab  * @spi: device with which data will be exchanged
3915cf32b71eSErnst Schwab  * @message: describes the data transfers
3916cf32b71eSErnst Schwab  * Context: can sleep
3917cf32b71eSErnst Schwab  *
3918cf32b71eSErnst Schwab  * This call may only be used from a context that may sleep.  The sleep
3919cf32b71eSErnst Schwab  * is non-interruptible, and has no timeout.  Low-overhead controller
3920cf32b71eSErnst Schwab  * drivers may DMA directly into and out of the message buffers.
3921cf32b71eSErnst Schwab  *
3922cf32b71eSErnst Schwab  * This call should be used by drivers that require exclusive access to the
392325985edcSLucas De Marchi  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
3924cf32b71eSErnst Schwab  * be released by a spi_bus_unlock call when the exclusive access is over.
3925cf32b71eSErnst Schwab  *
392697d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
3927cf32b71eSErnst Schwab  */
3928cf32b71eSErnst Schwab int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
3929cf32b71eSErnst Schwab {
3930ef4d96ecSMark Brown 	return __spi_sync(spi, message);
3931cf32b71eSErnst Schwab }
3932cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_sync_locked);
3933cf32b71eSErnst Schwab 
3934cf32b71eSErnst Schwab /**
3935cf32b71eSErnst Schwab  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
39368caab75fSGeert Uytterhoeven  * @ctlr: SPI bus master that should be locked for exclusive bus access
3937cf32b71eSErnst Schwab  * Context: can sleep
3938cf32b71eSErnst Schwab  *
3939cf32b71eSErnst Schwab  * This call may only be used from a context that may sleep.  The sleep
3940cf32b71eSErnst Schwab  * is non-interruptible, and has no timeout.
3941cf32b71eSErnst Schwab  *
3942cf32b71eSErnst Schwab  * This call should be used by drivers that require exclusive access to the
3943cf32b71eSErnst Schwab  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
3944cf32b71eSErnst Schwab  * exclusive access is over. Data transfer must be done by spi_sync_locked
3945cf32b71eSErnst Schwab  * and spi_async_locked calls when the SPI bus lock is held.
3946cf32b71eSErnst Schwab  *
394797d56dc6SJavier Martinez Canillas  * Return: always zero.
3948cf32b71eSErnst Schwab  */
39498caab75fSGeert Uytterhoeven int spi_bus_lock(struct spi_controller *ctlr)
3950cf32b71eSErnst Schwab {
3951cf32b71eSErnst Schwab 	unsigned long flags;
3952cf32b71eSErnst Schwab 
39538caab75fSGeert Uytterhoeven 	mutex_lock(&ctlr->bus_lock_mutex);
3954cf32b71eSErnst Schwab 
39558caab75fSGeert Uytterhoeven 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
39568caab75fSGeert Uytterhoeven 	ctlr->bus_lock_flag = 1;
39578caab75fSGeert Uytterhoeven 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
3958cf32b71eSErnst Schwab 
3959cf32b71eSErnst Schwab 	/* mutex remains locked until spi_bus_unlock is called */
3960cf32b71eSErnst Schwab 
3961cf32b71eSErnst Schwab 	return 0;
3962cf32b71eSErnst Schwab }
3963cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_lock);
3964cf32b71eSErnst Schwab 
3965cf32b71eSErnst Schwab /**
3966cf32b71eSErnst Schwab  * spi_bus_unlock - release the lock for exclusive SPI bus usage
39678caab75fSGeert Uytterhoeven  * @ctlr: SPI bus master that was locked for exclusive bus access
3968cf32b71eSErnst Schwab  * Context: can sleep
3969cf32b71eSErnst Schwab  *
3970cf32b71eSErnst Schwab  * This call may only be used from a context that may sleep.  The sleep
3971cf32b71eSErnst Schwab  * is non-interruptible, and has no timeout.
3972cf32b71eSErnst Schwab  *
3973cf32b71eSErnst Schwab  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
3974cf32b71eSErnst Schwab  * call.
3975cf32b71eSErnst Schwab  *
397697d56dc6SJavier Martinez Canillas  * Return: always zero.
3977cf32b71eSErnst Schwab  */
39788caab75fSGeert Uytterhoeven int spi_bus_unlock(struct spi_controller *ctlr)
3979cf32b71eSErnst Schwab {
39808caab75fSGeert Uytterhoeven 	ctlr->bus_lock_flag = 0;
3981cf32b71eSErnst Schwab 
39828caab75fSGeert Uytterhoeven 	mutex_unlock(&ctlr->bus_lock_mutex);
3983cf32b71eSErnst Schwab 
3984cf32b71eSErnst Schwab 	return 0;
3985cf32b71eSErnst Schwab }
3986cf32b71eSErnst Schwab EXPORT_SYMBOL_GPL(spi_bus_unlock);
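
/*
 * Illustrative sketch, not part of the driver: a driver that must issue
 * several messages back to back, without other devices getting onto the bus
 * in between, brackets them with the lock calls above; error handling is
 * trimmed for brevity.
 *
 *	spi_bus_lock(spi->controller);
 *	ret = spi_sync_locked(spi, &msg1);
 *	if (!ret)
 *		ret = spi_sync_locked(spi, &msg2);
 *	spi_bus_unlock(spi->controller);
 */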
3987cf32b71eSErnst Schwab 
3988a9948b61SDavid Brownell /* portable code must never pass more than 32 bytes */
3989a9948b61SDavid Brownell #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
39908ae12a0dSDavid Brownell 
39918ae12a0dSDavid Brownell static u8	*buf;
39928ae12a0dSDavid Brownell 
39938ae12a0dSDavid Brownell /**
39948ae12a0dSDavid Brownell  * spi_write_then_read - SPI synchronous write followed by read
39958ae12a0dSDavid Brownell  * @spi: device with which data will be exchanged
39968ae12a0dSDavid Brownell  * @txbuf: data to be written (need not be dma-safe)
39978ae12a0dSDavid Brownell  * @n_tx: size of txbuf, in bytes
399827570497SJiri Pirko  * @rxbuf: buffer into which data will be read (need not be dma-safe)
399927570497SJiri Pirko  * @n_rx: size of rxbuf, in bytes
400033e34dc6SDavid Brownell  * Context: can sleep
40018ae12a0dSDavid Brownell  *
40028ae12a0dSDavid Brownell  * This performs a half duplex MicroWire style transaction with the
40038ae12a0dSDavid Brownell  * device, sending txbuf and then reading rxbuf.  The return value
40048ae12a0dSDavid Brownell  * is zero for success, else a negative errno status code.
4005b885244eSDavid Brownell  * This call may only be used from a context that may sleep.
40068ae12a0dSDavid Brownell  *
4007c373643bSMark Brown  * Parameters to this routine are always copied using a small buffer.
400833e34dc6SDavid Brownell  * Performance-sensitive or bulk transfer code should instead use
40090c868461SDavid Brownell  * spi_{async,sync}() calls with dma-safe buffers.
401097d56dc6SJavier Martinez Canillas  *
401197d56dc6SJavier Martinez Canillas  * Return: zero on success, else a negative error code.
40128ae12a0dSDavid Brownell  */
40138ae12a0dSDavid Brownell int spi_write_then_read(struct spi_device *spi,
40140c4a1590SMark Brown 		const void *txbuf, unsigned n_tx,
40150c4a1590SMark Brown 		void *rxbuf, unsigned n_rx)
40168ae12a0dSDavid Brownell {
4017068f4070SDavid Brownell 	static DEFINE_MUTEX(lock);
40188ae12a0dSDavid Brownell 
40198ae12a0dSDavid Brownell 	int			status;
40208ae12a0dSDavid Brownell 	struct spi_message	message;
4021bdff549eSDavid Brownell 	struct spi_transfer	x[2];
40228ae12a0dSDavid Brownell 	u8			*local_buf;
40238ae12a0dSDavid Brownell 
4024b3a223eeSMark Brown 	/* Use preallocated DMA-safe buffer if we can.  We can't avoid
4025b3a223eeSMark Brown 	 * copying here (it's purely a convenience thing), but we can
4026b3a223eeSMark Brown 	 * keep heap costs out of the hot path unless someone else is
4027b3a223eeSMark Brown 	 * using the pre-allocated buffer or the transfer is too large.
40288ae12a0dSDavid Brownell 	 */
4029b3a223eeSMark Brown 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
40302cd94c8aSMark Brown 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
40312cd94c8aSMark Brown 				    GFP_KERNEL | GFP_DMA);
4032b3a223eeSMark Brown 		if (!local_buf)
4033b3a223eeSMark Brown 			return -ENOMEM;
4034b3a223eeSMark Brown 	} else {
4035b3a223eeSMark Brown 		local_buf = buf;
4036b3a223eeSMark Brown 	}
40378ae12a0dSDavid Brownell 
40388275c642SVitaly Wool 	spi_message_init(&message);
40395fe5f05eSJingoo Han 	memset(x, 0, sizeof(x));
4040bdff549eSDavid Brownell 	if (n_tx) {
4041bdff549eSDavid Brownell 		x[0].len = n_tx;
4042bdff549eSDavid Brownell 		spi_message_add_tail(&x[0], &message);
4043bdff549eSDavid Brownell 	}
4044bdff549eSDavid Brownell 	if (n_rx) {
4045bdff549eSDavid Brownell 		x[1].len = n_rx;
4046bdff549eSDavid Brownell 		spi_message_add_tail(&x[1], &message);
4047bdff549eSDavid Brownell 	}
40488275c642SVitaly Wool 
40498ae12a0dSDavid Brownell 	memcpy(local_buf, txbuf, n_tx);
4050bdff549eSDavid Brownell 	x[0].tx_buf = local_buf;
4051bdff549eSDavid Brownell 	x[1].rx_buf = local_buf + n_tx;
40528ae12a0dSDavid Brownell 
40538ae12a0dSDavid Brownell 	/* do the i/o */
40548ae12a0dSDavid Brownell 	status = spi_sync(spi, &message);
40559b938b74SMarc Pignat 	if (status == 0)
4056bdff549eSDavid Brownell 		memcpy(rxbuf, x[1].rx_buf, n_rx);
40578ae12a0dSDavid Brownell 
4058bdff549eSDavid Brownell 	if (x[0].tx_buf == buf)
4059068f4070SDavid Brownell 		mutex_unlock(&lock);
40608ae12a0dSDavid Brownell 	else
40618ae12a0dSDavid Brownell 		kfree(local_buf);
40628ae12a0dSDavid Brownell 
40638ae12a0dSDavid Brownell 	return status;
40648ae12a0dSDavid Brownell }
40658ae12a0dSDavid Brownell EXPORT_SYMBOL_GPL(spi_write_then_read);
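
/*
 * Illustrative sketch, not part of the driver: reading one register of a
 * MicroWire-style device.  The 0x0f command byte is an assumption made up
 * for this example.
 *
 *	u8 cmd = 0x0f;
 *	u8 val;
 *	int ret;
 *
 *	ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
 *	if (ret)
 *		return ret;
 */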
40668ae12a0dSDavid Brownell 
40678ae12a0dSDavid Brownell /*-------------------------------------------------------------------------*/
40688ae12a0dSDavid Brownell 
40695f143af7SMarco Felsch #if IS_ENABLED(CONFIG_OF)
4070ce79d54aSPantelis Antoniou /* must call put_device() when done with the returned spi_device */
40715f143af7SMarco Felsch struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4072ce79d54aSPantelis Antoniou {
4073cfba5de9SSuzuki K Poulose 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4074cfba5de9SSuzuki K Poulose 
4075ce79d54aSPantelis Antoniou 	return dev ? to_spi_device(dev) : NULL;
4076ce79d54aSPantelis Antoniou }
40775f143af7SMarco Felsch EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
40785f143af7SMarco Felsch #endif /* IS_ENABLED(CONFIG_OF) */
4079ce79d54aSPantelis Antoniou 
40805f143af7SMarco Felsch #if IS_ENABLED(CONFIG_OF_DYNAMIC)
40818caab75fSGeert Uytterhoeven /* the spi controllers are not using spi_bus, so we find them another way */
40828caab75fSGeert Uytterhoeven static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4083ce79d54aSPantelis Antoniou {
4084ce79d54aSPantelis Antoniou 	struct device *dev;
4085ce79d54aSPantelis Antoniou 
4086cfba5de9SSuzuki K Poulose 	dev = class_find_device_by_of_node(&spi_master_class, node);
40876c364062SGeert Uytterhoeven 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4088cfba5de9SSuzuki K Poulose 		dev = class_find_device_by_of_node(&spi_slave_class, node);
4089ce79d54aSPantelis Antoniou 	if (!dev)
4090ce79d54aSPantelis Antoniou 		return NULL;
4091ce79d54aSPantelis Antoniou 
4092ce79d54aSPantelis Antoniou 	/* reference taken in class_find_device */
40938caab75fSGeert Uytterhoeven 	return container_of(dev, struct spi_controller, dev);
4094ce79d54aSPantelis Antoniou }
4095ce79d54aSPantelis Antoniou 
4096ce79d54aSPantelis Antoniou static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4097ce79d54aSPantelis Antoniou 			 void *arg)
4098ce79d54aSPantelis Antoniou {
4099ce79d54aSPantelis Antoniou 	struct of_reconfig_data *rd = arg;
41008caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr;
4101ce79d54aSPantelis Antoniou 	struct spi_device *spi;
4102ce79d54aSPantelis Antoniou 
4103ce79d54aSPantelis Antoniou 	switch (of_reconfig_get_state_change(action, arg)) {
4104ce79d54aSPantelis Antoniou 	case OF_RECONFIG_CHANGE_ADD:
41058caab75fSGeert Uytterhoeven 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
41068caab75fSGeert Uytterhoeven 		if (ctlr == NULL)
4107ce79d54aSPantelis Antoniou 			return NOTIFY_OK;	/* not for us */
4108ce79d54aSPantelis Antoniou 
4109bd6c1644SGeert Uytterhoeven 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
41108caab75fSGeert Uytterhoeven 			put_device(&ctlr->dev);
4111bd6c1644SGeert Uytterhoeven 			return NOTIFY_OK;
4112bd6c1644SGeert Uytterhoeven 		}
4113bd6c1644SGeert Uytterhoeven 
41148caab75fSGeert Uytterhoeven 		spi = of_register_spi_device(ctlr, rd->dn);
41158caab75fSGeert Uytterhoeven 		put_device(&ctlr->dev);
4116ce79d54aSPantelis Antoniou 
4117ce79d54aSPantelis Antoniou 		if (IS_ERR(spi)) {
411825c56c88SRob Herring 			pr_err("%s: failed to create for '%pOF'\n",
411925c56c88SRob Herring 					__func__, rd->dn);
4120e0af98a7SRalf Ramsauer 			of_node_clear_flag(rd->dn, OF_POPULATED);
4121ce79d54aSPantelis Antoniou 			return notifier_from_errno(PTR_ERR(spi));
4122ce79d54aSPantelis Antoniou 		}
4123ce79d54aSPantelis Antoniou 		break;
4124ce79d54aSPantelis Antoniou 
4125ce79d54aSPantelis Antoniou 	case OF_RECONFIG_CHANGE_REMOVE:
4126bd6c1644SGeert Uytterhoeven 		/* already depopulated? */
4127bd6c1644SGeert Uytterhoeven 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4128bd6c1644SGeert Uytterhoeven 			return NOTIFY_OK;
4129bd6c1644SGeert Uytterhoeven 
4130ce79d54aSPantelis Antoniou 		/* find our device by node */
4131ce79d54aSPantelis Antoniou 		spi = of_find_spi_device_by_node(rd->dn);
4132ce79d54aSPantelis Antoniou 		if (spi == NULL)
4133ce79d54aSPantelis Antoniou 			return NOTIFY_OK;	/* no? not meant for us */
4134ce79d54aSPantelis Antoniou 
4135ce79d54aSPantelis Antoniou 		/* unregister takes one ref away */
4136ce79d54aSPantelis Antoniou 		spi_unregister_device(spi);
4137ce79d54aSPantelis Antoniou 
4138ce79d54aSPantelis Antoniou 	/* and drop the reference taken by the find above */
4139ce79d54aSPantelis Antoniou 		put_device(&spi->dev);
4140ce79d54aSPantelis Antoniou 		break;
4141ce79d54aSPantelis Antoniou 	}
4142ce79d54aSPantelis Antoniou 
4143ce79d54aSPantelis Antoniou 	return NOTIFY_OK;
4144ce79d54aSPantelis Antoniou }
4145ce79d54aSPantelis Antoniou 
4146ce79d54aSPantelis Antoniou static struct notifier_block spi_of_notifier = {
4147ce79d54aSPantelis Antoniou 	.notifier_call = of_spi_notify,
4148ce79d54aSPantelis Antoniou };
4149ce79d54aSPantelis Antoniou #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
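/*
 * With CONFIG_OF_DYNAMIC disabled, IS_ENABLED(CONFIG_OF_DYNAMIC) in
 * spi_init() is a compile-time zero, so the only reference to this symbol
 * is optimized away; the extern declaration just keeps the compiler happy
 * and never needs a definition.
 */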
4150ce79d54aSPantelis Antoniou extern struct notifier_block spi_of_notifier;
4151ce79d54aSPantelis Antoniou #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4152ce79d54aSPantelis Antoniou 
41537f24467fSOctavian Purdila #if IS_ENABLED(CONFIG_ACPI)
41548caab75fSGeert Uytterhoeven static int spi_acpi_controller_match(struct device *dev, const void *data)
41557f24467fSOctavian Purdila {
41567f24467fSOctavian Purdila 	return ACPI_COMPANION(dev->parent) == data;
41577f24467fSOctavian Purdila }
41587f24467fSOctavian Purdila 
41598caab75fSGeert Uytterhoeven static struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
41607f24467fSOctavian Purdila {
41617f24467fSOctavian Purdila 	struct device *dev;
41627f24467fSOctavian Purdila 
41637f24467fSOctavian Purdila 	dev = class_find_device(&spi_master_class, NULL, adev,
41648caab75fSGeert Uytterhoeven 				spi_acpi_controller_match);
41656c364062SGeert Uytterhoeven 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
41666c364062SGeert Uytterhoeven 		dev = class_find_device(&spi_slave_class, NULL, adev,
41678caab75fSGeert Uytterhoeven 					spi_acpi_controller_match);
41687f24467fSOctavian Purdila 	if (!dev)
41697f24467fSOctavian Purdila 		return NULL;
41707f24467fSOctavian Purdila 
41718caab75fSGeert Uytterhoeven 	return container_of(dev, struct spi_controller, dev);
41727f24467fSOctavian Purdila }
41737f24467fSOctavian Purdila 
41747f24467fSOctavian Purdila static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
41757f24467fSOctavian Purdila {
41767f24467fSOctavian Purdila 	struct device *dev;
41777f24467fSOctavian Purdila 
417800500147SSuzuki K Poulose 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
41795b16668eSWolfram Sang 	return to_spi_device(dev);
41807f24467fSOctavian Purdila }
41817f24467fSOctavian Purdila 
41827f24467fSOctavian Purdila static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
41837f24467fSOctavian Purdila 			   void *arg)
41847f24467fSOctavian Purdila {
41857f24467fSOctavian Purdila 	struct acpi_device *adev = arg;
41868caab75fSGeert Uytterhoeven 	struct spi_controller *ctlr;
41877f24467fSOctavian Purdila 	struct spi_device *spi;
41887f24467fSOctavian Purdila 
41897f24467fSOctavian Purdila 	switch (value) {
41907f24467fSOctavian Purdila 	case ACPI_RECONFIG_DEVICE_ADD:
41918caab75fSGeert Uytterhoeven 		ctlr = acpi_spi_find_controller_by_adev(adev->parent);
41928caab75fSGeert Uytterhoeven 		if (!ctlr)
41937f24467fSOctavian Purdila 			break;
41947f24467fSOctavian Purdila 
41958caab75fSGeert Uytterhoeven 		acpi_register_spi_device(ctlr, adev);
41968caab75fSGeert Uytterhoeven 		put_device(&ctlr->dev);
41977f24467fSOctavian Purdila 		break;
41987f24467fSOctavian Purdila 	case ACPI_RECONFIG_DEVICE_REMOVE:
41997f24467fSOctavian Purdila 		if (!acpi_device_enumerated(adev))
42007f24467fSOctavian Purdila 			break;
42017f24467fSOctavian Purdila 
42027f24467fSOctavian Purdila 		spi = acpi_spi_find_device_by_adev(adev);
42037f24467fSOctavian Purdila 		if (!spi)
42047f24467fSOctavian Purdila 			break;
42057f24467fSOctavian Purdila 
42067f24467fSOctavian Purdila 		spi_unregister_device(spi);
42077f24467fSOctavian Purdila 		put_device(&spi->dev);
42087f24467fSOctavian Purdila 		break;
42097f24467fSOctavian Purdila 	}
42107f24467fSOctavian Purdila 
42117f24467fSOctavian Purdila 	return NOTIFY_OK;
42127f24467fSOctavian Purdila }
42137f24467fSOctavian Purdila 
42147f24467fSOctavian Purdila static struct notifier_block spi_acpi_notifier = {
42157f24467fSOctavian Purdila 	.notifier_call = acpi_spi_notify,
42167f24467fSOctavian Purdila };
42177f24467fSOctavian Purdila #else
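/* as with spi_of_notifier above: the reference is compiled out, so this
 * extern declaration never needs a definition.
 */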
42187f24467fSOctavian Purdila extern struct notifier_block spi_acpi_notifier;
42197f24467fSOctavian Purdila #endif
42207f24467fSOctavian Purdila 
42218ae12a0dSDavid Brownell static int __init spi_init(void)
42228ae12a0dSDavid Brownell {
4223b885244eSDavid Brownell 	int	status;
42248ae12a0dSDavid Brownell 
4225e94b1766SChristoph Lameter 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4226b885244eSDavid Brownell 	if (!buf) {
4227b885244eSDavid Brownell 		status = -ENOMEM;
4228b885244eSDavid Brownell 		goto err0;
42298ae12a0dSDavid Brownell 	}
4230b885244eSDavid Brownell 
4231b885244eSDavid Brownell 	status = bus_register(&spi_bus_type);
4232b885244eSDavid Brownell 	if (status < 0)
4233b885244eSDavid Brownell 		goto err1;
4234b885244eSDavid Brownell 
4235b885244eSDavid Brownell 	status = class_register(&spi_master_class);
4236b885244eSDavid Brownell 	if (status < 0)
4237b885244eSDavid Brownell 		goto err2;
4238ce79d54aSPantelis Antoniou 
42396c364062SGeert Uytterhoeven 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
42406c364062SGeert Uytterhoeven 		status = class_register(&spi_slave_class);
42416c364062SGeert Uytterhoeven 		if (status < 0)
42426c364062SGeert Uytterhoeven 			goto err3;
42436c364062SGeert Uytterhoeven 	}
42446c364062SGeert Uytterhoeven 
42455267720eSFabio Estevam 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4246ce79d54aSPantelis Antoniou 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
42477f24467fSOctavian Purdila 	if (IS_ENABLED(CONFIG_ACPI))
42487f24467fSOctavian Purdila 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4249ce79d54aSPantelis Antoniou 
4250b885244eSDavid Brownell 	return 0;
4251b885244eSDavid Brownell 
42526c364062SGeert Uytterhoeven err3:
42536c364062SGeert Uytterhoeven 	class_unregister(&spi_master_class);
4254b885244eSDavid Brownell err2:
4255b885244eSDavid Brownell 	bus_unregister(&spi_bus_type);
4256b885244eSDavid Brownell err1:
4257b885244eSDavid Brownell 	kfree(buf);
4258b885244eSDavid Brownell 	buf = NULL;
4259b885244eSDavid Brownell err0:
4260b885244eSDavid Brownell 	return status;
4261b885244eSDavid Brownell }
4262b885244eSDavid Brownell 
42638ae12a0dSDavid Brownell /* board_info is normally registered in arch_initcall() (see the sketch
42648ae12a0dSDavid Brownell  * below), but even essential drivers wait until later.
4265b885244eSDavid Brownell  *
4266b885244eSDavid Brownell  * REVISIT: only boardinfo really needs static linking. The rest (device and
4267b885244eSDavid Brownell  * driver registration) _could_ be dynamically linked (modular) ... the cost
4268b885244eSDavid Brownell  * is needing to make the boardinfo data structures much more public.
42698ae12a0dSDavid Brownell  */
4270673c0c00SDavid Brownell postcore_initcall(spi_init);
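
/*
 * A hypothetical board-file sketch (illustration only, not compiled) of the
 * boardinfo registration mentioned above.  The "m25p80" modalias, bus
 * number, chip select and clock rate are assumed values.
 */
#if 0
static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "m25p80",	/* assumed SPI flash driver */
		.max_speed_hz	= 20000000,
		.bus_num	= 0,
		.chip_select	= 1,
		.mode		= SPI_MODE_0,
	},
};

static int __init example_board_init(void)
{
	/* registered from arch_initcall(), as the comment above notes */
	return spi_register_board_info(example_board_info,
				       ARRAY_SIZE(example_board_info));
}
arch_initcall(example_board_init);
#endif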
4271