// SPDX-License-Identifier: GPL-2.0-or-later
// SPI init/core code
//
// Copyright (C) 2005 David Brownell
// Copyright (C) 2008 Secret Lab Technologies Ltd.

#include <linux/acpi.h>
#include <linux/cache.h>
#include <linux/clk/clk-conf.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>

#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_start);
EXPORT_TRACEPOINT_SYMBOL(spi_transfer_stop);

#include "internals.h"

static DEFINE_IDR(spi_controller_idr);

static void spidev_release(struct device *dev)
{
	struct spi_device	*spi = to_spi_device(dev);

	spi_controller_put(spi->controller);
	kfree(spi->driver_override);
	free_percpu(spi->pcpu_statistics);
	kfree(spi);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *a, char *buf)
{
	const struct spi_device	*spi = to_spi_device(dev);
	int len;

	len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
	if (len != -ENODEV)
		return len;

	return sysfs_emit(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
}
static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *a,
				     const char *buf, size_t count)
{
	struct spi_device *spi = to_spi_device(dev);
	int ret;

	ret = driver_set_override(dev, &spi->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *a, char *buf)
{
	const struct spi_device *spi = to_spi_device(dev);
	ssize_t len;

	device_lock(dev);
	len = sysfs_emit(buf, "%s\n", spi->driver_override ? : "");
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct spi_statistics __percpu *spi_alloc_pcpu_stats(struct device *dev)
{
	struct spi_statistics __percpu *pcpu_stats;

	if (dev)
		pcpu_stats = devm_alloc_percpu(dev, struct spi_statistics);
	else
		pcpu_stats = alloc_percpu_gfp(struct spi_statistics, GFP_KERNEL);

	if (pcpu_stats) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct spi_statistics *stat;

			stat = per_cpu_ptr(pcpu_stats, cpu);
			u64_stats_init(&stat->syncp);
		}
	}
	return pcpu_stats;
}

static ssize_t spi_emit_pcpu_stats(struct spi_statistics __percpu *stat,
				   char *buf, size_t offset)
{
	u64 val = 0;
	int i;

	for_each_possible_cpu(i) {
		const struct spi_statistics *pcpu_stats;
		u64_stats_t *field;
		unsigned int start;
		u64 inc;

		pcpu_stats = per_cpu_ptr(stat, i);
		field = (void *)pcpu_stats + offset;
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			inc = u64_stats_read(field);
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));
		val += inc;
	}
	return sysfs_emit(buf, "%llu\n", val);
}

#define SPI_STATISTICS_ATTRS(field, file)				\
static ssize_t spi_controller_##field##_show(struct device *dev,	\
					     struct device_attribute *attr, \
					     char *buf)			\
{									\
	struct spi_controller *ctlr = container_of(dev,			\
					 struct spi_controller, dev);	\
	return spi_statistics_##field##_show(ctlr->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_controller_##field = {	\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_controller_##field##_show,				\
};									\
static ssize_t spi_device_##field##_show(struct device *dev,		\
					 struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct spi_device *spi = to_spi_device(dev);			\
	return spi_statistics_##field##_show(spi->pcpu_statistics, buf); \
}									\
static struct device_attribute dev_attr_spi_device_##field = {		\
	.attr = { .name = file, .mode = 0444 },				\
	.show = spi_device_##field##_show,				\
}

#define SPI_STATISTICS_SHOW_NAME(name, file, field)			\
static ssize_t spi_statistics_##name##_show(struct spi_statistics __percpu *stat, \
					    char *buf)			\
{									\
	return spi_emit_pcpu_stats(stat, buf,				\
			offsetof(struct spi_statistics, field));	\
}									\
SPI_STATISTICS_ATTRS(name, file)

#define SPI_STATISTICS_SHOW(field)					\
	SPI_STATISTICS_SHOW_NAME(field, __stringify(field),		\
				 field)

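/*
 * For illustration: SPI_STATISTICS_SHOW(messages) below expands (roughly)
 * into a spi_statistics_messages_show() helper that sums the per-CPU
 * "messages" counter via spi_emit_pcpu_stats(), plus two read-only device
 * attributes, dev_attr_spi_controller_messages and
 * dev_attr_spi_device_messages, each exposed as a "messages" sysfs file.
 */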
SPI_STATISTICS_SHOW(messages);
SPI_STATISTICS_SHOW(transfers);
SPI_STATISTICS_SHOW(errors);
SPI_STATISTICS_SHOW(timedout);

SPI_STATISTICS_SHOW(spi_sync);
SPI_STATISTICS_SHOW(spi_sync_immediate);
SPI_STATISTICS_SHOW(spi_async);

SPI_STATISTICS_SHOW(bytes);
SPI_STATISTICS_SHOW(bytes_rx);
SPI_STATISTICS_SHOW(bytes_tx);

#define SPI_STATISTICS_TRANSFER_BYTES_HISTO(index, number)		\
	SPI_STATISTICS_SHOW_NAME(transfer_bytes_histo##index,		\
				 "transfer_bytes_histo_" number,	\
				 transfer_bytes_histo[index])
SPI_STATISTICS_TRANSFER_BYTES_HISTO(0,  "0-1");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(1,  "2-3");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(2,  "4-7");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(3,  "8-15");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(4,  "16-31");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(5,  "32-63");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(6,  "64-127");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(7,  "128-255");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(8,  "256-511");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(9,  "512-1023");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(10, "1024-2047");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(11, "2048-4095");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(12, "4096-8191");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(13, "8192-16383");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");

SPI_STATISTICS_SHOW(transfers_split_maxsize);

static struct attribute *spi_dev_attrs[] = {
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static const struct attribute_group spi_dev_group = {
	.attrs  = spi_dev_attrs,
};

static struct attribute *spi_device_statistics_attrs[] = {
	&dev_attr_spi_device_messages.attr,
	&dev_attr_spi_device_transfers.attr,
	&dev_attr_spi_device_errors.attr,
	&dev_attr_spi_device_timedout.attr,
	&dev_attr_spi_device_spi_sync.attr,
	&dev_attr_spi_device_spi_sync_immediate.attr,
	&dev_attr_spi_device_spi_async.attr,
	&dev_attr_spi_device_bytes.attr,
	&dev_attr_spi_device_bytes_rx.attr,
	&dev_attr_spi_device_bytes_tx.attr,
	&dev_attr_spi_device_transfer_bytes_histo0.attr,
	&dev_attr_spi_device_transfer_bytes_histo1.attr,
	&dev_attr_spi_device_transfer_bytes_histo2.attr,
	&dev_attr_spi_device_transfer_bytes_histo3.attr,
	&dev_attr_spi_device_transfer_bytes_histo4.attr,
	&dev_attr_spi_device_transfer_bytes_histo5.attr,
	&dev_attr_spi_device_transfer_bytes_histo6.attr,
	&dev_attr_spi_device_transfer_bytes_histo7.attr,
	&dev_attr_spi_device_transfer_bytes_histo8.attr,
	&dev_attr_spi_device_transfer_bytes_histo9.attr,
	&dev_attr_spi_device_transfer_bytes_histo10.attr,
	&dev_attr_spi_device_transfer_bytes_histo11.attr,
	&dev_attr_spi_device_transfer_bytes_histo12.attr,
	&dev_attr_spi_device_transfer_bytes_histo13.attr,
	&dev_attr_spi_device_transfer_bytes_histo14.attr,
	&dev_attr_spi_device_transfer_bytes_histo15.attr,
	&dev_attr_spi_device_transfer_bytes_histo16.attr,
	&dev_attr_spi_device_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_device_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_device_statistics_attrs,
};

static const struct attribute_group *spi_dev_groups[] = {
	&spi_dev_group,
	&spi_device_statistics_group,
	NULL,
};

static struct attribute *spi_controller_statistics_attrs[] = {
	&dev_attr_spi_controller_messages.attr,
	&dev_attr_spi_controller_transfers.attr,
	&dev_attr_spi_controller_errors.attr,
	&dev_attr_spi_controller_timedout.attr,
	&dev_attr_spi_controller_spi_sync.attr,
	&dev_attr_spi_controller_spi_sync_immediate.attr,
	&dev_attr_spi_controller_spi_async.attr,
	&dev_attr_spi_controller_bytes.attr,
	&dev_attr_spi_controller_bytes_rx.attr,
	&dev_attr_spi_controller_bytes_tx.attr,
	&dev_attr_spi_controller_transfer_bytes_histo0.attr,
	&dev_attr_spi_controller_transfer_bytes_histo1.attr,
	&dev_attr_spi_controller_transfer_bytes_histo2.attr,
	&dev_attr_spi_controller_transfer_bytes_histo3.attr,
	&dev_attr_spi_controller_transfer_bytes_histo4.attr,
	&dev_attr_spi_controller_transfer_bytes_histo5.attr,
	&dev_attr_spi_controller_transfer_bytes_histo6.attr,
	&dev_attr_spi_controller_transfer_bytes_histo7.attr,
	&dev_attr_spi_controller_transfer_bytes_histo8.attr,
	&dev_attr_spi_controller_transfer_bytes_histo9.attr,
	&dev_attr_spi_controller_transfer_bytes_histo10.attr,
	&dev_attr_spi_controller_transfer_bytes_histo11.attr,
	&dev_attr_spi_controller_transfer_bytes_histo12.attr,
	&dev_attr_spi_controller_transfer_bytes_histo13.attr,
	&dev_attr_spi_controller_transfer_bytes_histo14.attr,
	&dev_attr_spi_controller_transfer_bytes_histo15.attr,
	&dev_attr_spi_controller_transfer_bytes_histo16.attr,
	&dev_attr_spi_controller_transfers_split_maxsize.attr,
	NULL,
};

static const struct attribute_group spi_controller_statistics_group = {
	.name  = "statistics",
	.attrs  = spi_controller_statistics_attrs,
};

static const struct attribute_group *spi_controller_groups[] = {
	&spi_controller_statistics_group,
	NULL,
};

static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
					      struct spi_transfer *xfer,
					      struct spi_message *msg)
{
	int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
	struct spi_statistics *stats;

	if (l2len < 0)
		l2len = 0;
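	/*
	 * Worked example of the bucket math above: a 6-byte transfer has
	 * fls(6) == 3, so l2len == 2, which is the "4-7" histogram bucket;
	 * a zero-length transfer would give fls(0) - 1 == -1 and is clamped
	 * into bucket 0 ("0-1").
	 */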

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	u64_stats_inc(&stats->transfers);
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	u64_stats_add(&stats->bytes, xfer->len);
	if (spi_valid_txbuf(msg, xfer))
		u64_stats_add(&stats->bytes_tx, xfer->len);
	if (spi_valid_rxbuf(msg, xfer))
		u64_stats_add(&stats->bytes_rx, xfer->len);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}

/*
 * modalias support makes "modprobe $MODALIAS" new-style hotplug work,
 * and the sysfs version makes coldplug work too.
 */
static const struct spi_device_id *spi_match_id(const struct spi_device_id *id, const char *name)
{
	while (id->name[0]) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
{
	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);

	return spi_match_id(sdrv->id_table, sdev->modalias);
}
EXPORT_SYMBOL_GPL(spi_get_device_id);

const void *spi_get_device_match_data(const struct spi_device *sdev)
{
	const void *match;

	match = device_get_match_data(&sdev->dev);
	if (match)
		return match;

	return (const void *)spi_get_device_id(sdev)->driver_data;
}
EXPORT_SYMBOL_GPL(spi_get_device_match_data);

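/*
 * Typical use, sketched for a hypothetical driver: look up per-variant
 * data at probe time, falling back from OF/ACPI match data to the
 * spi_device_id table ("foo" names are illustrative only):
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		const struct foo_chip_info *info;
 *
 *		info = spi_get_device_match_data(spi);
 *		if (!info)
 *			return -ENODEV;
 *		...
 *	}
 *
 * Note that the fallback path dereferences spi_get_device_id()'s result
 * unconditionally, so it is only safe when the driver has an id_table
 * entry matching the device's modalias.
 */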
static int spi_match_device(struct device *dev, const struct device_driver *drv)
{
	const struct spi_device	*spi = to_spi_device(dev);
	const struct spi_driver	*sdrv = to_spi_driver(drv);

	/* Check override first, and if set, only use the named driver */
	if (spi->driver_override)
		return strcmp(spi->driver_override, drv->name) == 0;

	/* Attempt an OF style match */
	if (of_driver_match_device(dev, drv))
		return 1;

	/* Then try ACPI */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	if (sdrv->id_table)
		return !!spi_match_id(sdrv->id_table, spi->modalias);

	return strcmp(spi->modalias, drv->name) == 0;
}

static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct spi_device		*spi = to_spi_device(dev);
	int rc;

	rc = acpi_device_uevent_modalias(dev, env);
	if (rc != -ENODEV)
		return rc;

	return add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
}

static int spi_probe(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
	struct spi_device		*spi = to_spi_device(dev);
	struct fwnode_handle		*fwnode = dev_fwnode(dev);
	int ret;

	ret = of_clk_set_defaults(dev->of_node, false);
	if (ret)
		return ret;

	if (is_of_node(fwnode))
		spi->irq = of_irq_get(dev->of_node, 0);
	else if (is_acpi_device_node(fwnode) && spi->irq < 0)
		spi->irq = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), 0);
	if (spi->irq == -EPROBE_DEFER)
		return dev_err_probe(dev, spi->irq, "Failed to get irq\n");
	if (spi->irq < 0)
		spi->irq = 0;

	ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
	if (ret)
		return ret;

	if (sdrv->probe) {
		ret = sdrv->probe(spi);
		if (ret)
			dev_pm_domain_detach(dev, true);
	}

	return ret;
}

static void spi_remove(struct device *dev)
{
	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);

	if (sdrv->remove)
		sdrv->remove(to_spi_device(dev));

	dev_pm_domain_detach(dev, true);
}

static void spi_shutdown(struct device *dev)
{
	if (dev->driver) {
		const struct spi_driver	*sdrv = to_spi_driver(dev->driver);

		if (sdrv->shutdown)
			sdrv->shutdown(to_spi_device(dev));
	}
}

const struct bus_type spi_bus_type = {
	.name		= "spi",
	.dev_groups	= spi_dev_groups,
	.match		= spi_match_device,
	.uevent		= spi_uevent,
	.probe		= spi_probe,
	.remove		= spi_remove,
	.shutdown	= spi_shutdown,
};
EXPORT_SYMBOL_GPL(spi_bus_type);

/**
 * __spi_register_driver - register a SPI driver
 * @owner: owner module of the driver to register
 * @sdrv: the driver to register
 * Context: can sleep
 *
 * Return: zero on success, else a negative error code.
 */
int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
	sdrv->driver.owner = owner;
	sdrv->driver.bus = &spi_bus_type;

	/*
	 * For Really Good Reasons we use spi: modaliases not of:
	 * modaliases for DT so module autoloading won't work if we
	 * don't have a spi_device_id as well as a compatible string.
	 */
	if (sdrv->driver.of_match_table) {
		const struct of_device_id *of_id;

		for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
		     of_id++) {
			const char *of_name;

			/* Strip off any vendor prefix */
			of_name = strnchr(of_id->compatible,
					  sizeof(of_id->compatible), ',');
			if (of_name)
				of_name++;
			else
				of_name = of_id->compatible;

			if (sdrv->id_table) {
				const struct spi_device_id *spi_id;

				spi_id = spi_match_id(sdrv->id_table, of_name);
				if (spi_id)
					continue;
			} else {
				if (strcmp(sdrv->driver.name, of_name) == 0)
					continue;
			}

			pr_warn("SPI driver %s has no spi_device_id for %s\n",
				sdrv->driver.name, of_id->compatible);
		}
	}

	return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);

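/*
 * Illustrative sketch (not part of this file): a driver that avoids the
 * warning above by pairing every DT compatible with a spi_device_id,
 * using hypothetical "foo" identifiers:
 *
 *	static const struct spi_device_id foo_spi_ids[] = {
 *		{ "foo-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(spi, foo_spi_ids);
 *
 *	static const struct of_device_id foo_of_match[] = {
 *		{ .compatible = "vendor,foo-chip" },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(of, foo_of_match);
 *
 *	static struct spi_driver foo_driver = {
 *		.driver = {
 *			.name		= "foo-chip",
 *			.of_match_table	= foo_of_match,
 *		},
 *		.id_table	= foo_spi_ids,
 *		.probe		= foo_probe,
 *	};
 *	module_spi_driver(foo_driver);
 *
 * Stripping the "vendor," prefix from "vendor,foo-chip" yields "foo-chip",
 * which matches the spi_device_id entry, so no warning is printed.
 */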
/*-------------------------------------------------------------------------*/

/*
 * SPI devices should normally not be created by SPI device drivers; that
 * would make them board-specific.  Similarly with SPI controller drivers.
 * Device registration normally goes into files like
 * arch/.../mach.../board-YYY.c, alongside other readonly (flashable)
 * information about mainboard devices.
 */

struct boardinfo {
	struct list_head	list;
	struct spi_board_info	board_info;
};

static LIST_HEAD(board_list);
static LIST_HEAD(spi_controller_list);

/*
 * Used to protect add/del operations on the board_info list and the
 * spi_controller list, and their matching process; also used to protect
 * the spi_controller_idr object.
 */
static DEFINE_MUTEX(board_lock);

/**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
 * Context: can sleep
 *
 * Allows a driver to allocate and initialize a spi_device without
 * registering it immediately.  This allows a driver to directly
 * fill the spi_device with device parameters before calling
 * spi_add_device() on it.
 *
 * Caller is responsible to call spi_add_device() on the returned
 * spi_device structure to add it to the SPI controller.  If the caller
 * needs to discard the spi_device without adding it, then it should
 * call spi_dev_put() on it.
 *
 * Return: a pointer to the new device, or NULL.
 */
struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
	struct spi_device	*spi;

	if (!spi_controller_get(ctlr))
		return NULL;

	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
	if (!spi) {
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->pcpu_statistics = spi_alloc_pcpu_stats(NULL);
	if (!spi->pcpu_statistics) {
		kfree(spi);
		spi_controller_put(ctlr);
		return NULL;
	}

	spi->controller = ctlr;
	spi->dev.parent = &ctlr->dev;
	spi->dev.bus = &spi_bus_type;
	spi->dev.release = spidev_release;
	spi->mode = ctlr->buswidth_override_bits;

	device_initialize(&spi->dev);
	return spi;
}
EXPORT_SYMBOL_GPL(spi_alloc_device);

static void spi_dev_set_name(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct fwnode_handle *fwnode = dev_fwnode(dev);

	if (is_acpi_device_node(fwnode)) {
		dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
		return;
	}

	if (is_software_node(fwnode)) {
		dev_set_name(dev, "spi-%pfwP", fwnode);
		return;
	}

	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
		     spi_get_chipselect(spi, 0));
}

/*
 * Zero(0) is a valid physical CS value and can be located at any
 * logical CS in the spi->chip_select[]. If all the physical CS
 * were initialized to 0 then it would be difficult to differentiate
 * between a valid physical CS 0 and an unused logical CS whose physical
 * CS can be 0. As a solution to this issue, initialize all the CS to -1.
 * Now all the unused logical CS will have -1 as their physical CS value
 * and can be ignored while performing physical CS validity checks.
 */
#define SPI_INVALID_CS		((s8)-1)

static inline bool is_valid_cs(s8 chip_select)
{
	return chip_select != SPI_INVALID_CS;
}

static inline int spi_dev_check_cs(struct device *dev,
				   struct spi_device *spi, u8 idx,
				   struct spi_device *new_spi, u8 new_idx)
{
	u8 cs, cs_new;
	u8 idx_new;

	cs = spi_get_chipselect(spi, idx);
	for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
		cs_new = spi_get_chipselect(new_spi, idx_new);
		if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
			dev_err(dev, "chipselect %u already in use\n", cs_new);
			return -EBUSY;
		}
	}
	return 0;
}

static int spi_dev_check(struct device *dev, void *data)
{
	struct spi_device *spi = to_spi_device(dev);
	struct spi_device *new_spi = data;
	int status, idx;

	if (spi->controller == new_spi->controller) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
			if (status)
				return status;
		}
	}
	return 0;
}

static void spi_cleanup(struct spi_device *spi)
{
	if (spi->controller->cleanup)
		spi->controller->cleanup(spi);
}

static int __spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	struct device *dev = ctlr->dev.parent;
	int status, idx;
	u8 cs;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
		/* Chipselects are numbered 0..max; validate. */
		cs = spi_get_chipselect(spi, idx);
		if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
			dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
				ctlr->num_chipselect);
			return -EINVAL;
		}
	}

	/*
	 * Make sure that multiple logical CS don't map to the same physical CS.
	 * For example, spi->chip_select[0] != spi->chip_select[1] and so on.
	 */
	if (!spi_controller_is_target(ctlr)) {
		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
			if (status)
				return status;
		}
	}

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	/*
	 * We need to make sure there's no other device with this
	 * chipselect **BEFORE** we call setup(), else we'll trash
	 * its configuration.
	 */
	status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
	if (status)
		return status;

	/* Controller may unregister concurrently */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
	    !device_is_registered(&ctlr->dev)) {
		return -ENODEV;
	}

	if (ctlr->cs_gpiods) {
		u8 cs;

		for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
			cs = spi_get_chipselect(spi, idx);
			if (is_valid_cs(cs))
				spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
		}
	}

	/*
	 * Drivers may modify this initial i/o setup, but will
	 * normally rely on the device being setup.  Devices
	 * using SPI_CS_HIGH can't coexist well otherwise...
	 */
	status = spi_setup(spi);
	if (status < 0) {
		dev_err(dev, "can't setup %s, status %d\n",
				dev_name(&spi->dev), status);
		return status;
	}

	/* Device may be bound to an active driver when this returns */
	status = device_add(&spi->dev);
	if (status < 0) {
		dev_err(dev, "can't add %s, status %d\n",
				dev_name(&spi->dev), status);
		spi_cleanup(spi);
	} else {
		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
	}

	return status;
}

/**
 * spi_add_device - Add spi_device allocated with spi_alloc_device
 * @spi: spi_device to register
 *
 * Companion function to spi_alloc_device.  Devices allocated with
 * spi_alloc_device can be added onto the SPI bus with this function.
 *
 * Return: 0 on success; negative errno on failure
 */
int spi_add_device(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;
	int status;

	/* Set the bus ID string */
	spi_dev_set_name(spi);

	mutex_lock(&ctlr->add_lock);
	status = __spi_add_device(spi);
	mutex_unlock(&ctlr->add_lock);
	return status;
}
EXPORT_SYMBOL_GPL(spi_add_device);

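/*
 * Illustrative sketch of this two-step path, assuming a hypothetical
 * adapter driver that learned about a chip out-of-band (the extra
 * bookkeeping spi_new_device() performs, such as marking unused logical
 * chip selects invalid and setting cs_index_mask, is elided here):
 *
 *	struct spi_device *spi = spi_alloc_device(ctlr);
 *
 *	if (!spi)
 *		return -ENOMEM;
 *	spi_set_chipselect(spi, 0, 2);
 *	spi->max_speed_hz = 1000000;
 *	strscpy(spi->modalias, "foo-chip", sizeof(spi->modalias));
 *	if (spi_add_device(spi))
 *		spi_dev_put(spi);	// discard without registering
 *
 * Once added, the device must be removed with spi_unregister_device();
 * spi_dev_put() is only for devices that were never added.
 */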
static void spi_set_all_cs_unused(struct spi_device *spi)
{
	u8 idx;

	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi_set_chipselect(spi, idx, SPI_INVALID_CS);
}

/**
 * spi_new_device - instantiate one new SPI device
 * @ctlr: Controller to which device is connected
 * @chip: Describes the SPI device
 * Context: can sleep
 *
 * On typical mainboards, this is purely internal; and it's not needed
 * after board init creates the hard-wired devices.  Some development
 * platforms may not be able to use spi_register_board_info though, and
 * this is exported so that for example a USB or parport based adapter
 * driver could add devices (which it would learn about out-of-band).
 *
 * Return: the new device, or NULL.
 */
struct spi_device *spi_new_device(struct spi_controller *ctlr,
				  struct spi_board_info *chip)
{
	struct spi_device	*proxy;
	int			status;

	/*
	 * NOTE:  caller did any chip->bus_num checks necessary.
	 *
	 * Also, unless we change the return value convention to use
	 * error-or-pointer (not NULL-or-pointer), troubleshootability
	 * suggests syslogged diagnostics are best here (ugh).
	 */

	proxy = spi_alloc_device(ctlr);
	if (!proxy)
		return NULL;

	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));

	/* Use provided chip-select for proxy device */
	spi_set_all_cs_unused(proxy);
	spi_set_chipselect(proxy, 0, chip->chip_select);

	proxy->max_speed_hz = chip->max_speed_hz;
	proxy->mode = chip->mode;
	proxy->irq = chip->irq;
	strscpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
	proxy->dev.platform_data = (void *) chip->platform_data;
	proxy->controller_data = chip->controller_data;
	proxy->controller_state = NULL;
	/*
	 * By default spi->chip_select[0] will hold the physical CS number,
	 * so set bit 0 in spi->cs_index_mask.
	 */
	proxy->cs_index_mask = BIT(0);

	if (chip->swnode) {
		status = device_add_software_node(&proxy->dev, chip->swnode);
		if (status) {
			dev_err(&ctlr->dev, "failed to add software node to '%s': %d\n",
				chip->modalias, status);
			goto err_dev_put;
		}
	}

	status = spi_add_device(proxy);
	if (status < 0)
		goto err_dev_put;

	return proxy;

err_dev_put:
	device_remove_software_node(&proxy->dev);
	spi_dev_put(proxy);
	return NULL;
}
EXPORT_SYMBOL_GPL(spi_new_device);

/**
 * spi_unregister_device - unregister a single SPI device
 * @spi: spi_device to unregister
 *
 * Start making the passed SPI device vanish. Normally this would be handled
 * by spi_unregister_controller().
 */
void spi_unregister_device(struct spi_device *spi)
{
	struct fwnode_handle *fwnode;

	if (!spi)
		return;

	fwnode = dev_fwnode(&spi->dev);
	if (is_of_node(fwnode)) {
		of_node_clear_flag(to_of_node(fwnode), OF_POPULATED);
		of_node_put(to_of_node(fwnode));
	} else if (is_acpi_device_node(fwnode)) {
		acpi_device_clear_enumerated(to_acpi_device_node(fwnode));
	}
	device_remove_software_node(&spi->dev);
	device_del(&spi->dev);
	spi_cleanup(spi);
	put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);

static void spi_match_controller_to_boardinfo(struct spi_controller *ctlr,
					      struct spi_board_info *bi)
{
	struct spi_device *dev;

	if (ctlr->bus_num != bi->bus_num)
		return;

	dev = spi_new_device(ctlr, bi);
	if (!dev)
		dev_err(ctlr->dev.parent, "can't create new device for %s\n",
			bi->modalias);
}

/**
 * spi_register_board_info - register SPI devices for a given board
 * @info: array of chip descriptors
 * @n: how many descriptors are provided
 * Context: can sleep
 *
 * Board-specific early init code calls this (probably during arch_initcall)
 * with segments of the SPI device table.  Any device nodes are created later,
 * after the relevant parent SPI controller (bus_num) is defined.  We keep
 * this table of devices forever, so that reloading a controller driver will
 * not make Linux forget about these hard-wired devices.
 *
 * Other code can also call this, e.g. a particular add-on board might provide
 * SPI devices through its expansion connector, so code initializing that board
 * would naturally declare its SPI devices.
 *
 * The board info passed can safely be __initdata ... but be careful of
 * any embedded pointers (platform_data, etc), they're copied as-is.
 *
 * Return: zero on success, else a negative error code.
 */
int spi_register_board_info(struct spi_board_info const *info, unsigned n)
{
	struct boardinfo *bi;
	int i;

	if (!n)
		return 0;

	bi = kcalloc(n, sizeof(*bi), GFP_KERNEL);
	if (!bi)
		return -ENOMEM;

	for (i = 0; i < n; i++, bi++, info++) {
		struct spi_controller *ctlr;

		memcpy(&bi->board_info, info, sizeof(*info));

		mutex_lock(&board_lock);
		list_add_tail(&bi->list, &board_list);
		list_for_each_entry(ctlr, &spi_controller_list, list)
			spi_match_controller_to_boardinfo(ctlr,
							  &bi->board_info);
		mutex_unlock(&board_lock);
	}

	return 0;
}

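/*
 * Illustrative sketch (not part of this file): board init code declaring
 * one device on bus 0, chip select 1, here an SPI NOR flash:
 *
 *	static struct spi_board_info board_spi_devs[] __initdata = {
 *		{
 *			.modalias	= "m25p80",
 *			.max_speed_hz	= 20000000,
 *			.bus_num	= 0,
 *			.chip_select	= 1,
 *			.mode		= SPI_MODE_3,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs));
 *
 * The descriptors are copied, so the array itself may live in __initdata,
 * but anything its platform_data points to must stay valid.
 */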
/*-------------------------------------------------------------------------*/

/* Core methods for SPI resource management */

/**
 * spi_res_alloc - allocate a spi resource that is life-cycle managed
 *                 during the processing of a spi_message while using
 *                 spi_transfer_one
 * @spi:     the SPI device for which we allocate memory
 * @release: the release code to execute for this resource
 * @size:    size to alloc and return
 * @gfp:     GFP allocation flags
 *
 * Return: the pointer to the allocated data
 *
 * This may get enhanced in the future to allocate from a memory pool
 * of the @spi_device or @spi_controller to avoid repeated allocations.
 */
static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
			   size_t size, gfp_t gfp)
{
	struct spi_res *sres;

	sres = kzalloc(sizeof(*sres) + size, gfp);
	if (!sres)
		return NULL;

	INIT_LIST_HEAD(&sres->entry);
	sres->release = release;

	return sres->data;
}

/**
 * spi_res_free - free an SPI resource
 * @res: pointer to the custom data of a resource
 */
static void spi_res_free(void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	kfree(sres);
}

/**
 * spi_res_add - add a spi_res to the spi_message
 * @message: the SPI message
 * @res:     the spi_resource
 */
static void spi_res_add(struct spi_message *message, void *res)
{
	struct spi_res *sres = container_of(res, struct spi_res, data);

	WARN_ON(!list_empty(&sres->entry));
	list_add_tail(&sres->entry, &message->resources);
}

/**
 * spi_res_release - release all SPI resources for this message
 * @ctlr:  the @spi_controller
 * @message: the @spi_message
 */
static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
{
	struct spi_res *res, *tmp;

	list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
		if (res->release)
			res->release(ctlr, message, res->data);

		list_del(&res->entry);

		kfree(res);
	}
}

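/*
 * Illustrative sketch of the resource API above: scratch space tied to a
 * message's lifetime, with a hypothetical release callback (error paths
 * as they would appear in a message-building helper):
 *
 *	static void foo_res_release(struct spi_controller *ctlr,
 *				    struct spi_message *msg, void *res)
 *	{
 *		// undo whatever the resource represents; the spi_res
 *		// memory itself is freed by spi_res_release()
 *	}
 *
 *	void *scratch = spi_res_alloc(msg->spi, foo_res_release,
 *				      len, GFP_KERNEL);
 *	if (!scratch)
 *		return -ENOMEM;
 *	spi_res_add(msg, scratch);
 *
 * spi_res_release() later walks msg->resources in reverse, invoking each
 * release callback and freeing the allocations.
 */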
/*-------------------------------------------------------------------------*/
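/*
 * The odd "if (...) {} else" shape in the macro below makes it safe to use
 * like a statement: the loop body written after the macro invocation binds
 * as the else-branch, so it runs only for chip selects whose bit is set in
 * spi->cs_index_mask, and a trailing else in the caller cannot bind to the
 * macro's internal if.
 */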
#define spi_for_each_valid_cs(spi, idx)				\
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)		\
		if (!(spi->cs_index_mask & BIT(idx))) {} else

static inline bool spi_is_last_cs(struct spi_device *spi)
{
	u8 idx;
	bool last = false;

	spi_for_each_valid_cs(spi, idx) {
		if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
			last = true;
	}
	return last;
}

static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
{
	/*
	 * Historically ACPI has no means of expressing the GPIO polarity and
	 * thus the SPISerialBus() resource defines it on the per-chip
	 * basis. In order to avoid a chain of negations, the GPIO
	 * polarity is considered being Active High. Even for the cases
	 * when _DSD() is involved (in the updated versions of ACPI)
	 * the GPIO CS polarity must be defined Active High to avoid
	 * ambiguity. That's why we use enable, which takes SPI_CS_HIGH
	 * into account.
	 */
	if (is_acpi_device_node(dev_fwnode(&spi->dev)))
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
	else
		/* Polarity handled by GPIO library */
		gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);

	if (activate)
		spi_delay_exec(&spi->cs_setup, NULL);
	else
		spi_delay_exec(&spi->cs_inactive, NULL);
}

static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
	bool activate = enable;
	u8 idx;

	/*
	 * Avoid calling into the driver (or doing delays) if the chip select
	 * isn't actually changing from the last time this was called.
	 */
	if (!force && (enable == spi_is_last_cs(spi)) &&
	    (spi->controller->last_cs_index_mask == spi->cs_index_mask) &&
	    (spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
		return;

	trace_spi_set_cs(spi, activate);

	spi->controller->last_cs_index_mask = spi->cs_index_mask;
	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
		spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;

	spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
	if (spi->controller->last_cs_mode_high)
		enable = !enable;

	/*
	 * Handle chip select delays for GPIO based CS or controllers without
	 * programmable chip select timing.
	 */
	if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
		spi_delay_exec(&spi->cs_hold, NULL);

	if (spi_is_csgpiod(spi)) {
		if (!(spi->mode & SPI_NO_CS)) {
			spi_for_each_valid_cs(spi, idx) {
				if (spi_get_csgpiod(spi, idx))
					spi_toggle_csgpiod(spi, idx, enable, activate);
			}
		}
		/* Some SPI controllers need both GPIO CS & ->set_cs() */
		if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
		    spi->controller->set_cs)
			spi->controller->set_cs(spi, !enable);
	} else if (spi->controller->set_cs) {
		spi->controller->set_cs(spi, !enable);
	}

	if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
		if (activate)
			spi_delay_exec(&spi->cs_setup, NULL);
		else
			spi_delay_exec(&spi->cs_inactive, NULL);
	}
}

#ifdef CONFIG_HAS_DMA
static int spi_map_buf_attrs(struct spi_controller *ctlr, struct device *dev,
			     struct sg_table *sgt, void *buf, size_t len,
			     enum dma_data_direction dir, unsigned long attrs)
{
	const bool vmalloced_buf = is_vmalloc_addr(buf);
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
#ifdef CONFIG_HIGHMEM
	const bool kmap_buf = ((unsigned long)buf >= PKMAP_BASE &&
				(unsigned long)buf < (PKMAP_BASE +
					(LAST_PKMAP * PAGE_SIZE)));
#else
	const bool kmap_buf = false;
#endif
	int desc_len;
	int sgs;
	struct page *vm_page;
	struct scatterlist *sg;
	void *sg_buf;
	size_t min;
	int i, ret;

	if (vmalloced_buf || kmap_buf) {
		desc_len = min_t(unsigned long, max_seg_size, PAGE_SIZE);
		sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
	} else if (virt_addr_valid(buf)) {
		desc_len = min_t(size_t, max_seg_size, ctlr->max_dma_len);
		sgs = DIV_ROUND_UP(len, desc_len);
	} else {
		return -EINVAL;
	}

	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
	if (ret != 0)
		return ret;

	sg = &sgt->sgl[0];
	for (i = 0; i < sgs; i++) {

		if (vmalloced_buf || kmap_buf) {
			/*
			 * Next scatterlist entry size is the minimum between
			 * the desc_len and the remaining buffer length that
			 * fits in a page.
			 */
			min = min_t(size_t, desc_len,
				    min_t(size_t, len,
					  PAGE_SIZE - offset_in_page(buf)));
			if (vmalloced_buf)
				vm_page = vmalloc_to_page(buf);
			else
				vm_page = kmap_to_page(buf);
			if (!vm_page) {
				sg_free_table(sgt);
				return -ENOMEM;
			}
			sg_set_page(sg, vm_page,
				    min, offset_in_page(buf));
		} else {
			min = min_t(size_t, len, desc_len);
			sg_buf = buf;
			sg_set_buf(sg, sg_buf, min);
		}

		buf += min;
		len -= min;
		sg = sg_next(sg);
	}

	ret = dma_map_sgtable(dev, sgt, dir, attrs);
	if (ret < 0) {
		sg_free_table(sgt);
		return ret;
	}

	return 0;
}

int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
		struct sg_table *sgt, void *buf, size_t len,
		enum dma_data_direction dir)
{
	return spi_map_buf_attrs(ctlr, dev, sgt, buf, len, dir, 0);
}

static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
				struct device *dev, struct sg_table *sgt,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	dma_unmap_sgtable(dev, sgt, dir, attrs);
	sg_free_table(sgt);
	sgt->orig_nents = 0;
	sgt->nents = 0;
}

void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
		   struct sg_table *sgt, enum dma_data_direction dir)
{
	spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
}

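/*
 * Illustrative sketch for the helpers above, e.g. from in-kernel SPI code
 * with its own DMA path ("dma_dev" standing in for whichever device
 * actually performs the DMA):
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_map_buf(ctlr, dma_dev, &sgt, xfer->rx_buf, xfer->len,
 *			  DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... program the DMA engine using sgt.sgl / sgt.nents ...
 *	spi_unmap_buf(ctlr, dma_dev, &sgt, DMA_FROM_DEVICE);
 *
 * Vmalloc'ed and highmem buffers are split page by page; plain kernel
 * virtual addresses are split only by the segment-size limits.
 */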
static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *tx_dev, *rx_dev;
	struct spi_transfer *xfer;
	int ret;

	if (!ctlr->can_dma)
		return 0;

	if (ctlr->dma_tx)
		tx_dev = ctlr->dma_tx->device->dev;
	else if (ctlr->dma_map_dev)
		tx_dev = ctlr->dma_map_dev;
	else
		tx_dev = ctlr->dev.parent;

	if (ctlr->dma_rx)
		rx_dev = ctlr->dma_rx->device->dev;
	else if (ctlr->dma_map_dev)
		rx_dev = ctlr->dma_map_dev;
	else
		rx_dev = ctlr->dev.parent;

	ret = -ENOMSG;
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync is done before each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;

		if (xfer->tx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
						(void *)xfer->tx_buf,
						xfer->len, DMA_TO_DEVICE,
						attrs);
			if (ret != 0)
				return ret;

			xfer->tx_sg_mapped = true;
		}

		if (xfer->rx_buf != NULL) {
			ret = spi_map_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
						xfer->rx_buf, xfer->len,
						DMA_FROM_DEVICE, attrs);
			if (ret != 0) {
				spi_unmap_buf_attrs(ctlr, tx_dev,
						&xfer->tx_sg, DMA_TO_DEVICE,
						attrs);

				return ret;
			}

			xfer->rx_sg_mapped = true;
		}
	}
	/* No transfer has been mapped, bail out with success */
	if (ret)
		return 0;

	ctlr->cur_rx_dma_dev = rx_dev;
	ctlr->cur_tx_dma_dev = tx_dev;

	return 0;
}

static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/* The sync has already been done after each transfer. */
		unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

		if (xfer->rx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, rx_dev, &xfer->rx_sg,
					    DMA_FROM_DEVICE, attrs);
		xfer->rx_sg_mapped = false;

		if (xfer->tx_sg_mapped)
			spi_unmap_buf_attrs(ctlr, tx_dev, &xfer->tx_sg,
					    DMA_TO_DEVICE, attrs);
		xfer->tx_sg_mapped = false;
	}

	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctlr,
				    struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
				 struct spi_transfer *xfer)
{
	struct device *rx_dev = ctlr->cur_rx_dma_dev;
	struct device *tx_dev = ctlr->cur_tx_dma_dev;

	if (xfer->rx_sg_mapped)
		dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
	if (xfer->tx_sg_mapped)
		dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
}
#else /* !CONFIG_HAS_DMA */
static inline int __spi_map_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	return 0;
}

static inline int __spi_unmap_msg(struct spi_controller *ctlr,
				  struct spi_message *msg)
{
	return 0;
}

static void spi_dma_sync_for_device(struct spi_controller *ctrl,
				    struct spi_transfer *xfer)
{
}

static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
				 struct spi_transfer *xfer)
{
}
#endif /* !CONFIG_HAS_DMA */

static inline int spi_unmap_msg(struct spi_controller *ctlr,
				struct spi_message *msg)
{
	struct spi_transfer *xfer;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		/*
		 * Restore tx_buf and rx_buf to their original NULL values if
		 * spi_map_msg() pointed them at the dummy buffers.
		 */
		if (xfer->tx_buf == ctlr->dummy_tx)
			xfer->tx_buf = NULL;
		if (xfer->rx_buf == ctlr->dummy_rx)
			xfer->rx_buf = NULL;
	}

	return __spi_unmap_msg(ctlr, msg);
}

static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;
	void *tmp;
	unsigned int max_tx, max_rx;

	if ((ctlr->flags & (SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX))
		&& !(msg->spi->mode & SPI_3WIRE)) {
		max_tx = 0;
		max_rx = 0;

		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
			if ((ctlr->flags & SPI_CONTROLLER_MUST_TX) &&
			    !xfer->tx_buf)
				max_tx = max(xfer->len, max_tx);
			if ((ctlr->flags & SPI_CONTROLLER_MUST_RX) &&
			    !xfer->rx_buf)
				max_rx = max(xfer->len, max_rx);
		}

		if (max_tx) {
			tmp = krealloc(ctlr->dummy_tx, max_tx,
				       GFP_KERNEL | GFP_DMA | __GFP_ZERO);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_tx = tmp;
		}

		if (max_rx) {
			tmp = krealloc(ctlr->dummy_rx, max_rx,
				       GFP_KERNEL | GFP_DMA);
			if (!tmp)
				return -ENOMEM;
			ctlr->dummy_rx = tmp;
		}

		if (max_tx || max_rx) {
			list_for_each_entry(xfer, &msg->transfers,
					    transfer_list) {
				if (!xfer->len)
					continue;
				if (!xfer->tx_buf)
					xfer->tx_buf = ctlr->dummy_tx;
				if (!xfer->rx_buf)
					xfer->rx_buf = ctlr->dummy_rx;
			}
		}
	}

	return __spi_map_msg(ctlr, msg);
}

static int spi_transfer_wait(struct spi_controller *ctlr,
			     struct spi_message *msg,
			     struct spi_transfer *xfer)
{
	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
	u32 speed_hz = xfer->speed_hz;
	unsigned long long ms;

	if (spi_controller_is_target(ctlr)) {
		if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
			dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
			return -EINTR;
		}
	} else {
		if (!speed_hz)
			speed_hz = 100000;

		/*
		 * For each byte we wait for 8 cycles of the SPI clock.
		 * Since speed is defined in Hz and we want milliseconds,
		 * use respective multiplier, but before the division,
		 * otherwise we may get 0 for short transfers.
		 */
		ms = 8LL * MSEC_PER_SEC * xfer->len;
		do_div(ms, speed_hz);
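		/*
		 * E.g. a 4096-byte transfer at 1 MHz gives
		 * 8 * 1000 * 4096 / 1000000 = 32 ms (truncated) so far.
		 */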

		/*
		 * Double the timeout and add a 200 ms tolerance; use the
		 * predefined maximum in case of overflow.
		 */
		ms += ms + 200;
		if (ms > UINT_MAX)
			ms = UINT_MAX;

		ms = wait_for_completion_timeout(&ctlr->xfer_completion,
						 msecs_to_jiffies(ms));

		if (ms == 0) {
			SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
			SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
			dev_err(&msg->spi->dev,
				"SPI transfer timed out\n");
			return -ETIMEDOUT;
		}

		if (xfer->error & SPI_TRANS_FAIL_IO)
			return -EIO;
	}

	return 0;
}

static void _spi_transfer_delay_ns(u32 ns)
{
	if (!ns)
		return;
	if (ns <= NSEC_PER_USEC) {
		ndelay(ns);
	} else {
		u32 us = DIV_ROUND_UP(ns, NSEC_PER_USEC);

		fsleep(us);
	}
}

int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	u32 delay = _delay->value;
	u32 unit = _delay->unit;
	u32 hz;

	if (!delay)
		return 0;

	switch (unit) {
	case SPI_DELAY_UNIT_USECS:
		delay *= NSEC_PER_USEC;
		break;
	case SPI_DELAY_UNIT_NSECS:
		/* Nothing to do here */
		break;
	case SPI_DELAY_UNIT_SCK:
		/* Clock cycles need to be obtained from spi_transfer */
		if (!xfer)
			return -EINVAL;
		/*
		 * If the effective speed is unknown, approximate it by
		 * underestimating with half of the requested Hz.
		 */
		hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
		if (!hz)
			return -EINVAL;

		/* Convert delay to nanoseconds */
		delay *= DIV_ROUND_UP(NSEC_PER_SEC, hz);
		break;
	default:
		return -EINVAL;
	}

	return delay;
}
EXPORT_SYMBOL_GPL(spi_delay_to_ns);

int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer)
{
	int delay;

	might_sleep();

	if (!_delay)
		return -EINVAL;

	delay = spi_delay_to_ns(_delay, xfer);
	if (delay < 0)
		return delay;

	_spi_transfer_delay_ns(delay);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_delay_exec);

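/*
 * For illustration: a delay of { .value = 3, .unit = SPI_DELAY_UNIT_SCK }
 * on a transfer running at an effective 10 MHz converts to
 * 3 * DIV_ROUND_UP(1000000000, 10000000) = 300 ns, which
 * _spi_transfer_delay_ns() then serves via ndelay() since it is below
 * NSEC_PER_USEC.
 */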
_spi_transfer_cs_change_delay(struct spi_message * msg,struct spi_transfer * xfer)1559 static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1560 					  struct spi_transfer *xfer)
1561 {
1562 	u32 default_delay_ns = 10 * NSEC_PER_USEC;
1563 	u32 delay = xfer->cs_change_delay.value;
1564 	u32 unit = xfer->cs_change_delay.unit;
1565 	int ret;
1566 
1567 	/* Return early on "fast" mode - for everything but USECS */
1568 	if (!delay) {
1569 		if (unit == SPI_DELAY_UNIT_USECS)
1570 			_spi_transfer_delay_ns(default_delay_ns);
1571 		return;
1572 	}
1573 
1574 	ret = spi_delay_exec(&xfer->cs_change_delay, xfer);
1575 	if (ret) {
1576 		dev_err_once(&msg->spi->dev,
1577 			     "Use of unsupported delay unit %i, using default of %luus\n",
1578 			     unit, default_delay_ns / NSEC_PER_USEC);
1579 		_spi_transfer_delay_ns(default_delay_ns);
1580 	}
1581 }
1582 
spi_transfer_cs_change_delay_exec(struct spi_message * msg,struct spi_transfer * xfer)1583 void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
1584 						  struct spi_transfer *xfer)
1585 {
1586 	_spi_transfer_cs_change_delay(msg, xfer);
1587 }
1588 EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
1589 
1590 /*
1591  * spi_transfer_one_message - Default implementation of transfer_one_message()
1592  *
1593  * This is a standard implementation of transfer_one_message() for
1594  * drivers which implement a transfer_one() operation.  It provides
1595  * standard handling of delays and chip select management.
1596  */
spi_transfer_one_message(struct spi_controller * ctlr,struct spi_message * msg)1597 static int spi_transfer_one_message(struct spi_controller *ctlr,
1598 				    struct spi_message *msg)
1599 {
1600 	struct spi_transfer *xfer;
1601 	bool keep_cs = false;
1602 	int ret = 0;
1603 	struct spi_statistics __percpu *statm = ctlr->pcpu_statistics;
1604 	struct spi_statistics __percpu *stats = msg->spi->pcpu_statistics;
1605 
1606 	xfer = list_first_entry(&msg->transfers, struct spi_transfer, transfer_list);
1607 	spi_set_cs(msg->spi, !xfer->cs_off, false);
1608 
1609 	SPI_STATISTICS_INCREMENT_FIELD(statm, messages);
1610 	SPI_STATISTICS_INCREMENT_FIELD(stats, messages);
1611 
1612 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1613 		trace_spi_transfer_start(msg, xfer);
1614 
1615 		spi_statistics_add_transfer_stats(statm, xfer, msg);
1616 		spi_statistics_add_transfer_stats(stats, xfer, msg);
1617 
1618 		if (!ctlr->ptp_sts_supported) {
1619 			xfer->ptp_sts_word_pre = 0;
1620 			ptp_read_system_prets(xfer->ptp_sts);
1621 		}
1622 
1623 		if ((xfer->tx_buf || xfer->rx_buf) && xfer->len) {
1624 			reinit_completion(&ctlr->xfer_completion);
1625 
1626 fallback_pio:
1627 			spi_dma_sync_for_device(ctlr, xfer);
1628 			ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
1629 			if (ret < 0) {
1630 				spi_dma_sync_for_cpu(ctlr, xfer);
1631 
1632 				if ((xfer->tx_sg_mapped || xfer->rx_sg_mapped) &&
1633 				    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
1634 					__spi_unmap_msg(ctlr, msg);
1635 					ctlr->fallback = true;
1636 					xfer->error &= ~SPI_TRANS_FAIL_NO_START;
1637 					goto fallback_pio;
1638 				}
1639 
1640 				SPI_STATISTICS_INCREMENT_FIELD(statm,
1641 							       errors);
1642 				SPI_STATISTICS_INCREMENT_FIELD(stats,
1643 							       errors);
1644 				dev_err(&msg->spi->dev,
1645 					"SPI transfer failed: %d\n", ret);
1646 				goto out;
1647 			}
1648 
1649 			if (ret > 0) {
1650 				ret = spi_transfer_wait(ctlr, msg, xfer);
1651 				if (ret < 0)
1652 					msg->status = ret;
1653 			}
1654 
1655 			spi_dma_sync_for_cpu(ctlr, xfer);
1656 		} else {
1657 			if (xfer->len)
1658 				dev_err(&msg->spi->dev,
1659 					"Bufferless transfer has length %u\n",
1660 					xfer->len);
1661 		}
1662 
1663 		if (!ctlr->ptp_sts_supported) {
1664 			ptp_read_system_postts(xfer->ptp_sts);
1665 			xfer->ptp_sts_word_post = xfer->len;
1666 		}
1667 
1668 		trace_spi_transfer_stop(msg, xfer);
1669 
1670 		if (msg->status != -EINPROGRESS)
1671 			goto out;
1672 
1673 		spi_transfer_delay_exec(xfer);
1674 
1675 		if (xfer->cs_change) {
1676 			if (list_is_last(&xfer->transfer_list,
1677 					 &msg->transfers)) {
1678 				keep_cs = true;
1679 			} else {
1680 				if (!xfer->cs_off)
1681 					spi_set_cs(msg->spi, false, false);
1682 				_spi_transfer_cs_change_delay(msg, xfer);
1683 				if (!list_next_entry(xfer, transfer_list)->cs_off)
1684 					spi_set_cs(msg->spi, true, false);
1685 			}
1686 		} else if (!list_is_last(&xfer->transfer_list, &msg->transfers) &&
1687 			   xfer->cs_off != list_next_entry(xfer, transfer_list)->cs_off) {
1688 			spi_set_cs(msg->spi, xfer->cs_off, false);
1689 		}
1690 
1691 		msg->actual_length += xfer->len;
1692 	}
1693 
1694 out:
1695 	if (ret != 0 || !keep_cs)
1696 		spi_set_cs(msg->spi, false, false);
1697 
1698 	if (msg->status == -EINPROGRESS)
1699 		msg->status = ret;
1700 
1701 	if (msg->status && ctlr->handle_err)
1702 		ctlr->handle_err(ctlr, msg);
1703 
1704 	spi_finalize_current_message(ctlr);
1705 
1706 	return ret;
1707 }
1708 
1709 /**
1710  * spi_finalize_current_transfer - report completion of a transfer
1711  * @ctlr: the controller reporting completion
1712  *
1713  * Called by SPI drivers using the core transfer_one_message()
1714  * implementation to notify it that the current interrupt driven
1715  * transfer has finished and the next one may be scheduled.
1716  */
1717 void spi_finalize_current_transfer(struct spi_controller *ctlr)
1718 {
1719 	complete(&ctlr->xfer_completion);
1720 }
1721 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1722 
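/*
 * Illustrative sketch (not part of spi.c): a controller driver whose
 * transfer_one() returned a positive value to signal an in-flight transfer
 * would typically complete it from its interrupt handler. foo_irq_handler()
 * and the FIFO handling step are hypothetical.
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
 *	{
 *		struct spi_controller *ctlr = dev_id;
 *
 *		... drain the RX FIFO and acknowledge the interrupt ...
 *		spi_finalize_current_transfer(ctlr);
 *		return IRQ_HANDLED;
 *	}
 */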
1723 static void spi_idle_runtime_pm(struct spi_controller *ctlr)
1724 {
1725 	if (ctlr->auto_runtime_pm) {
1726 		pm_runtime_put_autosuspend(ctlr->dev.parent);
1727 	}
1728 }
1729 
1730 static int __spi_pump_transfer_message(struct spi_controller *ctlr,
1731 		struct spi_message *msg, bool was_busy)
1732 {
1733 	struct spi_transfer *xfer;
1734 	int ret;
1735 
1736 	if (!was_busy && ctlr->auto_runtime_pm) {
1737 		ret = pm_runtime_get_sync(ctlr->dev.parent);
1738 		if (ret < 0) {
1739 			pm_runtime_put_noidle(ctlr->dev.parent);
1740 			dev_err(&ctlr->dev, "Failed to power device: %d\n",
1741 				ret);
1742 
1743 			msg->status = ret;
1744 			spi_finalize_current_message(ctlr);
1745 
1746 			return ret;
1747 		}
1748 	}
1749 
1750 	if (!was_busy)
1751 		trace_spi_controller_busy(ctlr);
1752 
1753 	if (!was_busy && ctlr->prepare_transfer_hardware) {
1754 		ret = ctlr->prepare_transfer_hardware(ctlr);
1755 		if (ret) {
1756 			dev_err(&ctlr->dev,
1757 				"failed to prepare transfer hardware: %d\n",
1758 				ret);
1759 
1760 			if (ctlr->auto_runtime_pm)
1761 				pm_runtime_put(ctlr->dev.parent);
1762 
1763 			msg->status = ret;
1764 			spi_finalize_current_message(ctlr);
1765 
1766 			return ret;
1767 		}
1768 	}
1769 
1770 	trace_spi_message_start(msg);
1771 
1772 	if (ctlr->prepare_message) {
1773 		ret = ctlr->prepare_message(ctlr, msg);
1774 		if (ret) {
1775 			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
1776 				ret);
1777 			msg->status = ret;
1778 			spi_finalize_current_message(ctlr);
1779 			return ret;
1780 		}
1781 		msg->prepared = true;
1782 	}
1783 
1784 	ret = spi_map_msg(ctlr, msg);
1785 	if (ret) {
1786 		msg->status = ret;
1787 		spi_finalize_current_message(ctlr);
1788 		return ret;
1789 	}
1790 
1791 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
1792 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
1793 			xfer->ptp_sts_word_pre = 0;
1794 			ptp_read_system_prets(xfer->ptp_sts);
1795 		}
1796 	}
1797 
1798 	/*
1799 	 * A driver's implementation of transfer_one_message() must arrange for
1800 	 * spi_finalize_current_message() to get called. Most drivers will do
1801 	 * this in the calling context, but some don't. For those cases, a
1802 	 * completion is used to guarantee that this function does not return
1803 	 * until spi_finalize_current_message() is done accessing
1804 	 * ctlr->cur_msg.
1805 	 * The following two flags allow the completion to be skipped
1806 	 * opportunistically, since using it involves expensive spin locks.
1807 	 * In case of a race with the context that calls
1808 	 * spi_finalize_current_message(), the completion is always used;
1809 	 * strict ordering of these flags using barriers guarantees that.
1810 	 */
1811 	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
1812 	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
1813 	reinit_completion(&ctlr->cur_msg_completion);
1814 	smp_wmb(); /* Make these available to spi_finalize_current_message() */
1815 
1816 	ret = ctlr->transfer_one_message(ctlr, msg);
1817 	if (ret) {
1818 		dev_err(&ctlr->dev,
1819 			"failed to transfer one message from queue\n");
1820 		return ret;
1821 	}
1822 
1823 	WRITE_ONCE(ctlr->cur_msg_need_completion, true);
1824 	smp_mb(); /* See spi_finalize_current_message()... */
1825 	if (READ_ONCE(ctlr->cur_msg_incomplete))
1826 		wait_for_completion(&ctlr->cur_msg_completion);
1827 
1828 	return 0;
1829 }
1830 
1831 /**
1832  * __spi_pump_messages - function which processes SPI message queue
1833  * @ctlr: controller to process queue for
1834  * @in_kthread: true if we are in the context of the message pump thread
1835  *
1836  * This function checks if there is any SPI message in the queue that
1837  * needs processing and if so call out to the driver to initialize hardware
1838  * and transfer each message.
1839  *
1840  * Note that it is called both from the kthread itself and from inside
1841  * spi_sync(); the queue extraction handling at the top of the
1842  * function should deal with this safely.
1843  */
1844 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
1845 {
1846 	struct spi_message *msg;
1847 	bool was_busy = false;
1848 	unsigned long flags;
1849 	int ret;
1850 
1851 	/* Take the I/O mutex */
1852 	mutex_lock(&ctlr->io_mutex);
1853 
1854 	/* Lock queue */
1855 	spin_lock_irqsave(&ctlr->queue_lock, flags);
1856 
1857 	/* Make sure we are not already running a message */
1858 	if (ctlr->cur_msg)
1859 		goto out_unlock;
1860 
1861 	/* Check if the queue is idle */
1862 	if (list_empty(&ctlr->queue) || !ctlr->running) {
1863 		if (!ctlr->busy)
1864 			goto out_unlock;
1865 
1866 		/* Defer any non-atomic teardown to the thread */
1867 		if (!in_kthread) {
1868 			if (!ctlr->dummy_rx && !ctlr->dummy_tx &&
1869 			    !ctlr->unprepare_transfer_hardware) {
1870 				spi_idle_runtime_pm(ctlr);
1871 				ctlr->busy = false;
1872 				ctlr->queue_empty = true;
1873 				trace_spi_controller_idle(ctlr);
1874 			} else {
1875 				kthread_queue_work(ctlr->kworker,
1876 						   &ctlr->pump_messages);
1877 			}
1878 			goto out_unlock;
1879 		}
1880 
1881 		ctlr->busy = false;
1882 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1883 
1884 		kfree(ctlr->dummy_rx);
1885 		ctlr->dummy_rx = NULL;
1886 		kfree(ctlr->dummy_tx);
1887 		ctlr->dummy_tx = NULL;
1888 		if (ctlr->unprepare_transfer_hardware &&
1889 		    ctlr->unprepare_transfer_hardware(ctlr))
1890 			dev_err(&ctlr->dev,
1891 				"failed to unprepare transfer hardware\n");
1892 		spi_idle_runtime_pm(ctlr);
1893 		trace_spi_controller_idle(ctlr);
1894 
1895 		spin_lock_irqsave(&ctlr->queue_lock, flags);
1896 		ctlr->queue_empty = true;
1897 		goto out_unlock;
1898 	}
1899 
1900 	/* Extract head of queue */
1901 	msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
1902 	ctlr->cur_msg = msg;
1903 
1904 	list_del_init(&msg->queue);
1905 	if (ctlr->busy)
1906 		was_busy = true;
1907 	else
1908 		ctlr->busy = true;
1909 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1910 
1911 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
1912 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
1913 
1914 	ctlr->cur_msg = NULL;
1915 	ctlr->fallback = false;
1916 
1917 	mutex_unlock(&ctlr->io_mutex);
1918 
1919 	/* Prod the scheduler in case transfer_one() was busy waiting */
1920 	if (!ret)
1921 		cond_resched();
1922 	return;
1923 
1924 out_unlock:
1925 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
1926 	mutex_unlock(&ctlr->io_mutex);
1927 }
1928 
1929 /**
1930  * spi_pump_messages - kthread work function which processes spi message queue
1931  * @work: pointer to kthread work struct contained in the controller struct
1932  */
1933 static void spi_pump_messages(struct kthread_work *work)
1934 {
1935 	struct spi_controller *ctlr =
1936 		container_of(work, struct spi_controller, pump_messages);
1937 
1938 	__spi_pump_messages(ctlr, true);
1939 }
1940 
1941 /**
1942  * spi_take_timestamp_pre - helper to collect the beginning of the TX timestamp
1943  * @ctlr: Pointer to the spi_controller structure of the driver
1944  * @xfer: Pointer to the transfer being timestamped
1945  * @progress: How many words (not bytes) have been transferred so far
1946  * @irqs_off: If true, will disable IRQs and preemption for the duration of the
1947  *	      transfer, for less jitter in time measurement. Only compatible
1948  *	      with PIO drivers. If true, must follow up with
1949  *	      with PIO drivers. If true, the call must be followed by
1950  *	      spi_take_timestamp_post(), otherwise the system will crash.
1951  *	      also be under control (governor).
1952  *
1953  * This is a helper for drivers to collect the beginning of the TX timestamp
1954  * for the requested byte from the SPI transfer. The frequency with which this
1955  * function must be called (once per word, once for the whole transfer, once
1956  * per batch of words etc) is arbitrary as long as the @tx buffer offset is
1957  * greater than or equal to the requested byte at the time of the call. The
1958  * timestamp is only taken once, at the first such call. It is assumed that
1959  * the driver advances its @tx buffer pointer monotonically.
1960  */
1961 void spi_take_timestamp_pre(struct spi_controller *ctlr,
1962 			    struct spi_transfer *xfer,
1963 			    size_t progress, bool irqs_off)
1964 {
1965 	if (!xfer->ptp_sts)
1966 		return;
1967 
1968 	if (xfer->timestamped)
1969 		return;
1970 
1971 	if (progress > xfer->ptp_sts_word_pre)
1972 		return;
1973 
1974 	/* Capture the resolution of the timestamp */
1975 	xfer->ptp_sts_word_pre = progress;
1976 
1977 	if (irqs_off) {
1978 		local_irq_save(ctlr->irq_flags);
1979 		preempt_disable();
1980 	}
1981 
1982 	ptp_read_system_prets(xfer->ptp_sts);
1983 }
1984 EXPORT_SYMBOL_GPL(spi_take_timestamp_pre);
1985 
1986 /**
1987  * spi_take_timestamp_post - helper to collect the end of the TX timestamp
1988  * @ctlr: Pointer to the spi_controller structure of the driver
1989  * @xfer: Pointer to the transfer being timestamped
1990  * @progress: How many words (not bytes) have been transferred so far
1991  * @irqs_off: If true, will re-enable IRQs and preemption for the local CPU.
1992  *
1993  * This is a helper for drivers to collect the end of the TX timestamp for
1994  * the requested byte from the SPI transfer. Can be called with an arbitrary
1995  * frequency: only the first call where @tx exceeds or is equal to the
1996  * requested word will be timestamped.
1997  */
1998 void spi_take_timestamp_post(struct spi_controller *ctlr,
1999 			     struct spi_transfer *xfer,
2000 			     size_t progress, bool irqs_off)
2001 {
2002 	if (!xfer->ptp_sts)
2003 		return;
2004 
2005 	if (xfer->timestamped)
2006 		return;
2007 
2008 	if (progress < xfer->ptp_sts_word_post)
2009 		return;
2010 
2011 	ptp_read_system_postts(xfer->ptp_sts);
2012 
2013 	if (irqs_off) {
2014 		local_irq_restore(ctlr->irq_flags);
2015 		preempt_enable();
2016 	}
2017 
2018 	/* Capture the resolution of the timestamp */
2019 	xfer->ptp_sts_word_post = progress;
2020 
2021 	xfer->timestamped = 1;
2022 }
2023 EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
2024 
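/*
 * Illustrative sketch (not part of spi.c): how a PIO driver might bracket
 * each word it pushes with the two timestamping helpers above. The
 * foo_push_word() helper, w_size, and the surrounding loop are hypothetical.
 *
 *	for (i = 0; i < xfer->len / w_size; i++) {
 *		spi_take_timestamp_pre(ctlr, xfer, i, false);
 *		foo_push_word(priv, xfer->tx_buf, i);
 *		spi_take_timestamp_post(ctlr, xfer, i + 1, false);
 *	}
 */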
2025 /**
2026  * spi_set_thread_rt - set the controller to pump at realtime priority
2027  * @ctlr: controller to boost priority of
2028  *
2029  * This can be called because the controller requested realtime priority
2030  * (by setting the ->rt value before calling spi_register_controller()) or
2031  * because a device on the bus said that its transfers needed realtime
2032  * priority.
2033  *
2034  * NOTE: at the moment if any device on a bus says it needs realtime then
2035  * the thread will be at realtime priority for all transfers on that
2036  * controller.  If this eventually becomes a problem we may see if we can
2037  * find a way to boost the priority only temporarily during relevant
2038  * transfers.
2039  */
2040 static void spi_set_thread_rt(struct spi_controller *ctlr)
2041 {
2042 	dev_info(&ctlr->dev,
2043 		"will run message pump with realtime priority\n");
2044 	sched_set_fifo(ctlr->kworker->task);
2045 }
2046 
2047 static int spi_init_queue(struct spi_controller *ctlr)
2048 {
2049 	ctlr->running = false;
2050 	ctlr->busy = false;
2051 	ctlr->queue_empty = true;
2052 
2053 	ctlr->kworker = kthread_run_worker(0, dev_name(&ctlr->dev));
2054 	if (IS_ERR(ctlr->kworker)) {
2055 		dev_err(&ctlr->dev, "failed to create message pump kworker\n");
2056 		return PTR_ERR(ctlr->kworker);
2057 	}
2058 
2059 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
2060 
2061 	/*
2062 	 * Controller config will indicate if this controller should run the
2063 	 * message pump with high (realtime) priority to reduce the transfer
2064 	 * latency on the bus by minimising the delay between a transfer
2065 	 * request and the scheduling of the message pump thread. Without this
2066 	 * setting the message pump thread will remain at default priority.
2067 	 */
2068 	if (ctlr->rt)
2069 		spi_set_thread_rt(ctlr);
2070 
2071 	return 0;
2072 }
2073 
2074 /**
2075  * spi_get_next_queued_message() - called by driver to check for queued
2076  * messages
2077  * @ctlr: the controller to check for queued messages
2078  *
2079  * If there are more messages in the queue, the next message is returned from
2080  * this call.
2081  *
2082  * Return: the next message in the queue, else NULL if the queue is empty.
2083  */
2084 struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
2085 {
2086 	struct spi_message *next;
2087 	unsigned long flags;
2088 
2089 	/* Get a pointer to the next message, if any */
2090 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2091 	next = list_first_entry_or_null(&ctlr->queue, struct spi_message,
2092 					queue);
2093 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2094 
2095 	return next;
2096 }
2097 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
2098 
2099 /*
2100  * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
2101  *                            and spi_maybe_unoptimize_message()
2102  * @msg: the message to unoptimize
2103  *
2104  * Peripheral drivers should use spi_unoptimize_message() and callers inside
2105  * the core should use spi_maybe_unoptimize_message() rather than calling this
2106  * function directly.
2107  *
2108  * It is not valid to call this on a message that is not currently optimized.
2109  */
2110 static void __spi_unoptimize_message(struct spi_message *msg)
2111 {
2112 	struct spi_controller *ctlr = msg->spi->controller;
2113 
2114 	if (ctlr->unoptimize_message)
2115 		ctlr->unoptimize_message(msg);
2116 
2117 	spi_res_release(ctlr, msg);
2118 
2119 	msg->optimized = false;
2120 	msg->opt_state = NULL;
2121 }
2122 
2123 /*
2124  * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
2125  * @msg: the message to unoptimize
2126  *
2127  * This function is used to unoptimize a message if and only if it was
2128  * optimized by the core (via spi_maybe_optimize_message()).
2129  */
2130 static void spi_maybe_unoptimize_message(struct spi_message *msg)
2131 {
2132 	if (!msg->pre_optimized && msg->optimized &&
2133 	    !msg->spi->controller->defer_optimize_message)
2134 		__spi_unoptimize_message(msg);
2135 }
2136 
2137 /**
2138  * spi_finalize_current_message() - the current message is complete
2139  * @ctlr: the controller to return the message to
2140  *
2141  * Called by the driver to notify the core that the message at the front of the
2142  * queue is complete and can be removed from the queue.
2143  */
2144 void spi_finalize_current_message(struct spi_controller *ctlr)
2145 {
2146 	struct spi_transfer *xfer;
2147 	struct spi_message *mesg;
2148 	int ret;
2149 
2150 	mesg = ctlr->cur_msg;
2151 
2152 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
2153 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
2154 			ptp_read_system_postts(xfer->ptp_sts);
2155 			xfer->ptp_sts_word_post = xfer->len;
2156 		}
2157 	}
2158 
2159 	if (unlikely(ctlr->ptp_sts_supported))
2160 		list_for_each_entry(xfer, &mesg->transfers, transfer_list)
2161 			WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped);
2162 
2163 	spi_unmap_msg(ctlr, mesg);
2164 
2165 	if (mesg->prepared && ctlr->unprepare_message) {
2166 		ret = ctlr->unprepare_message(ctlr, mesg);
2167 		if (ret) {
2168 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
2169 				ret);
2170 		}
2171 	}
2172 
2173 	mesg->prepared = false;
2174 
2175 	spi_maybe_unoptimize_message(mesg);
2176 
2177 	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
2178 	smp_mb(); /* See __spi_pump_transfer_message()... */
2179 	if (READ_ONCE(ctlr->cur_msg_need_completion))
2180 		complete(&ctlr->cur_msg_completion);
2181 
2182 	trace_spi_message_done(mesg);
2183 
2184 	mesg->state = NULL;
2185 	if (mesg->complete)
2186 		mesg->complete(mesg->context);
2187 }
2188 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
2189 
2190 static int spi_start_queue(struct spi_controller *ctlr)
2191 {
2192 	unsigned long flags;
2193 
2194 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2195 
2196 	if (ctlr->running || ctlr->busy) {
2197 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2198 		return -EBUSY;
2199 	}
2200 
2201 	ctlr->running = true;
2202 	ctlr->cur_msg = NULL;
2203 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2204 
2205 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2206 
2207 	return 0;
2208 }
2209 
2210 static int spi_stop_queue(struct spi_controller *ctlr)
2211 {
2212 	unsigned int limit = 500;
2213 	unsigned long flags;
2214 
2215 	/*
2216 	 * This is a bit lame, but is optimized for the common execution path.
2217 	 * A wait_queue on the ctlr->busy could be used, but then the common
2218 	 * execution path (pump_messages) would be required to call wake_up or
2219 	 * friends on every SPI message. Do this instead.
2220 	 */
2221 	do {
2222 		spin_lock_irqsave(&ctlr->queue_lock, flags);
2223 		if (list_empty(&ctlr->queue) && !ctlr->busy) {
2224 			ctlr->running = false;
2225 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2226 			return 0;
2227 		}
2228 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2229 		usleep_range(10000, 11000);
2230 	} while (--limit);
2231 
2232 	return -EBUSY;
2233 }
2234 
2235 static int spi_destroy_queue(struct spi_controller *ctlr)
2236 {
2237 	int ret;
2238 
2239 	ret = spi_stop_queue(ctlr);
2240 
2241 	/*
2242 	 * kthread_flush_worker will block until all work is done.
2243 	 * If the reason that stop_queue timed out is that the work will never
2244 	 * finish, then it does no good to call flush/stop thread, so
2245 	 * return anyway.
2246 	 */
2247 	if (ret) {
2248 		dev_err(&ctlr->dev, "problem destroying queue\n");
2249 		return ret;
2250 	}
2251 
2252 	kthread_destroy_worker(ctlr->kworker);
2253 
2254 	return 0;
2255 }
2256 
2257 static int __spi_queued_transfer(struct spi_device *spi,
2258 				 struct spi_message *msg,
2259 				 bool need_pump)
2260 {
2261 	struct spi_controller *ctlr = spi->controller;
2262 	unsigned long flags;
2263 
2264 	spin_lock_irqsave(&ctlr->queue_lock, flags);
2265 
2266 	if (!ctlr->running) {
2267 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2268 		return -ESHUTDOWN;
2269 	}
2270 	msg->actual_length = 0;
2271 	msg->status = -EINPROGRESS;
2272 
2273 	list_add_tail(&msg->queue, &ctlr->queue);
2274 	ctlr->queue_empty = false;
2275 	if (!ctlr->busy && need_pump)
2276 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
2277 
2278 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
2279 	return 0;
2280 }
2281 
2282 /**
2283  * spi_queued_transfer - transfer function for queued transfers
2284  * @spi: SPI device which is requesting transfer
2285  * @msg: SPI message to be handled; it is queued onto the driver's queue
2286  *
2287  * Return: zero on success, else a negative error code.
2288  */
2289 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
2290 {
2291 	return __spi_queued_transfer(spi, msg, true);
2292 }
2293 
2294 static int spi_controller_initialize_queue(struct spi_controller *ctlr)
2295 {
2296 	int ret;
2297 
2298 	ctlr->transfer = spi_queued_transfer;
2299 	if (!ctlr->transfer_one_message)
2300 		ctlr->transfer_one_message = spi_transfer_one_message;
2301 
2302 	/* Initialize and start queue */
2303 	ret = spi_init_queue(ctlr);
2304 	if (ret) {
2305 		dev_err(&ctlr->dev, "problem initializing queue\n");
2306 		goto err_init_queue;
2307 	}
2308 	ctlr->queued = true;
2309 	ret = spi_start_queue(ctlr);
2310 	if (ret) {
2311 		dev_err(&ctlr->dev, "problem starting queue\n");
2312 		goto err_start_queue;
2313 	}
2314 
2315 	return 0;
2316 
2317 err_start_queue:
2318 	spi_destroy_queue(ctlr);
2319 err_init_queue:
2320 	return ret;
2321 }
2322 
2323 /**
2324  * spi_flush_queue - Send all pending messages in the queue from the caller's
2325  *		     context
2326  * @ctlr: controller to process queue for
2327  *
2328  * This should be used when one wants to ensure all pending messages have been
2329  * sent before doing something. It is used by the spi-mem code to make sure SPI
2330  * memory operations do not preempt regular SPI transfers that have been queued
2331  * before the spi-mem operation.
2332  */
2333 void spi_flush_queue(struct spi_controller *ctlr)
2334 {
2335 	if (ctlr->transfer == spi_queued_transfer)
2336 		__spi_pump_messages(ctlr, false);
2337 }
2338 
2339 /*-------------------------------------------------------------------------*/
2340 
2341 #if defined(CONFIG_OF)
2342 static void of_spi_parse_dt_cs_delay(struct device_node *nc,
2343 				     struct spi_delay *delay, const char *prop)
2344 {
2345 	u32 value;
2346 
2347 	if (!of_property_read_u32(nc, prop, &value)) {
2348 		if (value > U16_MAX) {
2349 			delay->value = DIV_ROUND_UP(value, 1000);
2350 			delay->unit = SPI_DELAY_UNIT_USECS;
2351 		} else {
2352 			delay->value = value;
2353 			delay->unit = SPI_DELAY_UNIT_NSECS;
2354 		}
2355 	}
2356 }
2357 
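/*
 * Worked example of the conversion above: a "spi-cs-setup-delay-ns" value
 * of <100000> exceeds U16_MAX, so it is stored as
 * DIV_ROUND_UP(100000, 1000) = 100 in SPI_DELAY_UNIT_USECS, while a value
 * of <5000> fits and is kept as 5000 in SPI_DELAY_UNIT_NSECS.
 */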
2358 static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
2359 			   struct device_node *nc)
2360 {
2361 	u32 value, cs[SPI_CS_CNT_MAX];
2362 	int rc, idx;
2363 
2364 	/* Mode (clock phase/polarity/etc.) */
2365 	if (of_property_read_bool(nc, "spi-cpha"))
2366 		spi->mode |= SPI_CPHA;
2367 	if (of_property_read_bool(nc, "spi-cpol"))
2368 		spi->mode |= SPI_CPOL;
2369 	if (of_property_read_bool(nc, "spi-3wire"))
2370 		spi->mode |= SPI_3WIRE;
2371 	if (of_property_read_bool(nc, "spi-lsb-first"))
2372 		spi->mode |= SPI_LSB_FIRST;
2373 	if (of_property_read_bool(nc, "spi-cs-high"))
2374 		spi->mode |= SPI_CS_HIGH;
2375 
2376 	/* Device DUAL/QUAD mode */
2377 	if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
2378 		switch (value) {
2379 		case 0:
2380 			spi->mode |= SPI_NO_TX;
2381 			break;
2382 		case 1:
2383 			break;
2384 		case 2:
2385 			spi->mode |= SPI_TX_DUAL;
2386 			break;
2387 		case 4:
2388 			spi->mode |= SPI_TX_QUAD;
2389 			break;
2390 		case 8:
2391 			spi->mode |= SPI_TX_OCTAL;
2392 			break;
2393 		default:
2394 			dev_warn(&ctlr->dev,
2395 				"spi-tx-bus-width %d not supported\n",
2396 				value);
2397 			break;
2398 		}
2399 	}
2400 
2401 	if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
2402 		switch (value) {
2403 		case 0:
2404 			spi->mode |= SPI_NO_RX;
2405 			break;
2406 		case 1:
2407 			break;
2408 		case 2:
2409 			spi->mode |= SPI_RX_DUAL;
2410 			break;
2411 		case 4:
2412 			spi->mode |= SPI_RX_QUAD;
2413 			break;
2414 		case 8:
2415 			spi->mode |= SPI_RX_OCTAL;
2416 			break;
2417 		default:
2418 			dev_warn(&ctlr->dev,
2419 				"spi-rx-bus-width %d not supported\n",
2420 				value);
2421 			break;
2422 		}
2423 	}
2424 
2425 	if (spi_controller_is_target(ctlr)) {
2426 		if (!of_node_name_eq(nc, "slave")) {
2427 			dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
2428 				nc);
2429 			return -EINVAL;
2430 		}
2431 		return 0;
2432 	}
2433 
2434 	if (ctlr->num_chipselect > SPI_CS_CNT_MAX) {
2435 		dev_err(&ctlr->dev, "No. of CS is more than max. no. of supported CS\n");
2436 		return -EINVAL;
2437 	}
2438 
2439 	spi_set_all_cs_unused(spi);
2440 
2441 	/* Device address */
2442 	rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
2443 						 SPI_CS_CNT_MAX);
2444 	if (rc < 0) {
2445 		dev_err(&ctlr->dev, "%pOF has no valid 'reg' property (%d)\n",
2446 			nc, rc);
2447 		return rc;
2448 	}
2449 	if (rc > ctlr->num_chipselect) {
2450 		dev_err(&ctlr->dev, "%pOF has number of CS > ctlr->num_chipselect (%d)\n",
2451 			nc, rc);
2452 		return rc;
2453 	}
2454 	if ((of_property_present(nc, "parallel-memories")) &&
2455 	    (!(ctlr->flags & SPI_CONTROLLER_MULTI_CS))) {
2456 		dev_err(&ctlr->dev, "SPI controller doesn't support multi CS\n");
2457 		return -EINVAL;
2458 	}
2459 	for (idx = 0; idx < rc; idx++)
2460 		spi_set_chipselect(spi, idx, cs[idx]);
2461 
2462 	/*
2463 	 * By default spi->chip_select[0] will hold the physical CS number,
2464 	 * so set bit 0 in spi->cs_index_mask.
2465 	 */
2466 	spi->cs_index_mask = BIT(0);
2467 
2468 	/* Device speed */
2469 	if (!of_property_read_u32(nc, "spi-max-frequency", &value))
2470 		spi->max_speed_hz = value;
2471 
2472 	/* Device CS delays */
2473 	of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
2474 	of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
2475 	of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
2476 
2477 	return 0;
2478 }
2479 
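/*
 * Illustrative device tree node (not part of spi.c) exercising the
 * properties parsed above; the compatible string is hypothetical:
 *
 *	flash@0 {
 *		compatible = "vendor,spi-chip";
 *		reg = <0>;
 *		spi-max-frequency = <10000000>;
 *		spi-cpha;
 *		spi-tx-bus-width = <4>;
 *		spi-cs-setup-delay-ns = <100>;
 *	};
 */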
2480 static struct spi_device *
2481 of_register_spi_device(struct spi_controller *ctlr, struct device_node *nc)
2482 {
2483 	struct spi_device *spi;
2484 	int rc;
2485 
2486 	/* Alloc an spi_device */
2487 	spi = spi_alloc_device(ctlr);
2488 	if (!spi) {
2489 		dev_err(&ctlr->dev, "spi_device alloc error for %pOF\n", nc);
2490 		rc = -ENOMEM;
2491 		goto err_out;
2492 	}
2493 
2494 	/* Select device driver */
2495 	rc = of_alias_from_compatible(nc, spi->modalias,
2496 				      sizeof(spi->modalias));
2497 	if (rc < 0) {
2498 		dev_err(&ctlr->dev, "cannot find modalias for %pOF\n", nc);
2499 		goto err_out;
2500 	}
2501 
2502 	rc = of_spi_parse_dt(ctlr, spi, nc);
2503 	if (rc)
2504 		goto err_out;
2505 
2506 	/* Store a pointer to the node in the device structure */
2507 	of_node_get(nc);
2508 
2509 	device_set_node(&spi->dev, of_fwnode_handle(nc));
2510 
2511 	/* Register the new device */
2512 	rc = spi_add_device(spi);
2513 	if (rc) {
2514 		dev_err(&ctlr->dev, "spi_device register error %pOF\n", nc);
2515 		goto err_of_node_put;
2516 	}
2517 
2518 	return spi;
2519 
2520 err_of_node_put:
2521 	of_node_put(nc);
2522 err_out:
2523 	spi_dev_put(spi);
2524 	return ERR_PTR(rc);
2525 }
2526 
2527 /**
2528  * of_register_spi_devices() - Register child devices onto the SPI bus
2529  * @ctlr:	Pointer to spi_controller device
2530  *
2531  * Registers an spi_device for each child node of the controller node which
2532  * represents a valid SPI target device.
2533  */
2534 static void of_register_spi_devices(struct spi_controller *ctlr)
2535 {
2536 	struct spi_device *spi;
2537 	struct device_node *nc;
2538 
2539 	for_each_available_child_of_node(ctlr->dev.of_node, nc) {
2540 		if (of_node_test_and_set_flag(nc, OF_POPULATED))
2541 			continue;
2542 		spi = of_register_spi_device(ctlr, nc);
2543 		if (IS_ERR(spi)) {
2544 			dev_warn(&ctlr->dev,
2545 				 "Failed to create SPI device for %pOF\n", nc);
2546 			of_node_clear_flag(nc, OF_POPULATED);
2547 		}
2548 	}
2549 }
2550 #else
2551 static void of_register_spi_devices(struct spi_controller *ctlr) { }
2552 #endif
2553 
2554 /**
2555  * spi_new_ancillary_device() - Register ancillary SPI device
2556  * @spi:         Pointer to the main SPI device registering the ancillary device
2557  * @chip_select: Chip Select of the ancillary device
2558  *
2559  * Register an ancillary SPI device; for example some chips have a chip-select
2560  * for normal device usage and another one for setup/firmware upload.
2561  *
2562  * This may only be called from the main SPI device's probe routine.
2563  *
2564  * Return: a pointer to the new ancillary device, or ERR_PTR() on error.
2565  */
2566 struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
2567 					     u8 chip_select)
2568 {
2569 	struct spi_controller *ctlr = spi->controller;
2570 	struct spi_device *ancillary;
2571 	int rc;
2572 
2573 	/* Alloc an spi_device */
2574 	ancillary = spi_alloc_device(ctlr);
2575 	if (!ancillary) {
2576 		rc = -ENOMEM;
2577 		goto err_out;
2578 	}
2579 
2580 	strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
2581 
2582 	/* Use provided chip-select for ancillary device */
2583 	spi_set_all_cs_unused(ancillary);
2584 	spi_set_chipselect(ancillary, 0, chip_select);
2585 
2586 	/* Take over SPI mode/speed from SPI main device */
2587 	ancillary->max_speed_hz = spi->max_speed_hz;
2588 	ancillary->mode = spi->mode;
2589 	/*
2590 	 * By default spi->chip_select[0] will hold the physical CS number,
2591 	 * so set bit 0 in spi->cs_index_mask.
2592 	 */
2593 	ancillary->cs_index_mask = BIT(0);
2594 
2595 	WARN_ON(!mutex_is_locked(&ctlr->add_lock));
2596 
2597 	/* Register the new device */
2598 	rc = __spi_add_device(ancillary);
2599 	if (rc) {
2600 		dev_err(&spi->dev, "failed to register ancillary device\n");
2601 		goto err_out;
2602 	}
2603 
2604 	return ancillary;
2605 
2606 err_out:
2607 	spi_dev_put(ancillary);
2608 	return ERR_PTR(rc);
2609 }
2610 EXPORT_SYMBOL_GPL(spi_new_ancillary_device);
2611 
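/*
 * Illustrative sketch (not part of spi.c): registering a second chip select
 * from the main device's probe routine. foo_probe() is hypothetical.
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct spi_device *setup_dev;
 *
 *		setup_dev = spi_new_ancillary_device(spi, 1);
 *		if (IS_ERR(setup_dev))
 *			return PTR_ERR(setup_dev);
 *		...
 *	}
 */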
2612 #ifdef CONFIG_ACPI
2613 struct acpi_spi_lookup {
2614 	struct spi_controller 	*ctlr;
2615 	u32			max_speed_hz;
2616 	u32			mode;
2617 	int			irq;
2618 	u8			bits_per_word;
2619 	u8			chip_select;
2620 	int			n;
2621 	int			index;
2622 };
2623 
2624 static int acpi_spi_count(struct acpi_resource *ares, void *data)
2625 {
2626 	struct acpi_resource_spi_serialbus *sb;
2627 	int *count = data;
2628 
2629 	if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
2630 		return 1;
2631 
2632 	sb = &ares->data.spi_serial_bus;
2633 	if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_SPI)
2634 		return 1;
2635 
2636 	*count = *count + 1;
2637 
2638 	return 1;
2639 }
2640 
2641 /**
2642  * acpi_spi_count_resources - Count the number of SpiSerialBus resources
2643  * @adev:	ACPI device
2644  *
2645  * Return: the number of SpiSerialBus resources in the ACPI device's
2646  * resource list, or a negative error code.
2647  */
2648 int acpi_spi_count_resources(struct acpi_device *adev)
2649 {
2650 	LIST_HEAD(r);
2651 	int count = 0;
2652 	int ret;
2653 
2654 	ret = acpi_dev_get_resources(adev, &r, acpi_spi_count, &count);
2655 	if (ret < 0)
2656 		return ret;
2657 
2658 	acpi_dev_free_resource_list(&r);
2659 
2660 	return count;
2661 }
2662 EXPORT_SYMBOL_GPL(acpi_spi_count_resources);
2663 
2664 static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
2665 					    struct acpi_spi_lookup *lookup)
2666 {
2667 	const union acpi_object *obj;
2668 
2669 	if (!x86_apple_machine)
2670 		return;
2671 
2672 	if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
2673 	    && obj->buffer.length >= 4)
2674 		lookup->max_speed_hz  = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
2675 
2676 	if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
2677 	    && obj->buffer.length == 8)
2678 		lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
2679 
2680 	if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
2681 	    && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
2682 		lookup->mode |= SPI_LSB_FIRST;
2683 
2684 	if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
2685 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2686 		lookup->mode |= SPI_CPOL;
2687 
2688 	if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
2689 	    && obj->buffer.length == 8 &&  *(u64 *)obj->buffer.pointer)
2690 		lookup->mode |= SPI_CPHA;
2691 }
2692 
2693 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
2694 {
2695 	struct acpi_spi_lookup *lookup = data;
2696 	struct spi_controller *ctlr = lookup->ctlr;
2697 
2698 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
2699 		struct acpi_resource_spi_serialbus *sb;
2700 		acpi_handle parent_handle;
2701 		acpi_status status;
2702 
2703 		sb = &ares->data.spi_serial_bus;
2704 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
2705 
2706 			if (lookup->index != -1 && lookup->n++ != lookup->index)
2707 				return 1;
2708 
2709 			status = acpi_get_handle(NULL,
2710 						 sb->resource_source.string_ptr,
2711 						 &parent_handle);
2712 
2713 			if (ACPI_FAILURE(status))
2714 				return -ENODEV;
2715 
2716 			if (ctlr) {
2717 				if (!device_match_acpi_handle(ctlr->dev.parent, parent_handle))
2718 					return -ENODEV;
2719 			} else {
2720 				struct acpi_device *adev;
2721 
2722 				adev = acpi_fetch_acpi_dev(parent_handle);
2723 				if (!adev)
2724 					return -ENODEV;
2725 
2726 				ctlr = acpi_spi_find_controller_by_adev(adev);
2727 				if (!ctlr)
2728 					return -EPROBE_DEFER;
2729 
2730 				lookup->ctlr = ctlr;
2731 			}
2732 
2733 			/*
2734 			 * ACPI DeviceSelection numbering is handled by the
2735 			 * host controller driver in Windows and can vary
2736 			 * from driver to driver. In Linux we always expect
2737 			 * 0 .. max - 1 so we need to ask the driver to
2738 			 * translate between the two schemes.
2739 			 */
2740 			if (ctlr->fw_translate_cs) {
2741 				int cs = ctlr->fw_translate_cs(ctlr,
2742 						sb->device_selection);
2743 				if (cs < 0)
2744 					return cs;
2745 				lookup->chip_select = cs;
2746 			} else {
2747 				lookup->chip_select = sb->device_selection;
2748 			}
2749 
2750 			lookup->max_speed_hz = sb->connection_speed;
2751 			lookup->bits_per_word = sb->data_bit_length;
2752 
2753 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
2754 				lookup->mode |= SPI_CPHA;
2755 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
2756 				lookup->mode |= SPI_CPOL;
2757 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
2758 				lookup->mode |= SPI_CS_HIGH;
2759 		}
2760 	} else if (lookup->irq < 0) {
2761 		struct resource r;
2762 
2763 		if (acpi_dev_resource_interrupt(ares, 0, &r))
2764 			lookup->irq = r.start;
2765 	}
2766 
2767 	/* Always tell the ACPI core to skip this resource */
2768 	return 1;
2769 }
2770 
2771 /**
2772  * acpi_spi_device_alloc - Allocate a spi device, and fill it in with ACPI information
2773  * @ctlr: controller to which the spi device belongs
2774  * @adev: ACPI Device for the spi device
2775  * @index: Index of the spi resource inside the ACPI Node
2776  *
2777  * This should be used to allocate a new SPI device from an ACPI Device node.
2778  * The caller is responsible for calling spi_add_device() to register the SPI device.
2779  *
2780  * If ctlr is set to NULL, the controller for the SPI device will be looked up
2781  * using the resource.
2782  * If index is set to -1, index is not used.
2783  * Note: If index is -1, ctlr must be set.
2784  *
2785  * Return: a pointer to the new device, or ERR_PTR on error.
2786  */
2787 struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
2788 					 struct acpi_device *adev,
2789 					 int index)
2790 {
2791 	acpi_handle parent_handle = NULL;
2792 	struct list_head resource_list;
2793 	struct acpi_spi_lookup lookup = {};
2794 	struct spi_device *spi;
2795 	int ret;
2796 
2797 	if (!ctlr && index == -1)
2798 		return ERR_PTR(-EINVAL);
2799 
2800 	lookup.ctlr		= ctlr;
2801 	lookup.irq		= -1;
2802 	lookup.index		= index;
2803 	lookup.n		= 0;
2804 
2805 	INIT_LIST_HEAD(&resource_list);
2806 	ret = acpi_dev_get_resources(adev, &resource_list,
2807 				     acpi_spi_add_resource, &lookup);
2808 	acpi_dev_free_resource_list(&resource_list);
2809 
2810 	if (ret < 0)
2811 		/* Found SPI in _CRS but it points to another controller */
2812 		return ERR_PTR(ret);
2813 
2814 	if (!lookup.max_speed_hz &&
2815 	    ACPI_SUCCESS(acpi_get_parent(adev->handle, &parent_handle)) &&
2816 	    device_match_acpi_handle(lookup.ctlr->dev.parent, parent_handle)) {
2817 		/* Apple does not use _CRS but nested devices for SPI target devices */
2818 		acpi_spi_parse_apple_properties(adev, &lookup);
2819 	}
2820 
2821 	if (!lookup.max_speed_hz)
2822 		return ERR_PTR(-ENODEV);
2823 
2824 	spi = spi_alloc_device(lookup.ctlr);
2825 	if (!spi) {
2826 		dev_err(&lookup.ctlr->dev, "failed to allocate SPI device for %s\n",
2827 			dev_name(&adev->dev));
2828 		return ERR_PTR(-ENOMEM);
2829 	}
2830 
2831 	spi_set_all_cs_unused(spi);
2832 	spi_set_chipselect(spi, 0, lookup.chip_select);
2833 
2834 	ACPI_COMPANION_SET(&spi->dev, adev);
2835 	spi->max_speed_hz	= lookup.max_speed_hz;
2836 	spi->mode		|= lookup.mode;
2837 	spi->irq		= lookup.irq;
2838 	spi->bits_per_word	= lookup.bits_per_word;
2839 	/*
2840 	 * By default spi->chip_select[0] will hold the physical CS number,
2841 	 * so set bit 0 in spi->cs_index_mask.
2842 	 */
2843 	spi->cs_index_mask	= BIT(0);
2844 
2845 	return spi;
2846 }
2847 EXPORT_SYMBOL_GPL(acpi_spi_device_alloc);
2848 
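/*
 * Illustrative sketch (not part of spi.c): allocating the second
 * SpiSerialBus resource (index 1) of an ACPI node and registering it.
 * The adev variable and the error handling are hypothetical and abridged.
 *
 *	spi = acpi_spi_device_alloc(NULL, adev, 1);
 *	if (IS_ERR(spi))
 *		return PTR_ERR(spi);
 *	ret = spi_add_device(spi);
 */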
2849 static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
2850 					    struct acpi_device *adev)
2851 {
2852 	struct spi_device *spi;
2853 
2854 	if (acpi_bus_get_status(adev) || !adev->status.present ||
2855 	    acpi_device_enumerated(adev))
2856 		return AE_OK;
2857 
2858 	spi = acpi_spi_device_alloc(ctlr, adev, -1);
2859 	if (IS_ERR(spi)) {
2860 		if (PTR_ERR(spi) == -ENOMEM)
2861 			return AE_NO_MEMORY;
2862 		else
2863 			return AE_OK;
2864 	}
2865 
2866 	acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
2867 			  sizeof(spi->modalias));
2868 
2869 	acpi_device_set_enumerated(adev);
2870 
2871 	adev->power.flags.ignore_parent = true;
2872 	if (spi_add_device(spi)) {
2873 		adev->power.flags.ignore_parent = false;
2874 		dev_err(&ctlr->dev, "failed to add SPI device %s from ACPI\n",
2875 			dev_name(&adev->dev));
2876 		spi_dev_put(spi);
2877 	}
2878 
2879 	return AE_OK;
2880 }
2881 
2882 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
2883 				       void *data, void **return_value)
2884 {
2885 	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
2886 	struct spi_controller *ctlr = data;
2887 
2888 	if (!adev)
2889 		return AE_OK;
2890 
2891 	return acpi_register_spi_device(ctlr, adev);
2892 }
2893 
2894 #define SPI_ACPI_ENUMERATE_MAX_DEPTH		32
2895 
2896 static void acpi_register_spi_devices(struct spi_controller *ctlr)
2897 {
2898 	acpi_status status;
2899 	acpi_handle handle;
2900 
2901 	handle = ACPI_HANDLE(ctlr->dev.parent);
2902 	if (!handle)
2903 		return;
2904 
2905 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2906 				     SPI_ACPI_ENUMERATE_MAX_DEPTH,
2907 				     acpi_spi_add_device, NULL, ctlr, NULL);
2908 	if (ACPI_FAILURE(status))
2909 		dev_warn(&ctlr->dev, "failed to enumerate SPI target devices\n");
2910 }
2911 #else
2912 static inline void acpi_register_spi_devices(struct spi_controller *ctlr) {}
2913 #endif /* CONFIG_ACPI */
2914 
2915 static void spi_controller_release(struct device *dev)
2916 {
2917 	struct spi_controller *ctlr;
2918 
2919 	ctlr = container_of(dev, struct spi_controller, dev);
2920 	kfree(ctlr);
2921 }
2922 
2923 static const struct class spi_controller_class = {
2924 	.name		= "spi_master",
2925 	.dev_release	= spi_controller_release,
2926 	.dev_groups	= spi_controller_groups,
2927 };
2928 
2929 #ifdef CONFIG_SPI_SLAVE
2930 /**
2931  * spi_target_abort - abort the ongoing transfer request on an SPI target controller
2932  * @spi: device used for the current transfer
2933  */
2934 int spi_target_abort(struct spi_device *spi)
2935 {
2936 	struct spi_controller *ctlr = spi->controller;
2937 
2938 	if (spi_controller_is_target(ctlr) && ctlr->target_abort)
2939 		return ctlr->target_abort(ctlr);
2940 
2941 	return -ENOTSUPP;
2942 }
2943 EXPORT_SYMBOL_GPL(spi_target_abort);
2944 
2945 static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
2946 			  char *buf)
2947 {
2948 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2949 						   dev);
2950 	struct device *child;
2951 	int ret;
2952 
2953 	child = device_find_any_child(&ctlr->dev);
2954 	ret = sysfs_emit(buf, "%s\n", child ? to_spi_device(child)->modalias : NULL);
2955 	put_device(child);
2956 
2957 	return ret;
2958 }
2959 
2960 static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
2961 			   const char *buf, size_t count)
2962 {
2963 	struct spi_controller *ctlr = container_of(dev, struct spi_controller,
2964 						   dev);
2965 	struct spi_device *spi;
2966 	struct device *child;
2967 	char name[32];
2968 	int rc;
2969 
2970 	rc = sscanf(buf, "%31s", name);
2971 	if (rc != 1 || !name[0])
2972 		return -EINVAL;
2973 
2974 	child = device_find_any_child(&ctlr->dev);
2975 	if (child) {
2976 		/* Remove registered target device */
2977 		device_unregister(child);
2978 		put_device(child);
2979 	}
2980 
2981 	if (strcmp(name, "(null)")) {
2982 		/* Register new target device */
2983 		spi = spi_alloc_device(ctlr);
2984 		if (!spi)
2985 			return -ENOMEM;
2986 
2987 		strscpy(spi->modalias, name, sizeof(spi->modalias));
2988 
2989 		rc = spi_add_device(spi);
2990 		if (rc) {
2991 			spi_dev_put(spi);
2992 			return rc;
2993 		}
2994 	}
2995 
2996 	return count;
2997 }
2998 
2999 static DEVICE_ATTR_RW(slave);
3000 
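/*
 * Illustrative usage (not part of spi.c): from userspace, a target device
 * is bound or removed through this attribute, assuming a target protocol
 * driver such as spi-slave-time is available:
 *
 *	echo spi-slave-time > /sys/class/spi_slave/spi0/slave
 *	echo "(null)" > /sys/class/spi_slave/spi0/slave
 */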
3001 static struct attribute *spi_target_attrs[] = {
3002 	&dev_attr_slave.attr,
3003 	NULL,
3004 };
3005 
3006 static const struct attribute_group spi_target_group = {
3007 	.attrs = spi_target_attrs,
3008 };
3009 
3010 static const struct attribute_group *spi_target_groups[] = {
3011 	&spi_controller_statistics_group,
3012 	&spi_target_group,
3013 	NULL,
3014 };
3015 
3016 static const struct class spi_target_class = {
3017 	.name		= "spi_slave",
3018 	.dev_release	= spi_controller_release,
3019 	.dev_groups	= spi_target_groups,
3020 };
3021 #else
3022 extern struct class spi_target_class;	/* dummy */
3023 #endif
3024 
3025 /**
3026  * __spi_alloc_controller - allocate an SPI host or target controller
3027  * @dev: the controller, possibly using the platform_bus
3028  * @size: how much zeroed driver-private data to allocate; the pointer to this
3029  *	memory is in the driver_data field of the returned device, accessible
3030  *	with spi_controller_get_devdata(); the memory is cacheline aligned;
3031  *	drivers granting DMA access to portions of their private data need to
3032  *	round up @size using ALIGN(size, dma_get_cache_alignment()).
3033  * @target: flag indicating whether to allocate an SPI host (false) or SPI target (true)
3034  *	controller
3035  * Context: can sleep
3036  *
3037  * This call is used only by SPI controller drivers, which are the
3038  * only ones directly touching chip registers.  It's how they allocate
3039  * an spi_controller structure, prior to calling spi_register_controller().
3040  *
3041  * This must be called from context that can sleep.
3042  *
3043  * The caller is responsible for assigning the bus number and initializing the
3044  * controller's methods before calling spi_register_controller(); and (after
3045  * errors adding the device) calling spi_controller_put() to prevent a memory
3046  * leak.
3047  *
3048  * Return: the SPI controller structure on success, else NULL.
3049  */
3050 struct spi_controller *__spi_alloc_controller(struct device *dev,
3051 					      unsigned int size, bool target)
3052 {
3053 	struct spi_controller	*ctlr;
3054 	size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());
3055 
3056 	if (!dev)
3057 		return NULL;
3058 
3059 	ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
3060 	if (!ctlr)
3061 		return NULL;
3062 
3063 	device_initialize(&ctlr->dev);
3064 	INIT_LIST_HEAD(&ctlr->queue);
3065 	spin_lock_init(&ctlr->queue_lock);
3066 	spin_lock_init(&ctlr->bus_lock_spinlock);
3067 	mutex_init(&ctlr->bus_lock_mutex);
3068 	mutex_init(&ctlr->io_mutex);
3069 	mutex_init(&ctlr->add_lock);
3070 	ctlr->bus_num = -1;
3071 	ctlr->num_chipselect = 1;
3072 	ctlr->target = target;
3073 	if (IS_ENABLED(CONFIG_SPI_SLAVE) && target)
3074 		ctlr->dev.class = &spi_target_class;
3075 	else
3076 		ctlr->dev.class = &spi_controller_class;
3077 	ctlr->dev.parent = dev;
3078 	pm_suspend_ignore_children(&ctlr->dev, true);
3079 	spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);
3080 
3081 	return ctlr;
3082 }
3083 EXPORT_SYMBOL_GPL(__spi_alloc_controller);
3084 
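/*
 * Illustrative sketch (not part of spi.c): the usual allocation pattern in
 * a platform driver's probe(), here via the spi_alloc_host() wrapper.
 * pdev and struct foo_priv are hypothetical.
 *
 *	struct spi_controller *ctlr;
 *	struct foo_priv *priv;
 *
 *	ctlr = spi_alloc_host(&pdev->dev, sizeof(*priv));
 *	if (!ctlr)
 *		return -ENOMEM;
 *	priv = spi_controller_get_devdata(ctlr);
 */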
3085 static void devm_spi_release_controller(struct device *dev, void *ctlr)
3086 {
3087 	spi_controller_put(*(struct spi_controller **)ctlr);
3088 }
3089 
3090 /**
3091  * __devm_spi_alloc_controller - resource-managed __spi_alloc_controller()
3092  * @dev: physical device of SPI controller
3093  * @size: how much zeroed driver-private data to allocate
3094  * @target: whether to allocate an SPI host (false) or SPI target (true) controller
3095  * Context: can sleep
3096  *
3097  * Allocate an SPI controller and automatically release a reference on it
3098  * when @dev is unbound from its driver.  Drivers are thus relieved from
3099  * having to call spi_controller_put().
3100  *
3101  * The arguments to this function are identical to __spi_alloc_controller().
3102  *
3103  * Return: the SPI controller structure on success, else NULL.
3104  */
3105 struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
3106 						   unsigned int size,
3107 						   bool target)
3108 {
3109 	struct spi_controller **ptr, *ctlr;
3110 
3111 	ptr = devres_alloc(devm_spi_release_controller, sizeof(*ptr),
3112 			   GFP_KERNEL);
3113 	if (!ptr)
3114 		return NULL;
3115 
3116 	ctlr = __spi_alloc_controller(dev, size, target);
3117 	if (ctlr) {
3118 		ctlr->devm_allocated = true;
3119 		*ptr = ctlr;
3120 		devres_add(dev, ptr);
3121 	} else {
3122 		devres_free(ptr);
3123 	}
3124 
3125 	return ctlr;
3126 }
3127 EXPORT_SYMBOL_GPL(__devm_spi_alloc_controller);
3128 
3129 /**
3130  * spi_get_gpio_descs() - grab chip select GPIOs for the controller
3131  * @ctlr: The SPI controller to grab GPIO descriptors for
3132  */
3133 static int spi_get_gpio_descs(struct spi_controller *ctlr)
3134 {
3135 	int nb, i;
3136 	struct gpio_desc **cs;
3137 	struct device *dev = &ctlr->dev;
3138 	unsigned long native_cs_mask = 0;
3139 	unsigned int num_cs_gpios = 0;
3140 
3141 	nb = gpiod_count(dev, "cs");
3142 	if (nb < 0) {
3143 		/* No GPIOs at all is fine, else return the error */
3144 		if (nb == -ENOENT)
3145 			return 0;
3146 		return nb;
3147 	}
3148 
3149 	ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
3150 
3151 	cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
3152 			  GFP_KERNEL);
3153 	if (!cs)
3154 		return -ENOMEM;
3155 	ctlr->cs_gpiods = cs;
3156 
3157 	for (i = 0; i < nb; i++) {
3158 		/*
3159 		 * Most chipselects are active low, the inverted
3160 		 * semantics are handled by special quirks in gpiolib,
3161 		 * so initializing them GPIOD_OUT_LOW here means
3162 		 * "unasserted", in most cases this will drive the physical
3163 		 * line high.
3164 		 */
3165 		cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
3166 						      GPIOD_OUT_LOW);
3167 		if (IS_ERR(cs[i]))
3168 			return PTR_ERR(cs[i]);
3169 
3170 		if (cs[i]) {
3171 			/*
3172 			 * If we find a CS GPIO, name it after the device and
3173 			 * chip select line.
3174 			 */
3175 			char *gpioname;
3176 
3177 			gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
3178 						  dev_name(dev), i);
3179 			if (!gpioname)
3180 				return -ENOMEM;
3181 			gpiod_set_consumer_name(cs[i], gpioname);
3182 			num_cs_gpios++;
3183 			continue;
3184 		}
3185 
3186 		if (ctlr->max_native_cs && i >= ctlr->max_native_cs) {
3187 			dev_err(dev, "Invalid native chip select %d\n", i);
3188 			return -EINVAL;
3189 		}
3190 		native_cs_mask |= BIT(i);
3191 	}
3192 
3193 	ctlr->unused_native_cs = ffs(~native_cs_mask) - 1;
3194 
3195 	if ((ctlr->flags & SPI_CONTROLLER_GPIO_SS) && num_cs_gpios &&
3196 	    ctlr->max_native_cs && ctlr->unused_native_cs >= ctlr->max_native_cs) {
3197 		dev_err(dev, "No unused native chip select available\n");
3198 		return -EINVAL;
3199 	}
3200 
3201 	return 0;
3202 }
3203 
3204 static int spi_controller_check_ops(struct spi_controller *ctlr)
3205 {
3206 	/*
3207 	 * The controller may implement only the high-level SPI-memory-like
3208 	 * operations if it does not support regular SPI transfers, and this is
3209 	 * a valid use case.
3210 	 * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
3211 	 * one of the ->transfer_xxx() methods be implemented.
3212 	 */
3213 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
3214 		if (!ctlr->transfer && !ctlr->transfer_one &&
3215 		   !ctlr->transfer_one_message) {
3216 			return -EINVAL;
3217 		}
3218 	}
3219 
3220 	return 0;
3221 }
3222 
3223 /* Allocate dynamic bus number using Linux idr */
3224 static int spi_controller_id_alloc(struct spi_controller *ctlr, int start, int end)
3225 {
3226 	int id;
3227 
3228 	mutex_lock(&board_lock);
3229 	id = idr_alloc(&spi_controller_idr, ctlr, start, end, GFP_KERNEL);
3230 	mutex_unlock(&board_lock);
3231 	if (WARN(id < 0, "couldn't get idr"))
3232 		return id == -ENOSPC ? -EBUSY : id;
3233 	ctlr->bus_num = id;
3234 	return 0;
3235 }
3236 
3237 /**
3238  * spi_register_controller - register SPI host or target controller
3239  * @ctlr: initialized controller, originally from spi_alloc_host() or
3240  *	spi_alloc_target()
3241  * Context: can sleep
3242  *
3243  * SPI controllers connect to their drivers using some non-SPI bus,
3244  * such as the platform bus.  The final stage of probe() in that code
3245  * includes calling spi_register_controller() to hook up to this SPI bus glue.
3246  *
3247  * SPI controllers use board-specific (often SoC-specific) bus numbers,
3248  * and board-specific addressing for SPI devices combines those numbers
3249  * with chip select numbers.  Since SPI does not directly support dynamic
3250  * device identification, boards need configuration tables telling which
3251  * chip is at which address.
3252  *
3253  * This must be called from context that can sleep.  It returns zero on
3254  * success, else a negative error code (dropping the controller's refcount).
3255  * After a successful return, the caller is responsible for calling
3256  * spi_unregister_controller().
3257  *
3258  * Return: zero on success, else a negative error code.
3259  */
3260 int spi_register_controller(struct spi_controller *ctlr)
3261 {
3262 	struct device		*dev = ctlr->dev.parent;
3263 	struct boardinfo	*bi;
3264 	int			first_dynamic;
3265 	int			status;
3266 	int			idx;
3267 
3268 	if (!dev)
3269 		return -ENODEV;
3270 
3271 	/*
3272 	 * Make sure all necessary hooks are implemented before registering
3273 	 * the SPI controller.
3274 	 */
3275 	status = spi_controller_check_ops(ctlr);
3276 	if (status)
3277 		return status;
3278 
3279 	if (ctlr->bus_num < 0)
3280 		ctlr->bus_num = of_alias_get_id(ctlr->dev.of_node, "spi");
3281 	if (ctlr->bus_num >= 0) {
3282 		/* Devices with a fixed bus num must check in with that num */
3283 		status = spi_controller_id_alloc(ctlr, ctlr->bus_num, ctlr->bus_num + 1);
3284 		if (status)
3285 			return status;
3286 	}
3287 	if (ctlr->bus_num < 0) {
3288 		first_dynamic = of_alias_get_highest_id("spi");
3289 		if (first_dynamic < 0)
3290 			first_dynamic = 0;
3291 		else
3292 			first_dynamic++;
3293 
3294 		status = spi_controller_id_alloc(ctlr, first_dynamic, 0);
3295 		if (status)
3296 			return status;
3297 	}
3298 	ctlr->bus_lock_flag = 0;
3299 	init_completion(&ctlr->xfer_completion);
3300 	init_completion(&ctlr->cur_msg_completion);
3301 	if (!ctlr->max_dma_len)
3302 		ctlr->max_dma_len = INT_MAX;
3303 
3304 	/*
3305 	 * Register the device, then userspace will see it.
3306 	 * Registration fails if the bus ID is in use.
3307 	 */
3308 	dev_set_name(&ctlr->dev, "spi%u", ctlr->bus_num);
3309 
3310 	if (!spi_controller_is_target(ctlr) && ctlr->use_gpio_descriptors) {
3311 		status = spi_get_gpio_descs(ctlr);
3312 		if (status)
3313 			goto free_bus_id;
3314 		/*
3315 		 * A controller using GPIO descriptors always
3316 		 * supports SPI_CS_HIGH if need be.
3317 		 */
3318 		ctlr->mode_bits |= SPI_CS_HIGH;
3319 	}
3320 
3321 	/*
3322 	 * Even if it's just one always-selected device, there must
3323 	 * be at least one chipselect.
3324 	 */
3325 	if (!ctlr->num_chipselect) {
3326 		status = -EINVAL;
3327 		goto free_bus_id;
3328 	}
3329 
3330 	/* Setting last_cs to SPI_INVALID_CS means no chip selected */
3331 	for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
3332 		ctlr->last_cs[idx] = SPI_INVALID_CS;
3333 
3334 	status = device_add(&ctlr->dev);
3335 	if (status < 0)
3336 		goto free_bus_id;
3337 	dev_dbg(dev, "registered %s %s\n",
3338 			spi_controller_is_target(ctlr) ? "target" : "host",
3339 			dev_name(&ctlr->dev));
3340 
3341 	/*
3342 	 * If we're using a queued driver, start the queue. Note that we don't
3343 	 * need the queueing logic if the driver is only supporting high-level
3344 	 * memory operations.
3345 	 */
3346 	if (ctlr->transfer) {
3347 		dev_info(dev, "controller is unqueued, this is deprecated\n");
3348 	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
3349 		status = spi_controller_initialize_queue(ctlr);
3350 		if (status) {
3351 			device_del(&ctlr->dev);
3352 			goto free_bus_id;
3353 		}
3354 	}
3355 	/* Add statistics */
3356 	ctlr->pcpu_statistics = spi_alloc_pcpu_stats(dev);
3357 	if (!ctlr->pcpu_statistics) {
3358 		dev_err(dev, "Error allocating per-cpu statistics\n");
3359 		status = -ENOMEM;
3360 		goto destroy_queue;
3361 	}
3362 
3363 	mutex_lock(&board_lock);
3364 	list_add_tail(&ctlr->list, &spi_controller_list);
3365 	list_for_each_entry(bi, &board_list, list)
3366 		spi_match_controller_to_boardinfo(ctlr, &bi->board_info);
3367 	mutex_unlock(&board_lock);
3368 
3369 	/* Register devices from the device tree and ACPI */
3370 	of_register_spi_devices(ctlr);
3371 	acpi_register_spi_devices(ctlr);
3372 	return status;
3373 
3374 destroy_queue:
3375 	spi_destroy_queue(ctlr);
3376 free_bus_id:
3377 	mutex_lock(&board_lock);
3378 	idr_remove(&spi_controller_idr, ctlr->bus_num);
3379 	mutex_unlock(&board_lock);
3380 	return status;
3381 }
3382 EXPORT_SYMBOL_GPL(spi_register_controller);
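
/*
 * A minimal sketch (not from this file) of how a non-managed controller
 * driver pairs spi_register_controller() in probe() with
 * spi_unregister_controller() in remove(). The foo_* names and the use
 * of the platform bus are assumptions for illustration only.
 */
#if 0
static void foo_spi_remove(struct platform_device *pdev)
{
	struct spi_controller *ctlr = platform_get_drvdata(pdev);

	/* Also drops the reference taken when the controller was allocated */
	spi_unregister_controller(ctlr);
}
#endif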
3383 
3384 static void devm_spi_unregister(struct device *dev, void *res)
3385 {
3386 	spi_unregister_controller(*(struct spi_controller **)res);
3387 }
3388 
3389 /**
3390  * devm_spi_register_controller - register managed SPI host or target controller
3391  * @dev:    device managing SPI controller
3392  * @ctlr: initialized controller, originally from spi_alloc_host() or
3393  *	spi_alloc_target()
3394  * Context: can sleep
3395  *
3396  * Register an SPI controller as with spi_register_controller(); the
3397  * controller will automatically be unregistered and freed when @dev is unbound.
3398  *
3399  * Return: zero on success, else a negative error code.
3400  */
3401 int devm_spi_register_controller(struct device *dev,
3402 				 struct spi_controller *ctlr)
3403 {
3404 	struct spi_controller **ptr;
3405 	int ret;
3406 
3407 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
3408 	if (!ptr)
3409 		return -ENOMEM;
3410 
3411 	ret = spi_register_controller(ctlr);
3412 	if (!ret) {
3413 		*ptr = ctlr;
3414 		devres_add(dev, ptr);
3415 	} else {
3416 		devres_free(ptr);
3417 	}
3418 
3419 	return ret;
3420 }
3421 EXPORT_SYMBOL_GPL(devm_spi_register_controller);
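
/*
 * A minimal probe() sketch for the managed API, assuming a hypothetical
 * platform driver named foo with a foo_transfer_one() callback; a real
 * driver would also need headers beyond those included at the top of
 * this file (e.g. linux/platform_device.h).
 */
#if 0
static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;

	ctlr = devm_spi_alloc_host(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	ctlr->bus_num = -1;			/* ask for a dynamic bus number */
	ctlr->num_chipselect = 1;		/* must be non-zero, see above */
	ctlr->transfer_one = foo_transfer_one;	/* hypothetical callback */

	/* Unregistration and the final put happen automatically on unbind */
	return devm_spi_register_controller(&pdev->dev, ctlr);
}
#endif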
3422 
3423 static int __unregister(struct device *dev, void *null)
3424 {
3425 	spi_unregister_device(to_spi_device(dev));
3426 	return 0;
3427 }
3428 
3429 /**
3430  * spi_unregister_controller - unregister SPI host or target controller
3431  * @ctlr: the controller being unregistered
3432  * Context: can sleep
3433  *
3434  * This call is used only by SPI controller drivers, which are the
3435  * only ones directly touching chip registers.
3436  *
3437  * This must be called from context that can sleep.
3438  *
3439  * Note that this function also drops a reference to the controller.
3440  */
3441 void spi_unregister_controller(struct spi_controller *ctlr)
3442 {
3443 	struct spi_controller *found;
3444 	int id = ctlr->bus_num;
3445 
3446 	/* Prevent addition of new devices, unregister existing ones */
3447 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3448 		mutex_lock(&ctlr->add_lock);
3449 
3450 	device_for_each_child(&ctlr->dev, NULL, __unregister);
3451 
3452 	/* First make sure that this controller was ever added */
3453 	mutex_lock(&board_lock);
3454 	found = idr_find(&spi_controller_idr, id);
3455 	mutex_unlock(&board_lock);
3456 	if (ctlr->queued) {
3457 		if (spi_destroy_queue(ctlr))
3458 			dev_err(&ctlr->dev, "queue remove failed\n");
3459 	}
3460 	mutex_lock(&board_lock);
3461 	list_del(&ctlr->list);
3462 	mutex_unlock(&board_lock);
3463 
3464 	device_del(&ctlr->dev);
3465 
3466 	/* Free bus id */
3467 	mutex_lock(&board_lock);
3468 	if (found == ctlr)
3469 		idr_remove(&spi_controller_idr, id);
3470 	mutex_unlock(&board_lock);
3471 
3472 	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
3473 		mutex_unlock(&ctlr->add_lock);
3474 
3475 	/*
3476 	 * Release the last reference on the controller if its driver
3477 	 * has not yet been converted to devm_spi_alloc_host/target().
3478 	 */
3479 	if (!ctlr->devm_allocated)
3480 		put_device(&ctlr->dev);
3481 }
3482 EXPORT_SYMBOL_GPL(spi_unregister_controller);
3483 
3484 static inline int __spi_check_suspended(const struct spi_controller *ctlr)
3485 {
3486 	return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
3487 }
3488 
3489 static inline void __spi_mark_suspended(struct spi_controller *ctlr)
3490 {
3491 	mutex_lock(&ctlr->bus_lock_mutex);
3492 	ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
3493 	mutex_unlock(&ctlr->bus_lock_mutex);
3494 }
3495 
3496 static inline void __spi_mark_resumed(struct spi_controller *ctlr)
3497 {
3498 	mutex_lock(&ctlr->bus_lock_mutex);
3499 	ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
3500 	mutex_unlock(&ctlr->bus_lock_mutex);
3501 }
3502 
3503 int spi_controller_suspend(struct spi_controller *ctlr)
3504 {
3505 	int ret = 0;
3506 
3507 	/* Basically no-ops for non-queued controllers */
3508 	if (ctlr->queued) {
3509 		ret = spi_stop_queue(ctlr);
3510 		if (ret)
3511 			dev_err(&ctlr->dev, "queue stop failed\n");
3512 	}
3513 
3514 	__spi_mark_suspended(ctlr);
3515 	return ret;
3516 }
3517 EXPORT_SYMBOL_GPL(spi_controller_suspend);
3518 
3519 int spi_controller_resume(struct spi_controller *ctlr)
3520 {
3521 	int ret = 0;
3522 
3523 	__spi_mark_resumed(ctlr);
3524 
3525 	if (ctlr->queued) {
3526 		ret = spi_start_queue(ctlr);
3527 		if (ret)
3528 			dev_err(&ctlr->dev, "queue restart failed\n");
3529 	}
3530 	return ret;
3531 }
3532 EXPORT_SYMBOL_GPL(spi_controller_resume);
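
/*
 * A sketch of how a controller driver might wire these helpers into
 * system sleep, assuming its drvdata points at the spi_controller; the
 * foo_* names are hypothetical.
 */
#if 0
static int foo_spi_suspend(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	/* Stop the queue so no new messages reach the hardware */
	return spi_controller_suspend(ctlr);
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_controller *ctlr = dev_get_drvdata(dev);

	/* Restart the queue once the hardware is powered again */
	return spi_controller_resume(ctlr);
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);
#endif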
3533 
3534 /*-------------------------------------------------------------------------*/
3535 
3536 /* Core methods for spi_message alterations */
3537 
3538 static void __spi_replace_transfers_release(struct spi_controller *ctlr,
3539 					    struct spi_message *msg,
3540 					    void *res)
3541 {
3542 	struct spi_replaced_transfers *rxfer = res;
3543 	size_t i;
3544 
3545 	/* Call extra callback if requested */
3546 	if (rxfer->release)
3547 		rxfer->release(ctlr, msg, res);
3548 
3549 	/* Insert replaced transfers back into the message */
3550 	list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
3551 
3552 	/* Remove the formerly inserted entries */
3553 	for (i = 0; i < rxfer->inserted; i++)
3554 		list_del(&rxfer->inserted_transfers[i].transfer_list);
3555 }
3556 
3557 /**
3558  * spi_replace_transfers - replace transfers with several transfers
3559  *                         and register change with spi_message.resources
3560  * @msg:           the spi_message we work upon
3561  * @xfer_first:    the first spi_transfer we want to replace
3562  * @remove:        number of transfers to remove
3563  * @insert:        the number of transfers we want to insert instead
3564  * @release:       extra release code necessary in some circumstances
3565  * @extradatasize: extra data to allocate (with alignment guarantees
3566  *                 of struct @spi_transfer)
3567  * @gfp:           gfp flags
3568  *
3569  * Return: pointer to @spi_replaced_transfers,
3570  *         or an ERR_PTR() in case of errors.
3571  */
3572 static struct spi_replaced_transfers *spi_replace_transfers(
3573 	struct spi_message *msg,
3574 	struct spi_transfer *xfer_first,
3575 	size_t remove,
3576 	size_t insert,
3577 	spi_replaced_release_t release,
3578 	size_t extradatasize,
3579 	gfp_t gfp)
3580 {
3581 	struct spi_replaced_transfers *rxfer;
3582 	struct spi_transfer *xfer;
3583 	size_t i;
3584 
3585 	/* Allocate the structure using spi_res */
3586 	rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
3587 			      struct_size(rxfer, inserted_transfers, insert)
3588 			      + extradatasize,
3589 			      gfp);
3590 	if (!rxfer)
3591 		return ERR_PTR(-ENOMEM);
3592 
3593 	/* The release code to invoke before running the generic release */
3594 	rxfer->release = release;
3595 
3596 	/* Assign extradata */
3597 	if (extradatasize)
3598 		rxfer->extradata =
3599 			&rxfer->inserted_transfers[insert];
3600 
3601 	/* Init the replaced_transfers list */
3602 	INIT_LIST_HEAD(&rxfer->replaced_transfers);
3603 
3604 	/*
3605 	 * Assign the list_entry after which we should reinsert
3606 	 * the @replaced_transfers - it may be spi_message.transfers!
3607 	 */
3608 	rxfer->replaced_after = xfer_first->transfer_list.prev;
3609 
3610 	/* Remove the requested number of transfers */
3611 	for (i = 0; i < remove; i++) {
3612 		/*
3613 		 * If the entry after replaced_after is msg->transfers,
3614 		 * then we have been requested to remove more transfers
3615 		 * than are in the list.
3616 		 */
3617 		if (rxfer->replaced_after->next == &msg->transfers) {
3618 			dev_err(&msg->spi->dev,
3619 				"requested to remove more spi_transfers than are available\n");
3620 			/* Insert replaced transfers back into the message */
3621 			list_splice(&rxfer->replaced_transfers,
3622 				    rxfer->replaced_after);
3623 
3624 			/* Free the spi_replace_transfer structure... */
3625 			spi_res_free(rxfer);
3626 
3627 			/* ...and return with an error */
3628 			return ERR_PTR(-EINVAL);
3629 		}
3630 
3631 		/*
3632 		 * Remove the entry after replaced_after from list of
3633 		 * transfers and add it to list of replaced_transfers.
3634 		 */
3635 		list_move_tail(rxfer->replaced_after->next,
3636 			       &rxfer->replaced_transfers);
3637 	}
3638 
3639 	/*
3640 	 * Create copy of the given xfer with identical settings
3641 	 * based on the first transfer to get removed.
3642 	 */
3643 	for (i = 0; i < insert; i++) {
3644 		/* We need to run in reverse order */
3645 		xfer = &rxfer->inserted_transfers[insert - 1 - i];
3646 
3647 		/* Copy all spi_transfer data */
3648 		memcpy(xfer, xfer_first, sizeof(*xfer));
3649 
3650 		/* Add to list */
3651 		list_add(&xfer->transfer_list, rxfer->replaced_after);
3652 
3653 		/* Clear cs_change and delay for all but the last */
3654 		if (i) {
3655 			xfer->cs_change = false;
3656 			xfer->delay.value = 0;
3657 		}
3658 	}
3659 
3660 	/* Set up inserted... */
3661 	rxfer->inserted = insert;
3662 
3663 	/* ...and register it with spi_res/spi_message */
3664 	spi_res_add(msg, rxfer);
3665 
3666 	return rxfer;
3667 }
3668 
3669 static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
3670 					struct spi_message *msg,
3671 					struct spi_transfer **xferp,
3672 					size_t maxsize)
3673 {
3674 	struct spi_transfer *xfer = *xferp, *xfers;
3675 	struct spi_replaced_transfers *srt;
3676 	size_t offset;
3677 	size_t count, i;
3678 
3679 	/* Calculate how many we have to replace */
3680 	count = DIV_ROUND_UP(xfer->len, maxsize);
3681 
3682 	/* Create replacement */
3683 	srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
3684 	if (IS_ERR(srt))
3685 		return PTR_ERR(srt);
3686 	xfers = srt->inserted_transfers;
3687 
3688 	/*
3689 	 * Now handle each of those newly inserted spi_transfers.
3690 	 * Note that the replacement spi_transfers are all preset
3691 	 * to the same values as *xferp, so tx_buf, rx_buf and len
3692 	 * (as well as most other fields) are identical;
3693 	 * we just have to fix up len and the pointers.
3694 	 */
3695 
3696 	/*
3697 	 * The first transfer just needs the length modified, so we
3698 	 * run it outside the loop.
3699 	 */
3700 	xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
3701 
3702 	/* All the others need rx_buf/tx_buf also set */
3703 	for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
3704 		/* Update rx_buf, tx_buf and DMA */
3705 		if (xfers[i].rx_buf)
3706 			xfers[i].rx_buf += offset;
3707 		if (xfers[i].tx_buf)
3708 			xfers[i].tx_buf += offset;
3709 
3710 		/* Update length */
3711 		xfers[i].len = min(maxsize, xfers[i].len - offset);
3712 	}
3713 
3714 	/*
3715 	 * We set up xferp to the last entry we have inserted,
3716 	 * so that we skip those already split transfers.
3717 	 */
3718 	*xferp = &xfers[count - 1];
3719 
3720 	/* Increment statistics counters */
3721 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
3722 				       transfers_split_maxsize);
3723 	SPI_STATISTICS_INCREMENT_FIELD(msg->spi->pcpu_statistics,
3724 				       transfers_split_maxsize);
3725 
3726 	return 0;
3727 }
3728 
3729 /**
3730  * spi_split_transfers_maxsize - split spi transfers into multiple transfers
3731  *                               when an individual transfer exceeds a
3732  *                               certain size
3733  * @ctlr:    the @spi_controller for this transfer
3734  * @msg:   the @spi_message to transform
3735  * @maxsize:  the maximum transfer size, in bytes; longer transfers are split
3736  *
3737  * This function allocates resources that are automatically freed during the
3738  * spi message unoptimize phase so this function should only be called from
3739  * optimize_message callbacks.
3740  *
3741  * Return: status of transformation
3742  */
3743 int spi_split_transfers_maxsize(struct spi_controller *ctlr,
3744 				struct spi_message *msg,
3745 				size_t maxsize)
3746 {
3747 	struct spi_transfer *xfer;
3748 	int ret;
3749 
3750 	/*
3751 	 * Iterate over the transfer_list,
3752 	 * but note that xfer is advanced to the last transfer inserted
3753 	 * to avoid checking sizes again unnecessarily (also, xfer may
3754 	 * belong to a different list by the time the
3755 	 * replacement has happened).
3756 	 */
3757 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3758 		if (xfer->len > maxsize) {
3759 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3760 							   maxsize);
3761 			if (ret)
3762 				return ret;
3763 		}
3764 	}
3765 
3766 	return 0;
3767 }
3768 EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
3769 
3770 
3771 /**
3772  * spi_split_transfers_maxwords - split SPI transfers into multiple transfers
3773  *                                when an individual transfer exceeds a
3774  *                                certain number of SPI words
3775  * @ctlr:     the @spi_controller for this transfer
3776  * @msg:      the @spi_message to transform
3777  * @maxwords: the number of words to limit each transfer to
3778  *
3779  * This function allocates resources that are automatically freed during the
3780  * spi message unoptimize phase so this function should only be called from
3781  * optimize_message callbacks.
3782  *
3783  * Return: status of transformation
3784  */
3785 int spi_split_transfers_maxwords(struct spi_controller *ctlr,
3786 				 struct spi_message *msg,
3787 				 size_t maxwords)
3788 {
3789 	struct spi_transfer *xfer;
3790 
3791 	/*
3792 	 * Iterate over the transfer_list,
3793 	 * but note that xfer is advanced to the last transfer inserted
3794 	 * to avoid checking sizes again unnecessarily (also, xfer may
3795 	 * belong to a different list by the time the
3796 	 * replacement has happened).
3797 	 */
3798 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3799 		size_t maxsize;
3800 		int ret;
3801 
3802 		maxsize = maxwords * spi_bpw_to_bytes(xfer->bits_per_word);
3803 		if (xfer->len > maxsize) {
3804 			ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
3805 							   maxsize);
3806 			if (ret)
3807 				return ret;
3808 		}
3809 	}
3810 
3811 	return 0;
3812 }
3813 EXPORT_SYMBOL_GPL(spi_split_transfers_maxwords);
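
/*
 * As the kernel-doc above notes, both splitters are meant to be called
 * from an optimize_message() callback. A sketch for a controller whose
 * FIFO caps each transfer at 64 bytes; the limit and the foo_ name are
 * hypothetical.
 */
#if 0
static int foo_optimize_message(struct spi_message *msg)
{
	/* Replace any oversized transfer with several <= 64 byte ones */
	return spi_split_transfers_maxsize(msg->spi->controller, msg, 64);
}
#endif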
3814 
3815 /*-------------------------------------------------------------------------*/
3816 
3817 /*
3818  * Core methods for SPI controller protocol drivers. Some of the
3819  * other core methods are currently defined as inline functions.
3820  */
3821 
3822 static int __spi_validate_bits_per_word(struct spi_controller *ctlr,
3823 					u8 bits_per_word)
3824 {
3825 	if (ctlr->bits_per_word_mask) {
3826 		/* Only 32 bits fit in the mask */
3827 		if (bits_per_word > 32)
3828 			return -EINVAL;
3829 		if (!(ctlr->bits_per_word_mask & SPI_BPW_MASK(bits_per_word)))
3830 			return -EINVAL;
3831 	}
3832 
3833 	return 0;
3834 }
3835 
3836 /**
3837  * spi_set_cs_timing - configure CS setup, hold, and inactive delays
3838  * @spi: the device that requires specific CS timing configuration
3839  *
3840  * Return: zero on success, else a negative error code.
3841  */
3842 static int spi_set_cs_timing(struct spi_device *spi)
3843 {
3844 	struct device *parent = spi->controller->dev.parent;
3845 	int status = 0;
3846 
3847 	if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
3848 		if (spi->controller->auto_runtime_pm) {
3849 			status = pm_runtime_get_sync(parent);
3850 			if (status < 0) {
3851 				pm_runtime_put_noidle(parent);
3852 				dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3853 					status);
3854 				return status;
3855 			}
3856 
3857 			status = spi->controller->set_cs_timing(spi);
3858 			pm_runtime_put_autosuspend(parent);
3859 		} else {
3860 			status = spi->controller->set_cs_timing(spi);
3861 		}
3862 	}
3863 	return status;
3864 }
3865 
3866 /**
3867  * spi_setup - setup SPI mode and clock rate
3868  * @spi: the device whose settings are being modified
3869  * Context: can sleep, and no requests are queued to the device
3870  *
3871  * SPI protocol drivers may need to update the transfer mode if the
3872  * device doesn't work with its default.  They may likewise need
3873  * to update clock rates or word sizes from initial values.  This function
3874  * changes those settings, and must be called from a context that can sleep.
3875  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
3876  * effect the next time the device is selected and data is transferred to
3877  * or from it.  When this function returns, the SPI device is deselected.
3878  *
3879  * Note that this call will fail if the protocol driver specifies an option
3880  * that the underlying controller or its driver does not support.  For
3881  * example, not all hardware supports wire transfers using nine bit words,
3882  * LSB-first wire encoding, or active-high chipselects.
3883  *
3884  * Return: zero on success, else a negative error code.
3885  */
3886 int spi_setup(struct spi_device *spi)
3887 {
3888 	unsigned	bad_bits, ugly_bits;
3889 	int		status;
3890 
3891 	/*
3892 	 * Check the mode to prevent any two of DUAL, QUAD and NO_MOSI/MISO
3893 	 * from being set at the same time.
3894 	 */
3895 	if ((hweight_long(spi->mode &
3896 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_NO_TX)) > 1) ||
3897 	    (hweight_long(spi->mode &
3898 		(SPI_RX_DUAL | SPI_RX_QUAD | SPI_NO_RX)) > 1)) {
3899 		dev_err(&spi->dev,
3900 	"setup: cannot select any two of dual, quad and no-rx/tx at the same time\n");
3901 		return -EINVAL;
3902 	}
3903 	/* If it is SPI_3WIRE mode, DUAL and QUAD should be forbidden */
3904 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
3905 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3906 		 SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
3907 		return -EINVAL;
3908 	/* Check against conflicting MOSI idle configuration */
3909 	if ((spi->mode & SPI_MOSI_IDLE_LOW) && (spi->mode & SPI_MOSI_IDLE_HIGH)) {
3910 		dev_err(&spi->dev,
3911 			"setup: MOSI configured to idle low and high at the same time.\n");
3912 		return -EINVAL;
3913 	}
3914 	/*
3915 	 * Help drivers fail *cleanly* when they need options
3916 	 * that aren't supported with their current controller.
3917 	 * SPI_CS_WORD has a fallback software implementation,
3918 	 * so it is ignored here.
3919 	 */
3920 	bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD |
3921 				 SPI_NO_TX | SPI_NO_RX);
3922 	ugly_bits = bad_bits &
3923 		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
3924 		     SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
3925 	if (ugly_bits) {
3926 		dev_warn(&spi->dev,
3927 			 "setup: ignoring unsupported mode bits %x\n",
3928 			 ugly_bits);
3929 		spi->mode &= ~ugly_bits;
3930 		bad_bits &= ~ugly_bits;
3931 	}
3932 	if (bad_bits) {
3933 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
3934 			bad_bits);
3935 		return -EINVAL;
3936 	}
3937 
3938 	if (!spi->bits_per_word) {
3939 		spi->bits_per_word = 8;
3940 	} else {
3941 		/*
3942 		 * Some controllers may not support the default 8 bits-per-word
3943 		 * so only perform the check when this is explicitly provided.
3944 		 */
3945 		status = __spi_validate_bits_per_word(spi->controller,
3946 						      spi->bits_per_word);
3947 		if (status)
3948 			return status;
3949 	}
3950 
3951 	if (spi->controller->max_speed_hz &&
3952 	    (!spi->max_speed_hz ||
3953 	     spi->max_speed_hz > spi->controller->max_speed_hz))
3954 		spi->max_speed_hz = spi->controller->max_speed_hz;
3955 
3956 	mutex_lock(&spi->controller->io_mutex);
3957 
3958 	if (spi->controller->setup) {
3959 		status = spi->controller->setup(spi);
3960 		if (status) {
3961 			mutex_unlock(&spi->controller->io_mutex);
3962 			dev_err(&spi->controller->dev, "Failed to setup device: %d\n",
3963 				status);
3964 			return status;
3965 		}
3966 	}
3967 
3968 	status = spi_set_cs_timing(spi);
3969 	if (status) {
3970 		mutex_unlock(&spi->controller->io_mutex);
3971 		return status;
3972 	}
3973 
3974 	if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
3975 		status = pm_runtime_resume_and_get(spi->controller->dev.parent);
3976 		if (status < 0) {
3977 			mutex_unlock(&spi->controller->io_mutex);
3978 			dev_err(&spi->controller->dev, "Failed to power device: %d\n",
3979 				status);
3980 			return status;
3981 		}
3982 
3983 		/*
3984 		 * We do not want to return positive value from pm_runtime_get,
3985 		 * there are many instances of devices calling spi_setup() and
3986 		 * checking for a non-zero return value instead of a negative
3987 		 * return value.
3988 		 */
3989 		status = 0;
3990 
3991 		spi_set_cs(spi, false, true);
3992 		pm_runtime_put_autosuspend(spi->controller->dev.parent);
3993 	} else {
3994 		spi_set_cs(spi, false, true);
3995 	}
3996 
3997 	mutex_unlock(&spi->controller->io_mutex);
3998 
3999 	if (spi->rt && !spi->controller->rt) {
4000 		spi->controller->rt = true;
4001 		spi_set_thread_rt(spi->controller);
4002 	}
4003 
4004 	trace_spi_setup(spi, status);
4005 
4006 	dev_dbg(&spi->dev, "setup mode %lu, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
4007 			spi->mode & SPI_MODE_X_MASK,
4008 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
4009 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
4010 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
4011 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
4012 			spi->bits_per_word, spi->max_speed_hz,
4013 			status);
4014 
4015 	return status;
4016 }
4017 EXPORT_SYMBOL_GPL(spi_setup);
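
/*
 * A typical spi_setup() call site in a protocol driver, sketched under
 * the assumption of a device that needs mode 3 and 16-bit words; the
 * foo_ name and the values are illustrative only.
 */
#if 0
static int foo_configure(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;			/* CPOL=1, CPHA=1 */
	spi->bits_per_word = 16;
	spi->max_speed_hz = 1000000;		/* 1 MHz */

	/* Fails cleanly if the controller can't provide these options */
	return spi_setup(spi);
}
#endif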
4018 
4019 static int _spi_xfer_word_delay_update(struct spi_transfer *xfer,
4020 				       struct spi_device *spi)
4021 {
4022 	int delay1, delay2;
4023 
4024 	delay1 = spi_delay_to_ns(&xfer->word_delay, xfer);
4025 	if (delay1 < 0)
4026 		return delay1;
4027 
4028 	delay2 = spi_delay_to_ns(&spi->word_delay, xfer);
4029 	if (delay2 < 0)
4030 		return delay2;
4031 
4032 	if (delay1 < delay2)
4033 		memcpy(&xfer->word_delay, &spi->word_delay,
4034 		       sizeof(xfer->word_delay));
4035 
4036 	return 0;
4037 }
4038 
4039 static int __spi_validate(struct spi_device *spi, struct spi_message *message)
4040 {
4041 	struct spi_controller *ctlr = spi->controller;
4042 	struct spi_transfer *xfer;
4043 	int w_size;
4044 
4045 	if (list_empty(&message->transfers))
4046 		return -EINVAL;
4047 
4048 	message->spi = spi;
4049 
4050 	/*
4051 	 * Half-duplex links include original MicroWire, and ones with
4052 	 * only one data pin like SPI_3WIRE (switches direction) or where
4053 	 * either MOSI or MISO is missing.  They can also be caused by
4054 	 * software limitations.
4055 	 */
4056 	if ((ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) ||
4057 	    (spi->mode & SPI_3WIRE)) {
4058 		unsigned flags = ctlr->flags;
4059 
4060 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4061 			if (xfer->rx_buf && xfer->tx_buf)
4062 				return -EINVAL;
4063 			if ((flags & SPI_CONTROLLER_NO_TX) && xfer->tx_buf)
4064 				return -EINVAL;
4065 			if ((flags & SPI_CONTROLLER_NO_RX) && xfer->rx_buf)
4066 				return -EINVAL;
4067 		}
4068 	}
4069 
4070 	/*
4071 	 * Set transfer bits_per_word and max speed as spi device default if
4072 	 * it is not set for this transfer.
4073 	 * Set transfer tx_nbits and rx_nbits as single transfer default
4074 	 * (SPI_NBITS_SINGLE) if it is not set for this transfer.
4075 	 * Ensure transfer word_delay is at least as long as that required by
4076 	 * device itself.
4077 	 */
4078 	message->frame_length = 0;
4079 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
4080 		xfer->effective_speed_hz = 0;
4081 		message->frame_length += xfer->len;
4082 		if (!xfer->bits_per_word)
4083 			xfer->bits_per_word = spi->bits_per_word;
4084 
4085 		if (!xfer->speed_hz)
4086 			xfer->speed_hz = spi->max_speed_hz;
4087 
4088 		if (ctlr->max_speed_hz && xfer->speed_hz > ctlr->max_speed_hz)
4089 			xfer->speed_hz = ctlr->max_speed_hz;
4090 
4091 		if (__spi_validate_bits_per_word(ctlr, xfer->bits_per_word))
4092 			return -EINVAL;
4093 
4094 		/*
4095 		 * DDR mode is supported only if the controller has dtr_caps set;
4096 		 * otherwise SDR mode is assumed (applicable only to QSPI controllers).
4097 		 */
4098 		if (xfer->dtr_mode && !ctlr->dtr_caps)
4099 			return -EINVAL;
4100 
4101 		/*
4102 		 * The SPI transfer length must be a multiple of the SPI word size,
4103 		 * where the word size is rounded up to a power-of-two number of bytes.
4104 		 */
4105 		if (xfer->bits_per_word <= 8)
4106 			w_size = 1;
4107 		else if (xfer->bits_per_word <= 16)
4108 			w_size = 2;
4109 		else
4110 			w_size = 4;
4111 
4112 		/* No partial transfers accepted */
4113 		if (xfer->len % w_size)
4114 			return -EINVAL;
4115 
4116 		if (xfer->speed_hz && ctlr->min_speed_hz &&
4117 		    xfer->speed_hz < ctlr->min_speed_hz)
4118 			return -EINVAL;
4119 
4120 		if (xfer->tx_buf && !xfer->tx_nbits)
4121 			xfer->tx_nbits = SPI_NBITS_SINGLE;
4122 		if (xfer->rx_buf && !xfer->rx_nbits)
4123 			xfer->rx_nbits = SPI_NBITS_SINGLE;
4124 		/*
4125 		 * Check transfer tx/rx_nbits:
4126 		 * 1. check the value matches one of single, dual and quad
4127 		 * 2. check tx/rx_nbits match the mode in spi_device
4128 		 */
4129 		if (xfer->tx_buf) {
4130 			if (spi->mode & SPI_NO_TX)
4131 				return -EINVAL;
4132 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
4133 				xfer->tx_nbits != SPI_NBITS_DUAL &&
4134 				xfer->tx_nbits != SPI_NBITS_QUAD &&
4135 				xfer->tx_nbits != SPI_NBITS_OCTAL)
4136 				return -EINVAL;
4137 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
4138 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL)))
4139 				return -EINVAL;
4140 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
4141 				!(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL)))
4142 				return -EINVAL;
4143 			if ((xfer->tx_nbits == SPI_NBITS_OCTAL) &&
4144 				!(spi->mode & SPI_TX_OCTAL))
4145 				return -EINVAL;
4146 		}
4147 		/* Check transfer rx_nbits */
4148 		if (xfer->rx_buf) {
4149 			if (spi->mode & SPI_NO_RX)
4150 				return -EINVAL;
4151 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
4152 				xfer->rx_nbits != SPI_NBITS_DUAL &&
4153 				xfer->rx_nbits != SPI_NBITS_QUAD &&
4154 				xfer->rx_nbits != SPI_NBITS_OCTAL)
4155 				return -EINVAL;
4156 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
4157 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
4158 				return -EINVAL;
4159 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
4160 				!(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL)))
4161 				return -EINVAL;
4162 			if ((xfer->rx_nbits == SPI_NBITS_OCTAL) &&
4163 				!(spi->mode & SPI_RX_OCTAL))
4164 				return -EINVAL;
4165 		}
4166 
4167 		if (_spi_xfer_word_delay_update(xfer, spi))
4168 			return -EINVAL;
4169 
4170 		/* Make sure controller supports required offload features. */
4171 		if (xfer->offload_flags) {
4172 			if (!message->offload)
4173 				return -EINVAL;
4174 
4175 			if (xfer->offload_flags & ~message->offload->xfer_flags)
4176 				return -EINVAL;
4177 		}
4178 	}
4179 
4180 	message->status = -EINPROGRESS;
4181 
4182 	return 0;
4183 }
4184 
4185 /*
4186  * spi_split_transfers - generic handling of transfer splitting
4187  * @msg: the message to split
4188  *
4189  * Under certain conditions, a SPI controller may not support arbitrary
4190  * transfer sizes or other features required by a peripheral. This function
4191  * will split the transfers in the message into smaller transfers that are
4192  * supported by the controller.
4193  *
4194  * Controllers with special requirements not covered here can also split
4195  * transfers in the optimize_message() callback.
4196  *
4197  * Context: can sleep
4198  * Return: zero on success, else a negative error code
4199  */
4200 static int spi_split_transfers(struct spi_message *msg)
4201 {
4202 	struct spi_controller *ctlr = msg->spi->controller;
4203 	struct spi_transfer *xfer;
4204 	int ret;
4205 
4206 	/*
4207 	 * If an SPI controller does not support toggling the CS line on each
4208 	 * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
4209 	 * for the CS line, we can emulate the CS-per-word hardware function by
4210 	 * splitting transfers into one-word transfers and ensuring that
4211 	 * cs_change is set for each transfer.
4212 	 */
4213 	if ((msg->spi->mode & SPI_CS_WORD) &&
4214 	    (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
4215 		ret = spi_split_transfers_maxwords(ctlr, msg, 1);
4216 		if (ret)
4217 			return ret;
4218 
4219 		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
4220 			/* Don't change cs_change on the last entry in the list */
4221 			if (list_is_last(&xfer->transfer_list, &msg->transfers))
4222 				break;
4223 
4224 			xfer->cs_change = 1;
4225 		}
4226 	} else {
4227 		ret = spi_split_transfers_maxsize(ctlr, msg,
4228 						  spi_max_transfer_size(msg->spi));
4229 		if (ret)
4230 			return ret;
4231 	}
4232 
4233 	return 0;
4234 }
4235 
4236 /*
4237  * __spi_optimize_message - shared implementation for spi_optimize_message()
4238  *                          and spi_maybe_optimize_message()
4239  * @spi: the device that will be used for the message
4240  * @msg: the message to optimize
4241  *
4242  * Peripheral drivers will call spi_optimize_message() and the spi core will
4243  * call spi_maybe_optimize_message() instead of calling this directly.
4244  *
4245  * It is not valid to call this on a message that has already been optimized.
4246  *
4247  * Return: zero on success, else a negative error code
4248  */
4249 static int __spi_optimize_message(struct spi_device *spi,
4250 				  struct spi_message *msg)
4251 {
4252 	struct spi_controller *ctlr = spi->controller;
4253 	int ret;
4254 
4255 	ret = __spi_validate(spi, msg);
4256 	if (ret)
4257 		return ret;
4258 
4259 	ret = spi_split_transfers(msg);
4260 	if (ret)
4261 		return ret;
4262 
4263 	if (ctlr->optimize_message) {
4264 		ret = ctlr->optimize_message(msg);
4265 		if (ret) {
4266 			spi_res_release(ctlr, msg);
4267 			return ret;
4268 		}
4269 	}
4270 
4271 	msg->optimized = true;
4272 
4273 	return 0;
4274 }
4275 
4276 /*
4277  * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
4278  * @spi: the device that will be used for the message
4279  * @msg: the message to optimize
4280  * Return: zero on success, else a negative error code
4281  */
4282 static int spi_maybe_optimize_message(struct spi_device *spi,
4283 				      struct spi_message *msg)
4284 {
4285 	if (spi->controller->defer_optimize_message) {
4286 		msg->spi = spi;
4287 		return 0;
4288 	}
4289 
4290 	if (msg->pre_optimized)
4291 		return 0;
4292 
4293 	return __spi_optimize_message(spi, msg);
4294 }
4295 
4296 /**
4297  * spi_optimize_message - do any one-time validation and setup for a SPI message
4298  * @spi: the device that will be used for the message
4299  * @msg: the message to optimize
4300  *
4301  * Peripheral drivers that reuse the same message repeatedly may call this to
4302  * perform as much message prep as possible once, rather than repeating it each
4303  * time a message transfer is performed to improve throughput and reduce CPU
4304  * usage.
4305  *
4306  * Once a message has been optimized, it cannot be modified with the exception
4307  * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
4308  * only the data in the memory it points to).
4309  *
4310  * Calls to this function must be balanced with calls to spi_unoptimize_message()
4311  * to avoid leaking resources.
4312  *
4313  * Context: can sleep
4314  * Return: zero on success, else a negative error code
4315  */
4316 int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
4317 {
4318 	int ret;
4319 
4320 	/*
4321 	 * Pre-optimization is not supported and optimization is deferred e.g.
4322 	 * when using spi-mux.
4323 	 */
4324 	if (spi->controller->defer_optimize_message)
4325 		return 0;
4326 
4327 	ret = __spi_optimize_message(spi, msg);
4328 	if (ret)
4329 		return ret;
4330 
4331 	/*
4332 	 * This flag indicates that the peripheral driver called spi_optimize_message()
4333 	 * and therefore we shouldn't unoptimize message automatically when finalizing
4334 	 * the message but rather wait until spi_unoptimize_message() is called
4335 	 * by the peripheral driver.
4336 	 */
4337 	msg->pre_optimized = true;
4338 
4339 	return 0;
4340 }
4341 EXPORT_SYMBOL_GPL(spi_optimize_message);
4342 
4343 /**
4344  * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
4345  * @msg: the message to unoptimize
4346  *
4347  * Calls to this function must be balanced with calls to spi_optimize_message().
4348  *
4349  * Context: can sleep
4350  */
4351 void spi_unoptimize_message(struct spi_message *msg)
4352 {
4353 	if (msg->spi->controller->defer_optimize_message)
4354 		return;
4355 
4356 	__spi_unoptimize_message(msg);
4357 	msg->pre_optimized = false;
4358 }
4359 EXPORT_SYMBOL_GPL(spi_unoptimize_message);
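
/*
 * A sketch of the optimize/reuse/unoptimize lifecycle for a message that
 * is submitted repeatedly. The foo_ name is hypothetical; the buffer and
 * length come from the caller and must stay valid for the message's
 * lifetime, since an optimized message may not be modified.
 */
#if 0
static int foo_stream(struct spi_device *spi, void *buf, size_t len, int reps)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
	};
	struct spi_message msg;
	int i, ret;

	spi_message_init_with_transfers(&msg, &xfer, 1);

	/* Pay the validation and splitting cost once... */
	ret = spi_optimize_message(spi, &msg);
	if (ret)
		return ret;

	/* ...then reuse the message; only the tx_buf contents may change */
	for (i = 0; i < reps; i++) {
		ret = spi_sync(spi, &msg);
		if (ret)
			break;
	}

	spi_unoptimize_message(&msg);
	return ret;
}
#endif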
4360 
4361 static int __spi_async(struct spi_device *spi, struct spi_message *message)
4362 {
4363 	struct spi_controller *ctlr = spi->controller;
4364 	struct spi_transfer *xfer;
4365 
4366 	/*
4367 	 * Some controllers do not support doing regular SPI transfers. Return
4368 	 * ENOTSUPP when this is the case.
4369 	 */
4370 	if (!ctlr->transfer)
4371 		return -ENOTSUPP;
4372 
4373 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_async);
4374 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_async);
4375 
4376 	trace_spi_message_submit(message);
4377 
4378 	if (!ctlr->ptp_sts_supported) {
4379 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
4380 			xfer->ptp_sts_word_pre = 0;
4381 			ptp_read_system_prets(xfer->ptp_sts);
4382 		}
4383 	}
4384 
4385 	return ctlr->transfer(spi, message);
4386 }
4387 
4388 static void devm_spi_unoptimize_message(void *msg)
4389 {
4390 	spi_unoptimize_message(msg);
4391 }
4392 
4393 /**
4394  * devm_spi_optimize_message - managed version of spi_optimize_message()
4395  * @dev: the device that manages @msg (usually @spi->dev)
4396  * @spi: the device that will be used for the message
4397  * @msg: the message to optimize
4398  * Return: zero on success, else a negative error code
4399  *
4400  * spi_unoptimize_message() will automatically be called when the device is
4401  * removed.
4402  */
4403 int devm_spi_optimize_message(struct device *dev, struct spi_device *spi,
4404 			      struct spi_message *msg)
4405 {
4406 	int ret;
4407 
4408 	ret = spi_optimize_message(spi, msg);
4409 	if (ret)
4410 		return ret;
4411 
4412 	return devm_add_action_or_reset(dev, devm_spi_unoptimize_message, msg);
4413 }
4414 EXPORT_SYMBOL_GPL(devm_spi_optimize_message);
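
/*
 * A probe-time sketch, assuming a hypothetical peripheral driver whose
 * private struct embeds a long-lived message; struct foo_priv and the
 * foo_ names are assumptions for illustration only.
 */
#if 0
struct foo_priv {
	u8 cmd[4];
	struct spi_transfer xfer;
	struct spi_message msg;
};

static int foo_probe(struct spi_device *spi)
{
	struct foo_priv *priv;

	priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->xfer.tx_buf = priv->cmd;
	priv->xfer.len = sizeof(priv->cmd);
	spi_message_init_with_transfers(&priv->msg, &priv->xfer, 1);
	spi_set_drvdata(spi, priv);

	/* Optimized once here, unoptimized automatically on unbind */
	return devm_spi_optimize_message(&spi->dev, spi, &priv->msg);
}
#endif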
4415 
4416 /**
4417  * spi_async - asynchronous SPI transfer
4418  * @spi: device with which data will be exchanged
4419  * @message: describes the data transfers, including completion callback
4420  * Context: any (IRQs may be blocked, etc)
4421  *
4422  * This call may be used from IRQ context and other contexts which can't sleep,
4423  * as well as from task contexts which can sleep.
4424  *
4425  * The completion callback is invoked in a context which can't sleep.
4426  * Before that invocation, the value of message->status is undefined.
4427  * When the callback is issued, message->status holds either zero (to
4428  * indicate complete success) or a negative error code.  After that
4429  * callback returns, the driver which issued the transfer request may
4430  * deallocate the associated memory; it's no longer in use by any SPI
4431  * core or controller driver code.
4432  *
4433  * Note that although all messages to a spi_device are handled in
4434  * FIFO order, messages may go to different devices in other orders.
4435  * Some device might be higher priority, or have various "hard" access
4436  * Some devices might be higher priority, or have various "hard" access
4437  *
4438  * On detection of any fault during the transfer, processing of
4439  * the entire message is aborted, and the device is deselected.
4440  * Until returning from the associated message completion callback,
4441  * no other spi_message queued to that device will be processed.
4442  * (This rule applies equally to all the synchronous transfer calls,
4443  * which are wrappers around this core asynchronous primitive.)
4444  *
4445  * Return: zero on success, else a negative error code.
4446  */
4447 int spi_async(struct spi_device *spi, struct spi_message *message)
4448 {
4449 	struct spi_controller *ctlr = spi->controller;
4450 	int ret;
4451 	unsigned long flags;
4452 
4453 	ret = spi_maybe_optimize_message(spi, message);
4454 	if (ret)
4455 		return ret;
4456 
4457 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4458 
4459 	if (ctlr->bus_lock_flag)
4460 		ret = -EBUSY;
4461 	else
4462 		ret = __spi_async(spi, message);
4463 
4464 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4465 
4466 	return ret;
4467 }
4468 EXPORT_SYMBOL_GPL(spi_async);
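
/*
 * A submit-then-reap sketch: the completion callback runs in a context
 * that can't sleep, so here it only signals a completion that the
 * submitter waits on after doing other work. The foo_* names are
 * hypothetical.
 */
#if 0
static void foo_msg_done(void *context)
{
	complete(context);
}

static int foo_send(struct spi_device *spi, struct spi_message *msg)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	msg->complete = foo_msg_done;
	msg->context = &done;

	ret = spi_async(spi, msg);
	if (ret)
		return ret;

	/* ... do unrelated work while the message is in flight ... */

	wait_for_completion(&done);
	return msg->status;
}
#endif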
4469 
4470 static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
4471 {
4472 	bool was_busy;
4473 	int ret;
4474 
4475 	mutex_lock(&ctlr->io_mutex);
4476 
4477 	was_busy = ctlr->busy;
4478 
4479 	ctlr->cur_msg = msg;
4480 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
4481 	if (ret)
4482 		dev_err(&ctlr->dev, "noqueue transfer failed\n");
4483 	ctlr->cur_msg = NULL;
4484 	ctlr->fallback = false;
4485 
4486 	if (!was_busy) {
4487 		kfree(ctlr->dummy_rx);
4488 		ctlr->dummy_rx = NULL;
4489 		kfree(ctlr->dummy_tx);
4490 		ctlr->dummy_tx = NULL;
4491 		if (ctlr->unprepare_transfer_hardware &&
4492 		    ctlr->unprepare_transfer_hardware(ctlr))
4493 			dev_err(&ctlr->dev,
4494 				"failed to unprepare transfer hardware\n");
4495 		spi_idle_runtime_pm(ctlr);
4496 	}
4497 
4498 	mutex_unlock(&ctlr->io_mutex);
4499 }
4500 
4501 /*-------------------------------------------------------------------------*/
4502 
4503 /*
4504  * Utility methods for SPI protocol drivers, layered on
4505  * top of the core.  Some other utility methods are defined as
4506  * inline functions.
4507  */
4508 
4509 static void spi_complete(void *arg)
4510 {
4511 	complete(arg);
4512 }
4513 
4514 static int __spi_sync(struct spi_device *spi, struct spi_message *message)
4515 {
4516 	DECLARE_COMPLETION_ONSTACK(done);
4517 	unsigned long flags;
4518 	int status;
4519 	struct spi_controller *ctlr = spi->controller;
4520 
4521 	if (__spi_check_suspended(ctlr)) {
4522 		dev_warn_once(&spi->dev, "Attempted to sync while suspended\n");
4523 		return -ESHUTDOWN;
4524 	}
4525 
4526 	status = spi_maybe_optimize_message(spi, message);
4527 	if (status)
4528 		return status;
4529 
4530 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
4531 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
4532 
4533 	/*
4534 	 * Checking queue_empty here only guarantees async/sync message
4535 	 * ordering when coming from the same context. It does not need to
4536 	 * guard against reentrancy from a different context. The io_mutex
4537 	 * will catch those cases.
4538 	 */
4539 	if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
4540 		message->actual_length = 0;
4541 		message->status = -EINPROGRESS;
4542 
4543 		trace_spi_message_submit(message);
4544 
4545 		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
4546 		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
4547 
4548 		__spi_transfer_message_noqueue(ctlr, message);
4549 
4550 		return message->status;
4551 	}
4552 
4553 	/*
4554 	 * There are messages in the async queue that could have originated
4555 	 * from the same context, so we need to preserve ordering.
4556 	 * Therefore we send the message to the async queue and wait until it
4557 	 * has completed.
4558 	 */
4559 	message->complete = spi_complete;
4560 	message->context = &done;
4561 
4562 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4563 	status = __spi_async(spi, message);
4564 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4565 
4566 	if (status == 0) {
4567 		wait_for_completion(&done);
4568 		status = message->status;
4569 	}
4570 	message->complete = NULL;
4571 	message->context = NULL;
4572 
4573 	return status;
4574 }
4575 
4576 /**
4577  * spi_sync - blocking/synchronous SPI data transfers
4578  * @spi: device with which data will be exchanged
4579  * @message: describes the data transfers
4580  * Context: can sleep
4581  *
4582  * This call may only be used from a context that may sleep.  The sleep
4583  * is non-interruptible, and has no timeout.  Low-overhead controller
4584  * drivers may DMA directly into and out of the message buffers.
4585  *
4586  * Note that the SPI device's chip select is active during the message,
4587  * and then is normally disabled between messages.  Drivers for some
4588  * frequently-used devices may want to minimize costs of selecting a chip,
4589  * by leaving it selected in anticipation that the next message will go
4590  * to the same chip.  (That may increase power usage.)
4591  *
4592  * Also, the caller is guaranteeing that the memory associated with the
4593  * message will not be freed before this call returns.
4594  *
4595  * Return: zero on success, else a negative error code.
4596  */
4597 int spi_sync(struct spi_device *spi, struct spi_message *message)
4598 {
4599 	int ret;
4600 
4601 	mutex_lock(&spi->controller->bus_lock_mutex);
4602 	ret = __spi_sync(spi, message);
4603 	mutex_unlock(&spi->controller->bus_lock_mutex);
4604 
4605 	return ret;
4606 }
4607 EXPORT_SYMBOL_GPL(spi_sync);
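
/*
 * Most callers won't open-code the message: spi_sync_transfer(), an
 * inline wrapper from <linux/spi/spi.h>, builds it and calls spi_sync().
 * A full-duplex sketch with a hypothetical foo_ name; in a real driver
 * the buffers must be DMA-safe if the controller uses DMA.
 */
#if 0
static int foo_full_duplex(struct spi_device *spi, const void *tx,
			   void *rx, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = len,
	};

	return spi_sync_transfer(spi, &xfer, 1);
}
#endif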
4608 
4609 /**
4610  * spi_sync_locked - version of spi_sync with exclusive bus usage
4611  * @spi: device with which data will be exchanged
4612  * @message: describes the data transfers
4613  * Context: can sleep
4614  *
4615  * This call may only be used from a context that may sleep.  The sleep
4616  * is non-interruptible, and has no timeout.  Low-overhead controller
4617  * drivers may DMA directly into and out of the message buffers.
4618  *
4619  * This call should be used by drivers that require exclusive access to the
4620  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
4621  * be released by a spi_bus_unlock call when the exclusive access is over.
4622  *
4623  * Return: zero on success, else a negative error code.
4624  */
4625 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
4626 {
4627 	return __spi_sync(spi, message);
4628 }
4629 EXPORT_SYMBOL_GPL(spi_sync_locked);
4630 
4631 /**
4632  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
4633  * @ctlr: SPI bus controller that should be locked for exclusive bus access
4634  * Context: can sleep
4635  *
4636  * This call may only be used from a context that may sleep.  The sleep
4637  * is non-interruptible, and has no timeout.
4638  *
4639  * This call should be used by drivers that require exclusive access to the
4640  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
4641  * exclusive access is over. Data transfer must be done by spi_sync_locked
4642  * and spi_async_locked calls when the SPI bus lock is held.
4643  *
4644  * Return: always zero.
4645  */
4646 int spi_bus_lock(struct spi_controller *ctlr)
4647 {
4648 	unsigned long flags;
4649 
4650 	mutex_lock(&ctlr->bus_lock_mutex);
4651 
4652 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
4653 	ctlr->bus_lock_flag = 1;
4654 	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
4655 
4656 	/* Mutex remains locked until spi_bus_unlock() is called */
4657 
4658 	return 0;
4659 }
4660 EXPORT_SYMBOL_GPL(spi_bus_lock);
4661 
4662 /**
4663  * spi_bus_unlock - release the lock for exclusive SPI bus usage
4664  * @ctlr: SPI bus controller that was locked for exclusive bus access
4665  * Context: can sleep
4666  *
4667  * This call may only be used from a context that may sleep.  The sleep
4668  * is non-interruptible, and has no timeout.
4669  *
4670  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
4671  * call.
4672  *
4673  * Return: always zero.
4674  */
4675 int spi_bus_unlock(struct spi_controller *ctlr)
4676 {
4677 	ctlr->bus_lock_flag = 0;
4678 
4679 	mutex_unlock(&ctlr->bus_lock_mutex);
4680 
4681 	return 0;
4682 }
4683 EXPORT_SYMBOL_GPL(spi_bus_unlock);
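
/*
 * The lock/transfer/unlock sequence the two kernel-docs above describe,
 * sketched for a device that must issue two messages back to back with
 * no other bus traffic in between; the foo_ name is hypothetical.
 */
#if 0
static int foo_atomic_pair(struct spi_device *spi,
			   struct spi_message *first,
			   struct spi_message *second)
{
	struct spi_controller *ctlr = spi->controller;
	int ret;

	spi_bus_lock(ctlr);	/* other devices now block in spi_sync() */

	ret = spi_sync_locked(spi, first);
	if (!ret)
		ret = spi_sync_locked(spi, second);

	spi_bus_unlock(ctlr);
	return ret;
}
#endif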
4684 
4685 /* Portable code must never pass more than 32 bytes */
4686 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
4687 
4688 static u8	*buf;
4689 
4690 /**
4691  * spi_write_then_read - SPI synchronous write followed by read
4692  * @spi: device with which data will be exchanged
4693  * @txbuf: data to be written (need not be DMA-safe)
4694  * @n_tx: size of txbuf, in bytes
4695  * @rxbuf: buffer into which data will be read (need not be DMA-safe)
4696  * @n_rx: size of rxbuf, in bytes
4697  * Context: can sleep
4698  *
4699  * This performs a half duplex MicroWire style transaction with the
4700  * device, sending txbuf and then reading rxbuf.  The return value
4701  * is zero for success, else a negative errno status code.
4702  * This call may only be used from a context that may sleep.
4703  *
4704  * Parameters to this routine are always copied using a small buffer.
4705  * Performance-sensitive or bulk transfer code should instead use
4706  * spi_{async,sync}() calls with DMA-safe buffers.
4707  *
4708  * Return: zero on success, else a negative error code.
4709  */
4710 int spi_write_then_read(struct spi_device *spi,
4711 		const void *txbuf, unsigned n_tx,
4712 		void *rxbuf, unsigned n_rx)
4713 {
4714 	static DEFINE_MUTEX(lock);
4715 
4716 	int			status;
4717 	struct spi_message	message;
4718 	struct spi_transfer	x[2];
4719 	u8			*local_buf;
4720 
4721 	/*
4722 	 * Use the preallocated DMA-safe buffer if we can. We can't avoid
4723 	 * copying here (this is purely a convenience API), but we can
4724 	 * keep heap costs out of the hot path unless someone else is
4725 	 * using the pre-allocated buffer or the transfer is too large.
4726 	 */
4727 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
4728 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
4729 				    GFP_KERNEL | GFP_DMA);
4730 		if (!local_buf)
4731 			return -ENOMEM;
4732 	} else {
4733 		local_buf = buf;
4734 	}
4735 
4736 	spi_message_init(&message);
4737 	memset(x, 0, sizeof(x));
4738 	if (n_tx) {
4739 		x[0].len = n_tx;
4740 		spi_message_add_tail(&x[0], &message);
4741 	}
4742 	if (n_rx) {
4743 		x[1].len = n_rx;
4744 		spi_message_add_tail(&x[1], &message);
4745 	}
4746 
4747 	memcpy(local_buf, txbuf, n_tx);
4748 	x[0].tx_buf = local_buf;
4749 	x[1].rx_buf = local_buf + n_tx;
4750 
4751 	/* Do the I/O */
4752 	status = spi_sync(spi, &message);
4753 	if (status == 0)
4754 		memcpy(rxbuf, x[1].rx_buf, n_rx);
4755 
4756 	if (x[0].tx_buf == buf)
4757 		mutex_unlock(&lock);
4758 	else
4759 		kfree(local_buf);
4760 
4761 	return status;
4762 }
4763 EXPORT_SYMBOL_GPL(spi_write_then_read);
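
/*
 * A register-read sketch in the style this helper is meant for: small,
 * infrequent transfers with stack buffers (legal here because the core
 * bounces through its own DMA-safe buffer). The 0x80 read flag is an
 * assumption about a hypothetical device, not a SPI-wide convention.
 */
#if 0
static int foo_read_reg(struct spi_device *spi, u8 reg, u8 *val)
{
	u8 cmd = reg | 0x80;	/* hypothetical "read" bit */

	return spi_write_then_read(spi, &cmd, 1, val, 1);
}
#endif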
4764 
4765 /*-------------------------------------------------------------------------*/
4766 
4767 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
4768 /* Must call put_device() when done with the returned spi_device */
4769 static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
4770 {
4771 	struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
4772 
4773 	return dev ? to_spi_device(dev) : NULL;
4774 }
4775 
4776 /* SPI controllers are not on the SPI bus, so we find them another way */
4777 static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
4778 {
4779 	struct device *dev;
4780 
4781 	dev = class_find_device_by_of_node(&spi_controller_class, node);
4782 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4783 		dev = class_find_device_by_of_node(&spi_target_class, node);
4784 	if (!dev)
4785 		return NULL;
4786 
4787 	/* Reference taken in class_find_device() */
4788 	return container_of(dev, struct spi_controller, dev);
4789 }
4790 
4791 static int of_spi_notify(struct notifier_block *nb, unsigned long action,
4792 			 void *arg)
4793 {
4794 	struct of_reconfig_data *rd = arg;
4795 	struct spi_controller *ctlr;
4796 	struct spi_device *spi;
4797 
4798 	switch (of_reconfig_get_state_change(action, arg)) {
4799 	case OF_RECONFIG_CHANGE_ADD:
4800 		ctlr = of_find_spi_controller_by_node(rd->dn->parent);
4801 		if (ctlr == NULL)
4802 			return NOTIFY_OK;	/* Not for us */
4803 
4804 		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
4805 			put_device(&ctlr->dev);
4806 			return NOTIFY_OK;
4807 		}
4808 
4809 		/*
4810 		 * Clear the flag before adding the device so that fw_devlink
4811 		 * doesn't skip adding consumers to this device.
4812 		 */
4813 		rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE;
4814 		spi = of_register_spi_device(ctlr, rd->dn);
4815 		put_device(&ctlr->dev);
4816 
4817 		if (IS_ERR(spi)) {
4818 			pr_err("%s: failed to create for '%pOF'\n",
4819 					__func__, rd->dn);
4820 			of_node_clear_flag(rd->dn, OF_POPULATED);
4821 			return notifier_from_errno(PTR_ERR(spi));
4822 		}
4823 		break;
4824 
4825 	case OF_RECONFIG_CHANGE_REMOVE:
4826 		/* Already depopulated? */
4827 		if (!of_node_check_flag(rd->dn, OF_POPULATED))
4828 			return NOTIFY_OK;
4829 
4830 		/* Find our device by node */
4831 		spi = of_find_spi_device_by_node(rd->dn);
4832 		if (spi == NULL)
4833 			return NOTIFY_OK;	/* No? not meant for us */
4834 
4835 		/* Unregister takes one ref away */
4836 		spi_unregister_device(spi);
4837 
4838 		/* And put the reference of the find */
4839 		/* And drop the reference taken by the find */
4840 		break;
4841 	}
4842 
4843 	return NOTIFY_OK;
4844 }
4845 
4846 static struct notifier_block spi_of_notifier = {
4847 	.notifier_call = of_spi_notify,
4848 };
4849 #else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4850 extern struct notifier_block spi_of_notifier;
4851 #endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
4852 
4853 #if IS_ENABLED(CONFIG_ACPI)
4854 static int spi_acpi_controller_match(struct device *dev, const void *data)
4855 {
4856 	return device_match_acpi_dev(dev->parent, data);
4857 }
4858 
4859 struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev)
4860 {
4861 	struct device *dev;
4862 
4863 	dev = class_find_device(&spi_controller_class, NULL, adev,
4864 				spi_acpi_controller_match);
4865 	if (!dev && IS_ENABLED(CONFIG_SPI_SLAVE))
4866 		dev = class_find_device(&spi_target_class, NULL, adev,
4867 					spi_acpi_controller_match);
4868 	if (!dev)
4869 		return NULL;
4870 
4871 	return container_of(dev, struct spi_controller, dev);
4872 }
4873 EXPORT_SYMBOL_GPL(acpi_spi_find_controller_by_adev);
4874 
4875 static struct spi_device *acpi_spi_find_device_by_adev(struct acpi_device *adev)
4876 {
4877 	struct device *dev;
4878 
4879 	dev = bus_find_device_by_acpi_dev(&spi_bus_type, adev);
4880 	return to_spi_device(dev);
4881 }
4882 
4883 static int acpi_spi_notify(struct notifier_block *nb, unsigned long value,
4884 			   void *arg)
4885 {
4886 	struct acpi_device *adev = arg;
4887 	struct spi_controller *ctlr;
4888 	struct spi_device *spi;
4889 
4890 	switch (value) {
4891 	case ACPI_RECONFIG_DEVICE_ADD:
4892 		ctlr = acpi_spi_find_controller_by_adev(acpi_dev_parent(adev));
4893 		if (!ctlr)
4894 			break;
4895 
4896 		acpi_register_spi_device(ctlr, adev);
4897 		put_device(&ctlr->dev);
4898 		break;
4899 	case ACPI_RECONFIG_DEVICE_REMOVE:
4900 		if (!acpi_device_enumerated(adev))
4901 			break;
4902 
4903 		spi = acpi_spi_find_device_by_adev(adev);
4904 		if (!spi)
4905 			break;
4906 
4907 		spi_unregister_device(spi);
4908 		put_device(&spi->dev);
4909 		break;
4910 	}
4911 
4912 	return NOTIFY_OK;
4913 }
4914 
4915 static struct notifier_block spi_acpi_notifier = {
4916 	.notifier_call = acpi_spi_notify,
4917 };
4918 #else
4919 extern struct notifier_block spi_acpi_notifier;
4920 #endif
4921 
4922 static int __init spi_init(void)
4923 {
4924 	int	status;
4925 
4926 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
4927 	if (!buf) {
4928 		status = -ENOMEM;
4929 		goto err0;
4930 	}
4931 
4932 	status = bus_register(&spi_bus_type);
4933 	if (status < 0)
4934 		goto err1;
4935 
4936 	status = class_register(&spi_controller_class);
4937 	if (status < 0)
4938 		goto err2;
4939 
4940 	if (IS_ENABLED(CONFIG_SPI_SLAVE)) {
4941 		status = class_register(&spi_target_class);
4942 		if (status < 0)
4943 			goto err3;
4944 	}
4945 
4946 	if (IS_ENABLED(CONFIG_OF_DYNAMIC))
4947 		WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
4948 	if (IS_ENABLED(CONFIG_ACPI))
4949 		WARN_ON(acpi_reconfig_notifier_register(&spi_acpi_notifier));
4950 
4951 	return 0;
4952 
4953 err3:
4954 	class_unregister(&spi_controller_class);
4955 err2:
4956 	bus_unregister(&spi_bus_type);
4957 err1:
4958 	kfree(buf);
4959 	buf = NULL;
4960 err0:
4961 	return status;
4962 }
4963 
4964 /*
4965  * A board_info is normally registered in arch_initcall(),
4966  * but even essential drivers wait till later.
4967  *
4968  * REVISIT only boardinfo really needs static linking. The rest (device and
4969  * driver registration) _could_ be dynamically linked (modular) ... Costs
4970  * include needing to have boardinfo data structures be much more public.
4971  */
4972 postcore_initcall(spi_init);
4973