xref: /linux/drivers/spi/spi.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
1 /*
2  * SPI init/core code
3  *
4  * Copyright (C) 2005 David Brownell
5  * Copyright (C) 2008 Secret Lab Technologies Ltd.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  */
21 
22 #include <linux/kernel.h>
23 #include <linux/kmod.h>
24 #include <linux/device.h>
25 #include <linux/init.h>
26 #include <linux/cache.h>
27 #include <linux/mutex.h>
28 #include <linux/of_device.h>
29 #include <linux/of_irq.h>
30 #include <linux/slab.h>
31 #include <linux/mod_devicetable.h>
32 #include <linux/spi/spi.h>
33 #include <linux/of_gpio.h>
34 #include <linux/pm_runtime.h>
35 #include <linux/export.h>
36 #include <linux/sched/rt.h>
37 #include <linux/delay.h>
38 #include <linux/kthread.h>
39 #include <linux/ioport.h>
40 #include <linux/acpi.h>
41 
42 #define CREATE_TRACE_POINTS
43 #include <trace/events/spi.h>
44 
45 static void spidev_release(struct device *dev)
46 {
47 	struct spi_device	*spi = to_spi_device(dev);
48 
49 	/* spi masters may cleanup for released devices */
50 	if (spi->master->cleanup)
51 		spi->master->cleanup(spi);
52 
53 	spi_master_put(spi->master);
54 	kfree(spi);
55 }
56 
57 static ssize_t
58 modalias_show(struct device *dev, struct device_attribute *a, char *buf)
59 {
60 	const struct spi_device	*spi = to_spi_device(dev);
61 
62 	return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
63 }
64 static DEVICE_ATTR_RO(modalias);
65 
66 static struct attribute *spi_dev_attrs[] = {
67 	&dev_attr_modalias.attr,
68 	NULL,
69 };
70 ATTRIBUTE_GROUPS(spi_dev);
71 
72 /* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
73  * and the sysfs version makes coldplug work too.
74  */
75 
76 static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
77 						const struct spi_device *sdev)
78 {
79 	while (id->name[0]) {
80 		if (!strcmp(sdev->modalias, id->name))
81 			return id;
82 		id++;
83 	}
84 	return NULL;
85 }
86 
87 const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
88 {
89 	const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
90 
91 	return spi_match_id(sdrv->id_table, sdev);
92 }
93 EXPORT_SYMBOL_GPL(spi_get_device_id);
94 
95 static int spi_match_device(struct device *dev, struct device_driver *drv)
96 {
97 	const struct spi_device	*spi = to_spi_device(dev);
98 	const struct spi_driver	*sdrv = to_spi_driver(drv);
99 
100 	/* Attempt an OF style match */
101 	if (of_driver_match_device(dev, drv))
102 		return 1;
103 
104 	/* Then try ACPI */
105 	if (acpi_driver_match_device(dev, drv))
106 		return 1;
107 
108 	if (sdrv->id_table)
109 		return !!spi_match_id(sdrv->id_table, spi);
110 
111 	return strcmp(spi->modalias, drv->name) == 0;
112 }
113 
114 static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
115 {
116 	const struct spi_device		*spi = to_spi_device(dev);
117 
118 	add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
119 	return 0;
120 }
121 
122 #ifdef CONFIG_PM_SLEEP
123 static int spi_legacy_suspend(struct device *dev, pm_message_t message)
124 {
125 	int			value = 0;
126 	struct spi_driver	*drv = to_spi_driver(dev->driver);
127 
128 	/* suspend will stop irqs and dma; no more i/o */
129 	if (drv) {
130 		if (drv->suspend)
131 			value = drv->suspend(to_spi_device(dev), message);
132 		else
133 			dev_dbg(dev, "... can't suspend\n");
134 	}
135 	return value;
136 }
137 
138 static int spi_legacy_resume(struct device *dev)
139 {
140 	int			value = 0;
141 	struct spi_driver	*drv = to_spi_driver(dev->driver);
142 
143 	/* resume may restart the i/o queue */
144 	if (drv) {
145 		if (drv->resume)
146 			value = drv->resume(to_spi_device(dev));
147 		else
148 			dev_dbg(dev, "... can't resume\n");
149 	}
150 	return value;
151 }
152 
153 static int spi_pm_suspend(struct device *dev)
154 {
155 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
156 
157 	if (pm)
158 		return pm_generic_suspend(dev);
159 	else
160 		return spi_legacy_suspend(dev, PMSG_SUSPEND);
161 }
162 
163 static int spi_pm_resume(struct device *dev)
164 {
165 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
166 
167 	if (pm)
168 		return pm_generic_resume(dev);
169 	else
170 		return spi_legacy_resume(dev);
171 }
172 
173 static int spi_pm_freeze(struct device *dev)
174 {
175 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
176 
177 	if (pm)
178 		return pm_generic_freeze(dev);
179 	else
180 		return spi_legacy_suspend(dev, PMSG_FREEZE);
181 }
182 
183 static int spi_pm_thaw(struct device *dev)
184 {
185 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
186 
187 	if (pm)
188 		return pm_generic_thaw(dev);
189 	else
190 		return spi_legacy_resume(dev);
191 }
192 
193 static int spi_pm_poweroff(struct device *dev)
194 {
195 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
196 
197 	if (pm)
198 		return pm_generic_poweroff(dev);
199 	else
200 		return spi_legacy_suspend(dev, PMSG_HIBERNATE);
201 }
202 
203 static int spi_pm_restore(struct device *dev)
204 {
205 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
206 
207 	if (pm)
208 		return pm_generic_restore(dev);
209 	else
210 		return spi_legacy_resume(dev);
211 }
212 #else
213 #define spi_pm_suspend	NULL
214 #define spi_pm_resume	NULL
215 #define spi_pm_freeze	NULL
216 #define spi_pm_thaw	NULL
217 #define spi_pm_poweroff	NULL
218 #define spi_pm_restore	NULL
219 #endif
220 
221 static const struct dev_pm_ops spi_pm = {
222 	.suspend = spi_pm_suspend,
223 	.resume = spi_pm_resume,
224 	.freeze = spi_pm_freeze,
225 	.thaw = spi_pm_thaw,
226 	.poweroff = spi_pm_poweroff,
227 	.restore = spi_pm_restore,
228 	SET_RUNTIME_PM_OPS(
229 		pm_generic_runtime_suspend,
230 		pm_generic_runtime_resume,
231 		NULL
232 	)
233 };
234 
235 struct bus_type spi_bus_type = {
236 	.name		= "spi",
237 	.dev_groups	= spi_dev_groups,
238 	.match		= spi_match_device,
239 	.uevent		= spi_uevent,
240 	.pm		= &spi_pm,
241 };
242 EXPORT_SYMBOL_GPL(spi_bus_type);
243 
244 
245 static int spi_drv_probe(struct device *dev)
246 {
247 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
248 	struct spi_device		*spi = to_spi_device(dev);
249 	int ret;
250 
251 	acpi_dev_pm_attach(&spi->dev, true);
252 	ret = sdrv->probe(spi);
253 	if (ret)
254 		acpi_dev_pm_detach(&spi->dev, true);
255 
256 	return ret;
257 }
258 
259 static int spi_drv_remove(struct device *dev)
260 {
261 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
262 	struct spi_device		*spi = to_spi_device(dev);
263 	int ret;
264 
265 	ret = sdrv->remove(spi);
266 	acpi_dev_pm_detach(&spi->dev, true);
267 
268 	return ret;
269 }
270 
271 static void spi_drv_shutdown(struct device *dev)
272 {
273 	const struct spi_driver		*sdrv = to_spi_driver(dev->driver);
274 
275 	sdrv->shutdown(to_spi_device(dev));
276 }
277 
278 /**
279  * spi_register_driver - register a SPI driver
280  * @sdrv: the driver to register
281  * Context: can sleep
282  */
283 int spi_register_driver(struct spi_driver *sdrv)
284 {
285 	sdrv->driver.bus = &spi_bus_type;
286 	if (sdrv->probe)
287 		sdrv->driver.probe = spi_drv_probe;
288 	if (sdrv->remove)
289 		sdrv->driver.remove = spi_drv_remove;
290 	if (sdrv->shutdown)
291 		sdrv->driver.shutdown = spi_drv_shutdown;
292 	return driver_register(&sdrv->driver);
293 }
294 EXPORT_SYMBOL_GPL(spi_register_driver);
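
/*
 * Example: a minimal protocol driver registered on this bus.  This is an
 * illustrative sketch, not part of this file; "mychip" and its callbacks
 * are hypothetical.  module_spi_driver() expands to module init/exit
 * wrappers around spi_register_driver()/spi_unregister_driver().
 *
 *	static int mychip_probe(struct spi_device *spi)
 *	{
 *		return 0;	// claim the device, set up driver state
 *	}
 *
 *	static int mychip_remove(struct spi_device *spi)
 *	{
 *		return 0;	// release driver state
 *	}
 *
 *	static struct spi_driver mychip_driver = {
 *		.driver = {
 *			.name	= "mychip",
 *			.owner	= THIS_MODULE,
 *		},
 *		.probe	= mychip_probe,
 *		.remove	= mychip_remove,
 *	};
 *	module_spi_driver(mychip_driver);
 */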
295 
296 /*-------------------------------------------------------------------------*/
297 
298 /* SPI devices should normally not be created by SPI device drivers; that
299  * would make them board-specific.  Similarly with SPI master drivers.
300  * Device registration normally goes into files like arch/.../mach.../board-YYY.c
301  * with other readonly (flashable) information about mainboard devices.
302  */
303 
304 struct boardinfo {
305 	struct list_head	list;
306 	struct spi_board_info	board_info;
307 };
308 
309 static LIST_HEAD(board_list);
310 static LIST_HEAD(spi_master_list);
311 
312 /*
313  * Used to protect add/del operations on the board_info list and
314  * spi_master list, and their matching process
315  */
316 static DEFINE_MUTEX(board_lock);
317 
318 /**
319  * spi_alloc_device - Allocate a new SPI device
320  * @master: Controller to which device is connected
321  * Context: can sleep
322  *
323  * Allows a driver to allocate and initialize a spi_device without
324  * registering it immediately.  This allows a driver to directly
325  * fill the spi_device with device parameters before calling
326  * spi_add_device() on it.
327  *
328  * Caller is responsible to call spi_add_device() on the returned
329  * spi_device structure to add it to the SPI master.  If the caller
330  * needs to discard the spi_device without adding it, then it should
331  * call spi_dev_put() on it.
332  *
333  * Returns a pointer to the new device, or NULL.
334  */
335 struct spi_device *spi_alloc_device(struct spi_master *master)
336 {
337 	struct spi_device	*spi;
338 	struct device		*dev = master->dev.parent;
339 
340 	if (!spi_master_get(master))
341 		return NULL;
342 
343 	spi = kzalloc(sizeof(*spi), GFP_KERNEL);
344 	if (!spi) {
345 		dev_err(dev, "cannot alloc spi_device\n");
346 		spi_master_put(master);
347 		return NULL;
348 	}
349 
350 	spi->master = master;
351 	spi->dev.parent = &master->dev;
352 	spi->dev.bus = &spi_bus_type;
353 	spi->dev.release = spidev_release;
354 	spi->cs_gpio = -ENOENT;
355 	device_initialize(&spi->dev);
356 	return spi;
357 }
358 EXPORT_SYMBOL_GPL(spi_alloc_device);
359 
360 static void spi_dev_set_name(struct spi_device *spi)
361 {
362 	struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
363 
364 	if (adev) {
365 		dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
366 		return;
367 	}
368 
369 	dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
370 		     spi->chip_select);
371 }
372 
373 /**
374  * spi_add_device - Add spi_device allocated with spi_alloc_device
375  * @spi: spi_device to register
376  *
377  * Companion function to spi_alloc_device.  Devices allocated with
378  * spi_alloc_device can be added onto the spi bus with this function.
379  *
380  * Returns 0 on success; negative errno on failure
381  */
382 int spi_add_device(struct spi_device *spi)
383 {
384 	static DEFINE_MUTEX(spi_add_lock);
385 	struct spi_master *master = spi->master;
386 	struct device *dev = master->dev.parent;
387 	struct device *d;
388 	int status;
389 
390 	/* Chipselects are numbered 0..max; validate. */
391 	if (spi->chip_select >= master->num_chipselect) {
392 		dev_err(dev, "cs%d >= max %d\n",
393 			spi->chip_select,
394 			master->num_chipselect);
395 		return -EINVAL;
396 	}
397 
398 	/* Set the bus ID string */
399 	spi_dev_set_name(spi);
400 
401 	/* We need to make sure there's no other device with this
402 	 * chipselect **BEFORE** we call setup(), else we'll trash
403 	 * its configuration.  Lock against concurrent add() calls.
404 	 */
405 	mutex_lock(&spi_add_lock);
406 
407 	d = bus_find_device_by_name(&spi_bus_type, NULL, dev_name(&spi->dev));
408 	if (d != NULL) {
409 		dev_err(dev, "chipselect %d already in use\n",
410 				spi->chip_select);
411 		put_device(d);
412 		status = -EBUSY;
413 		goto done;
414 	}
415 
416 	if (master->cs_gpios)
417 		spi->cs_gpio = master->cs_gpios[spi->chip_select];
418 
419 	/* Drivers may modify this initial i/o setup, but will
420 	 * normally rely on the device being setup.  Devices
421 	 * using SPI_CS_HIGH can't coexist well otherwise...
422 	 */
423 	status = spi_setup(spi);
424 	if (status < 0) {
425 		dev_err(dev, "can't setup %s, status %d\n",
426 				dev_name(&spi->dev), status);
427 		goto done;
428 	}
429 
430 	/* Device may be bound to an active driver when this returns */
431 	status = device_add(&spi->dev);
432 	if (status < 0)
433 		dev_err(dev, "can't add %s, status %d\n",
434 				dev_name(&spi->dev), status);
435 	else
436 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
437 
438 done:
439 	mutex_unlock(&spi_add_lock);
440 	return status;
441 }
442 EXPORT_SYMBOL_GPL(spi_add_device);
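
/*
 * Example: the allocate-then-add pairing described above, as an
 * illustrative sketch (the modalias, chip select and speed are
 * hypothetical values):
 *
 *	struct spi_device *spi;
 *	int status;
 *
 *	spi = spi_alloc_device(master);
 *	if (!spi)
 *		return -ENOMEM;
 *
 *	spi->chip_select = 0;
 *	spi->max_speed_hz = 1000000;
 *	strlcpy(spi->modalias, "mychip", sizeof(spi->modalias));
 *
 *	status = spi_add_device(spi);
 *	if (status < 0)
 *		spi_dev_put(spi);	// discard without adding
 */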
443 
444 /**
445  * spi_new_device - instantiate one new SPI device
446  * @master: Controller to which device is connected
447  * @chip: Describes the SPI device
448  * Context: can sleep
449  *
450  * On typical mainboards, this is purely internal, and it's not needed
451  * after board init creates the hard-wired devices.  Some development
452  * platforms may not be able to use spi_register_board_info though, and
453  * this is exported so that for example a USB or parport based adapter
454  * driver could add devices (which it would learn about out-of-band).
455  *
456  * Returns the new device, or NULL.
457  */
458 struct spi_device *spi_new_device(struct spi_master *master,
459 				  struct spi_board_info *chip)
460 {
461 	struct spi_device	*proxy;
462 	int			status;
463 
464 	/* NOTE:  caller did any chip->bus_num checks necessary.
465 	 *
466 	 * Also, unless we change the return value convention to use
467 	 * error-or-pointer (not NULL-or-pointer), troubleshootability
468 	 * suggests syslogged diagnostics are best here (ugh).
469 	 */
470 
471 	proxy = spi_alloc_device(master);
472 	if (!proxy)
473 		return NULL;
474 
475 	WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
476 
477 	proxy->chip_select = chip->chip_select;
478 	proxy->max_speed_hz = chip->max_speed_hz;
479 	proxy->mode = chip->mode;
480 	proxy->irq = chip->irq;
481 	strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
482 	proxy->dev.platform_data = (void *) chip->platform_data;
483 	proxy->controller_data = chip->controller_data;
484 	proxy->controller_state = NULL;
485 
486 	status = spi_add_device(proxy);
487 	if (status < 0) {
488 		spi_dev_put(proxy);
489 		return NULL;
490 	}
491 
492 	return proxy;
493 }
494 EXPORT_SYMBOL_GPL(spi_new_device);
495 
496 static void spi_match_master_to_boardinfo(struct spi_master *master,
497 				struct spi_board_info *bi)
498 {
499 	struct spi_device *dev;
500 
501 	if (master->bus_num != bi->bus_num)
502 		return;
503 
504 	dev = spi_new_device(master, bi);
505 	if (!dev)
506 		dev_err(master->dev.parent, "can't create new device for %s\n",
507 			bi->modalias);
508 }
509 
510 /**
511  * spi_register_board_info - register SPI devices for a given board
512  * @info: array of chip descriptors
513  * @n: how many descriptors are provided
514  * Context: can sleep
515  *
516  * Board-specific early init code calls this (probably during arch_initcall)
517  * with segments of the SPI device table.  Any device nodes are created later,
518  * after the relevant parent SPI controller (bus_num) is defined.  We keep
519  * this table of devices forever, so that reloading a controller driver will
520  * not make Linux forget about these hard-wired devices.
521  *
522  * Other code can also call this, e.g. a particular add-on board might provide
523  * SPI devices through its expansion connector, so code initializing that board
524  * would naturally declare its SPI devices.
525  *
526  * The board info passed can safely be __initdata ... but be careful of
527  * any embedded pointers (platform_data, etc), they're copied as-is.
528  */
529 int spi_register_board_info(struct spi_board_info const *info, unsigned n)
530 {
531 	struct boardinfo *bi;
532 	int i;
533 
534 	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
535 	if (!bi)
536 		return -ENOMEM;
537 
538 	for (i = 0; i < n; i++, bi++, info++) {
539 		struct spi_master *master;
540 
541 		memcpy(&bi->board_info, info, sizeof(*info));
542 		mutex_lock(&board_lock);
543 		list_add_tail(&bi->list, &board_list);
544 		list_for_each_entry(master, &spi_master_list, list)
545 			spi_match_master_to_boardinfo(master, &bi->board_info);
546 		mutex_unlock(&board_lock);
547 	}
548 
549 	return 0;
550 }
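
/*
 * Example: how board init code typically feeds this table, as an
 * illustrative sketch (chip name, bus number and speed are hypothetical):
 *
 *	static struct spi_board_info board_spi_devices[] __initdata = {
 *		{
 *			.modalias	= "mychip",
 *			.bus_num	= 1,
 *			.chip_select	= 0,
 *			.max_speed_hz	= 10000000,
 *			.mode		= SPI_MODE_0,
 *		},
 *	};
 *
 *	spi_register_board_info(board_spi_devices,
 *				ARRAY_SIZE(board_spi_devices));
 */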
551 
552 /*-------------------------------------------------------------------------*/
553 
554 static void spi_set_cs(struct spi_device *spi, bool enable)
555 {
556 	if (spi->mode & SPI_CS_HIGH)
557 		enable = !enable;
558 
559 	if (spi->cs_gpio >= 0)
560 		gpio_set_value(spi->cs_gpio, !enable);
561 	else if (spi->master->set_cs)
562 		spi->master->set_cs(spi, !enable);
563 }
564 
565 /*
566  * spi_transfer_one_message - Default implementation of transfer_one_message()
567  *
568  * This is a standard implementation of transfer_one_message() for
569  * drivers which implement a transfer_one() operation.  It provides
570  * standard handling of delays and chip select management.
571  */
572 static int spi_transfer_one_message(struct spi_master *master,
573 				    struct spi_message *msg)
574 {
575 	struct spi_transfer *xfer;
576 	bool cur_cs = true;
577 	bool keep_cs = false;
578 	int ret = 0;
579 
580 	spi_set_cs(msg->spi, true);
581 
582 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
583 		trace_spi_transfer_start(msg, xfer);
584 
585 		reinit_completion(&master->xfer_completion);
586 
587 		ret = master->transfer_one(master, msg->spi, xfer);
588 		if (ret < 0) {
589 			dev_err(&msg->spi->dev,
590 				"SPI transfer failed: %d\n", ret);
591 			goto out;
592 		}
593 
594 		if (ret > 0)
595 			wait_for_completion(&master->xfer_completion);
596 
597 		trace_spi_transfer_stop(msg, xfer);
598 
599 		if (msg->status != -EINPROGRESS)
600 			goto out;
601 
602 		if (xfer->delay_usecs)
603 			udelay(xfer->delay_usecs);
604 
605 		if (xfer->cs_change) {
606 			if (list_is_last(&xfer->transfer_list,
607 					 &msg->transfers)) {
608 				keep_cs = true;
609 			} else {
610 				cur_cs = !cur_cs;
611 				spi_set_cs(msg->spi, cur_cs);
612 			}
613 		}
614 
615 		msg->actual_length += xfer->len;
616 	}
617 
618 out:
619 	if (ret != 0 || !keep_cs)
620 		spi_set_cs(msg->spi, false);
621 
622 	if (msg->status == -EINPROGRESS)
623 		msg->status = ret;
624 
625 	spi_finalize_current_message(master);
626 
627 	return ret;
628 }
629 
630 /**
631  * spi_finalize_current_transfer - report completion of a transfer
632  * @master: the master reporting that the current transfer is finished
633  * Called by SPI drivers using the core transfer_one_message()
634  * implementation to notify it that the current interrupt driven
635  * transfer has finished and the next one may be scheduled.
636  */
637 void spi_finalize_current_transfer(struct spi_master *master)
638 {
639 	complete(&master->xfer_completion);
640 }
641 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
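
/*
 * Example: how a controller driver pairs transfer_one() with
 * spi_finalize_current_transfer().  Illustrative sketch only; the mychip_*
 * helpers are hypothetical.  A positive return from transfer_one() tells
 * spi_transfer_one_message() to wait on xfer_completion, which the
 * driver's interrupt handler later completes:
 *
 *	static int mychip_transfer_one(struct spi_master *master,
 *				       struct spi_device *spi,
 *				       struct spi_transfer *xfer)
 *	{
 *		mychip_start_transfer(master, xfer);	// kick off hardware
 *		return 1;	// transfer in flight, wait for completion
 *	}
 *
 *	static irqreturn_t mychip_irq(int irq, void *dev_id)
 *	{
 *		struct spi_master *master = dev_id;
 *
 *		spi_finalize_current_transfer(master);
 *		return IRQ_HANDLED;
 *	}
 */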
642 
643 /**
644  * spi_pump_messages - kthread work function which processes spi message queue
645  * @work: pointer to kthread work struct contained in the master struct
646  *
647  * This function checks if there is any spi message in the queue that
648  * needs processing and, if so, calls out to the driver to initialize hardware
649  * and transfer each message.
650  *
651  */
652 static void spi_pump_messages(struct kthread_work *work)
653 {
654 	struct spi_master *master =
655 		container_of(work, struct spi_master, pump_messages);
656 	unsigned long flags;
657 	bool was_busy = false;
658 	int ret;
659 
660 	/* Lock queue and check for queue work */
661 	spin_lock_irqsave(&master->queue_lock, flags);
662 	if (list_empty(&master->queue) || !master->running) {
663 		if (!master->busy) {
664 			spin_unlock_irqrestore(&master->queue_lock, flags);
665 			return;
666 		}
667 		master->busy = false;
668 		spin_unlock_irqrestore(&master->queue_lock, flags);
669 		if (master->unprepare_transfer_hardware &&
670 		    master->unprepare_transfer_hardware(master))
671 			dev_err(&master->dev,
672 				"failed to unprepare transfer hardware\n");
673 		if (master->auto_runtime_pm) {
674 			pm_runtime_mark_last_busy(master->dev.parent);
675 			pm_runtime_put_autosuspend(master->dev.parent);
676 		}
677 		trace_spi_master_idle(master);
678 		return;
679 	}
680 
681 	/* Make sure we are not already running a message */
682 	if (master->cur_msg) {
683 		spin_unlock_irqrestore(&master->queue_lock, flags);
684 		return;
685 	}
686 	/* Extract head of queue */
687 	master->cur_msg =
688 	    list_entry(master->queue.next, struct spi_message, queue);
689 
690 	list_del_init(&master->cur_msg->queue);
691 	if (master->busy)
692 		was_busy = true;
693 	else
694 		master->busy = true;
695 	spin_unlock_irqrestore(&master->queue_lock, flags);
696 
697 	if (!was_busy && master->auto_runtime_pm) {
698 		ret = pm_runtime_get_sync(master->dev.parent);
699 		if (ret < 0) {
700 			dev_err(&master->dev, "Failed to power device: %d\n",
701 				ret);
702 			return;
703 		}
704 	}
705 
706 	if (!was_busy)
707 		trace_spi_master_busy(master);
708 
709 	if (!was_busy && master->prepare_transfer_hardware) {
710 		ret = master->prepare_transfer_hardware(master);
711 		if (ret) {
712 			dev_err(&master->dev,
713 				"failed to prepare transfer hardware\n");
714 
715 			if (master->auto_runtime_pm)
716 				pm_runtime_put(master->dev.parent);
717 			return;
718 		}
719 	}
720 
721 	trace_spi_message_start(master->cur_msg);
722 
723 	if (master->prepare_message) {
724 		ret = master->prepare_message(master, master->cur_msg);
725 		if (ret) {
726 			dev_err(&master->dev,
727 				"failed to prepare message: %d\n", ret);
728 			master->cur_msg->status = ret;
729 			spi_finalize_current_message(master);
730 			return;
731 		}
732 		master->cur_msg_prepared = true;
733 	}
734 
735 	ret = master->transfer_one_message(master, master->cur_msg);
736 	if (ret) {
737 		dev_err(&master->dev,
738 			"failed to transfer one message from queue\n");
739 		return;
740 	}
741 }
742 
743 static int spi_init_queue(struct spi_master *master)
744 {
745 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
746 
747 	INIT_LIST_HEAD(&master->queue);
748 	spin_lock_init(&master->queue_lock);
749 
750 	master->running = false;
751 	master->busy = false;
752 
753 	init_kthread_worker(&master->kworker);
754 	master->kworker_task = kthread_run(kthread_worker_fn,
755 					   &master->kworker, "%s",
756 					   dev_name(&master->dev));
757 	if (IS_ERR(master->kworker_task)) {
758 		dev_err(&master->dev, "failed to create message pump task\n");
759 		return -ENOMEM;
760 	}
761 	init_kthread_work(&master->pump_messages, spi_pump_messages);
762 
763 	/*
764 	 * Master config will indicate if this controller should run the
765 	 * message pump with high (realtime) priority to reduce the transfer
766 	 * latency on the bus by minimising the delay between a transfer
767 	 * request and the scheduling of the message pump thread. Without this
768 	 * setting the message pump thread will remain at default priority.
769 	 */
770 	if (master->rt) {
771 		dev_info(&master->dev,
772 			"will run message pump with realtime priority\n");
773 		sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
774 	}
775 
776 	return 0;
777 }
778 
779 /**
780  * spi_get_next_queued_message() - called by driver to check for queued
781  * messages
782  * @master: the master to check for queued messages
783  *
784  * If there are more messages in the queue, the next message is returned from
785  * this call.
786  */
787 struct spi_message *spi_get_next_queued_message(struct spi_master *master)
788 {
789 	struct spi_message *next;
790 	unsigned long flags;
791 
792 	/* get a pointer to the next message, if any */
793 	spin_lock_irqsave(&master->queue_lock, flags);
794 	if (list_empty(&master->queue))
795 		next = NULL;
796 	else
797 		next = list_entry(master->queue.next,
798 				  struct spi_message, queue);
799 	spin_unlock_irqrestore(&master->queue_lock, flags);
800 
801 	return next;
802 }
803 EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
804 
805 /**
806  * spi_finalize_current_message() - the current message is complete
807  * @master: the master to return the message to
808  *
809  * Called by the driver to notify the core that the message in the front of the
810  * queue is complete and can be removed from the queue.
811  */
812 void spi_finalize_current_message(struct spi_master *master)
813 {
814 	struct spi_message *mesg;
815 	unsigned long flags;
816 	int ret;
817 
818 	spin_lock_irqsave(&master->queue_lock, flags);
819 	mesg = master->cur_msg;
820 	master->cur_msg = NULL;
821 
822 	queue_kthread_work(&master->kworker, &master->pump_messages);
823 	spin_unlock_irqrestore(&master->queue_lock, flags);
824 
825 	if (master->cur_msg_prepared && master->unprepare_message) {
826 		ret = master->unprepare_message(master, mesg);
827 		if (ret) {
828 			dev_err(&master->dev,
829 				"failed to unprepare message: %d\n", ret);
830 		}
831 	}
832 	master->cur_msg_prepared = false;
833 
834 	mesg->state = NULL;
835 	if (mesg->complete)
836 		mesg->complete(mesg->context);
837 
838 	trace_spi_message_done(mesg);
839 }
840 EXPORT_SYMBOL_GPL(spi_finalize_current_message);
841 
842 static int spi_start_queue(struct spi_master *master)
843 {
844 	unsigned long flags;
845 
846 	spin_lock_irqsave(&master->queue_lock, flags);
847 
848 	if (master->running || master->busy) {
849 		spin_unlock_irqrestore(&master->queue_lock, flags);
850 		return -EBUSY;
851 	}
852 
853 	master->running = true;
854 	master->cur_msg = NULL;
855 	spin_unlock_irqrestore(&master->queue_lock, flags);
856 
857 	queue_kthread_work(&master->kworker, &master->pump_messages);
858 
859 	return 0;
860 }
861 
862 static int spi_stop_queue(struct spi_master *master)
863 {
864 	unsigned long flags;
865 	unsigned limit = 500;
866 	int ret = 0;
867 
868 	spin_lock_irqsave(&master->queue_lock, flags);
869 
870 	/*
871 	 * This is a bit lame, but is optimized for the common execution path.
872 	 * A wait_queue on the master->busy could be used, but then the common
873 	 * execution path (pump_messages) would be required to call wake_up or
874 	 * friends on every SPI message. Do this instead.
875 	 */
876 	while ((!list_empty(&master->queue) || master->busy) && limit--) {
877 		spin_unlock_irqrestore(&master->queue_lock, flags);
878 		msleep(10);
879 		spin_lock_irqsave(&master->queue_lock, flags);
880 	}
881 
882 	if (!list_empty(&master->queue) || master->busy)
883 		ret = -EBUSY;
884 	else
885 		master->running = false;
886 
887 	spin_unlock_irqrestore(&master->queue_lock, flags);
888 
889 	if (ret) {
890 		dev_warn(&master->dev,
891 			 "could not stop message queue\n");
892 		return ret;
893 	}
894 	return ret;
895 }
896 
897 static int spi_destroy_queue(struct spi_master *master)
898 {
899 	int ret;
900 
901 	ret = spi_stop_queue(master);
902 
903 	/*
904 	 * flush_kthread_worker will block until all work is done.
905 	 * If the reason that stop_queue timed out is that the work will never
906 	 * finish, then it does no good to call flush/stop thread, so
907 	 * return anyway.
908 	 */
909 	if (ret) {
910 		dev_err(&master->dev, "problem destroying queue\n");
911 		return ret;
912 	}
913 
914 	flush_kthread_worker(&master->kworker);
915 	kthread_stop(master->kworker_task);
916 
917 	return 0;
918 }
919 
920 /**
921  * spi_queued_transfer - transfer function for queued transfers
922  * @spi: spi device which is requesting transfer
923  * @msg: spi message which is to be handled and queued to the driver queue
924  */
925 static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
926 {
927 	struct spi_master *master = spi->master;
928 	unsigned long flags;
929 
930 	spin_lock_irqsave(&master->queue_lock, flags);
931 
932 	if (!master->running) {
933 		spin_unlock_irqrestore(&master->queue_lock, flags);
934 		return -ESHUTDOWN;
935 	}
936 	msg->actual_length = 0;
937 	msg->status = -EINPROGRESS;
938 
939 	list_add_tail(&msg->queue, &master->queue);
940 	if (!master->busy)
941 		queue_kthread_work(&master->kworker, &master->pump_messages);
942 
943 	spin_unlock_irqrestore(&master->queue_lock, flags);
944 	return 0;
945 }
946 
947 static int spi_master_initialize_queue(struct spi_master *master)
948 {
949 	int ret;
950 
951 	master->queued = true;
952 	master->transfer = spi_queued_transfer;
953 	if (!master->transfer_one_message)
954 		master->transfer_one_message = spi_transfer_one_message;
955 
956 	/* Initialize and start queue */
957 	ret = spi_init_queue(master);
958 	if (ret) {
959 		dev_err(&master->dev, "problem initializing queue\n");
960 		goto err_init_queue;
961 	}
962 	ret = spi_start_queue(master);
963 	if (ret) {
964 		dev_err(&master->dev, "problem starting queue\n");
965 		goto err_start_queue;
966 	}
967 
968 	return 0;
969 
970 err_start_queue:
971 err_init_queue:
972 	spi_destroy_queue(master);
973 	return ret;
974 }
975 
976 /*-------------------------------------------------------------------------*/
977 
978 #if defined(CONFIG_OF)
979 /**
980  * of_register_spi_devices() - Register child devices onto the SPI bus
981  * @master:	Pointer to spi_master device
982  *
983  * Registers an spi_device for each child node of the master node which has a 'reg'
984  * property.
985  */
986 static void of_register_spi_devices(struct spi_master *master)
987 {
988 	struct spi_device *spi;
989 	struct device_node *nc;
990 	int rc;
991 	u32 value;
992 
993 	if (!master->dev.of_node)
994 		return;
995 
996 	for_each_available_child_of_node(master->dev.of_node, nc) {
997 		/* Alloc an spi_device */
998 		spi = spi_alloc_device(master);
999 		if (!spi) {
1000 			dev_err(&master->dev, "spi_device alloc error for %s\n",
1001 				nc->full_name);
1002 			spi_dev_put(spi);
1003 			continue;
1004 		}
1005 
1006 		/* Select device driver */
1007 		if (of_modalias_node(nc, spi->modalias,
1008 				     sizeof(spi->modalias)) < 0) {
1009 			dev_err(&master->dev, "cannot find modalias for %s\n",
1010 				nc->full_name);
1011 			spi_dev_put(spi);
1012 			continue;
1013 		}
1014 
1015 		/* Device address */
1016 		rc = of_property_read_u32(nc, "reg", &value);
1017 		if (rc) {
1018 			dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
1019 				nc->full_name, rc);
1020 			spi_dev_put(spi);
1021 			continue;
1022 		}
1023 		spi->chip_select = value;
1024 
1025 		/* Mode (clock phase/polarity/etc.) */
1026 		if (of_find_property(nc, "spi-cpha", NULL))
1027 			spi->mode |= SPI_CPHA;
1028 		if (of_find_property(nc, "spi-cpol", NULL))
1029 			spi->mode |= SPI_CPOL;
1030 		if (of_find_property(nc, "spi-cs-high", NULL))
1031 			spi->mode |= SPI_CS_HIGH;
1032 		if (of_find_property(nc, "spi-3wire", NULL))
1033 			spi->mode |= SPI_3WIRE;
1034 
1035 		/* Device DUAL/QUAD mode */
1036 		if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
1037 			switch (value) {
1038 			case 1:
1039 				break;
1040 			case 2:
1041 				spi->mode |= SPI_TX_DUAL;
1042 				break;
1043 			case 4:
1044 				spi->mode |= SPI_TX_QUAD;
1045 				break;
1046 			default:
1047 				dev_err(&master->dev,
1048 					"spi-tx-bus-width %d not supported\n",
1049 					value);
1050 				spi_dev_put(spi);
1051 				continue;
1052 			}
1053 		}
1054 
1055 		if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
1056 			switch (value) {
1057 			case 1:
1058 				break;
1059 			case 2:
1060 				spi->mode |= SPI_RX_DUAL;
1061 				break;
1062 			case 4:
1063 				spi->mode |= SPI_RX_QUAD;
1064 				break;
1065 			default:
1066 				dev_err(&master->dev,
1067 					"spi-rx-bus-width %d not supported\n",
1068 					value);
1069 				spi_dev_put(spi);
1070 				continue;
1071 			}
1072 		}
1073 
1074 		/* Device speed */
1075 		rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1076 		if (rc) {
1077 			dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
1078 				nc->full_name, rc);
1079 			spi_dev_put(spi);
1080 			continue;
1081 		}
1082 		spi->max_speed_hz = value;
1083 
1084 		/* IRQ */
1085 		spi->irq = irq_of_parse_and_map(nc, 0);
1086 
1087 		/* Store a pointer to the node in the device structure */
1088 		of_node_get(nc);
1089 		spi->dev.of_node = nc;
1090 
1091 		/* Register the new device */
1092 		request_module("%s%s", SPI_MODULE_PREFIX, spi->modalias);
1093 		rc = spi_add_device(spi);
1094 		if (rc) {
1095 			dev_err(&master->dev, "spi_device register error %s\n",
1096 				nc->full_name);
1097 			spi_dev_put(spi);
1098 		}
1099 
1100 	}
1101 }
1102 #else
1103 static void of_register_spi_devices(struct spi_master *master) { }
1104 #endif
1105 
1106 #ifdef CONFIG_ACPI
1107 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1108 {
1109 	struct spi_device *spi = data;
1110 
1111 	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1112 		struct acpi_resource_spi_serialbus *sb;
1113 
1114 		sb = &ares->data.spi_serial_bus;
1115 		if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1116 			spi->chip_select = sb->device_selection;
1117 			spi->max_speed_hz = sb->connection_speed;
1118 
1119 			if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1120 				spi->mode |= SPI_CPHA;
1121 			if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1122 				spi->mode |= SPI_CPOL;
1123 			if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1124 				spi->mode |= SPI_CS_HIGH;
1125 		}
1126 	} else if (spi->irq < 0) {
1127 		struct resource r;
1128 
1129 		if (acpi_dev_resource_interrupt(ares, 0, &r))
1130 			spi->irq = r.start;
1131 	}
1132 
1133 	/* Always tell the ACPI core to skip this resource */
1134 	return 1;
1135 }
1136 
1137 static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1138 				       void *data, void **return_value)
1139 {
1140 	struct spi_master *master = data;
1141 	struct list_head resource_list;
1142 	struct acpi_device *adev;
1143 	struct spi_device *spi;
1144 	int ret;
1145 
1146 	if (acpi_bus_get_device(handle, &adev))
1147 		return AE_OK;
1148 	if (acpi_bus_get_status(adev) || !adev->status.present)
1149 		return AE_OK;
1150 
1151 	spi = spi_alloc_device(master);
1152 	if (!spi) {
1153 		dev_err(&master->dev, "failed to allocate SPI device for %s\n",
1154 			dev_name(&adev->dev));
1155 		return AE_NO_MEMORY;
1156 	}
1157 
1158 	ACPI_COMPANION_SET(&spi->dev, adev);
1159 	spi->irq = -1;
1160 
1161 	INIT_LIST_HEAD(&resource_list);
1162 	ret = acpi_dev_get_resources(adev, &resource_list,
1163 				     acpi_spi_add_resource, spi);
1164 	acpi_dev_free_resource_list(&resource_list);
1165 
1166 	if (ret < 0 || !spi->max_speed_hz) {
1167 		spi_dev_put(spi);
1168 		return AE_OK;
1169 	}
1170 
1171 	adev->power.flags.ignore_parent = true;
1172 	strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
1173 	if (spi_add_device(spi)) {
1174 		adev->power.flags.ignore_parent = false;
1175 		dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
1176 			dev_name(&adev->dev));
1177 		spi_dev_put(spi);
1178 	}
1179 
1180 	return AE_OK;
1181 }
1182 
1183 static void acpi_register_spi_devices(struct spi_master *master)
1184 {
1185 	acpi_status status;
1186 	acpi_handle handle;
1187 
1188 	handle = ACPI_HANDLE(master->dev.parent);
1189 	if (!handle)
1190 		return;
1191 
1192 	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
1193 				     acpi_spi_add_device, NULL,
1194 				     master, NULL);
1195 	if (ACPI_FAILURE(status))
1196 		dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
1197 }
1198 #else
1199 static inline void acpi_register_spi_devices(struct spi_master *master) {}
1200 #endif /* CONFIG_ACPI */
1201 
1202 static void spi_master_release(struct device *dev)
1203 {
1204 	struct spi_master *master;
1205 
1206 	master = container_of(dev, struct spi_master, dev);
1207 	kfree(master);
1208 }
1209 
1210 static struct class spi_master_class = {
1211 	.name		= "spi_master",
1212 	.owner		= THIS_MODULE,
1213 	.dev_release	= spi_master_release,
1214 };
1215 
1216 
1217 
1218 /**
1219  * spi_alloc_master - allocate SPI master controller
1220  * @dev: the controller, possibly using the platform_bus
1221  * @size: how much zeroed driver-private data to allocate; the pointer to this
1222  *	memory is in the driver_data field of the returned device,
1223  *	accessible with spi_master_get_devdata().
1224  * Context: can sleep
1225  *
1226  * This call is used only by SPI master controller drivers, which are the
1227  * only ones directly touching chip registers.  It's how they allocate
1228  * an spi_master structure, prior to calling spi_register_master().
1229  *
1230  * This must be called from context that can sleep.  It returns the SPI
1231  * master structure on success, else NULL.
1232  *
1233  * The caller is responsible for assigning the bus number and initializing
1234  * the master's methods before calling spi_register_master(); and (after errors
1235  * adding the device) calling spi_master_put() to prevent a memory
1236  * leak.
1237  */
1238 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
1239 {
1240 	struct spi_master	*master;
1241 
1242 	if (!dev)
1243 		return NULL;
1244 
1245 	master = kzalloc(size + sizeof(*master), GFP_KERNEL);
1246 	if (!master)
1247 		return NULL;
1248 
1249 	device_initialize(&master->dev);
1250 	master->bus_num = -1;
1251 	master->num_chipselect = 1;
1252 	master->dev.class = &spi_master_class;
1253 	master->dev.parent = get_device(dev);
1254 	spi_master_set_devdata(master, &master[1]);
1255 
1256 	return master;
1257 }
1258 EXPORT_SYMBOL_GPL(spi_alloc_master);
1259 
1260 #ifdef CONFIG_OF
1261 static int of_spi_register_master(struct spi_master *master)
1262 {
1263 	int nb, i, *cs;
1264 	struct device_node *np = master->dev.of_node;
1265 
1266 	if (!np)
1267 		return 0;
1268 
1269 	nb = of_gpio_named_count(np, "cs-gpios");
1270 	master->num_chipselect = max_t(int, nb, master->num_chipselect);
1271 
1272 	/* Return error only for an incorrectly formed cs-gpios property */
1273 	if (nb == 0 || nb == -ENOENT)
1274 		return 0;
1275 	else if (nb < 0)
1276 		return nb;
1277 
1278 	cs = devm_kzalloc(&master->dev,
1279 			  sizeof(int) * master->num_chipselect,
1280 			  GFP_KERNEL);
1281 	master->cs_gpios = cs;
1282 
1283 	if (!master->cs_gpios)
1284 		return -ENOMEM;
1285 
1286 	for (i = 0; i < master->num_chipselect; i++)
1287 		cs[i] = -ENOENT;
1288 
1289 	for (i = 0; i < nb; i++)
1290 		cs[i] = of_get_named_gpio(np, "cs-gpios", i);
1291 
1292 	return 0;
1293 }
1294 #else
1295 static int of_spi_register_master(struct spi_master *master)
1296 {
1297 	return 0;
1298 }
1299 #endif
1300 
1301 /**
1302  * spi_register_master - register SPI master controller
1303  * @master: initialized master, originally from spi_alloc_master()
1304  * Context: can sleep
1305  *
1306  * SPI master controllers connect to their drivers using some non-SPI bus,
1307  * such as the platform bus.  The final stage of probe() in that code
1308  * includes calling spi_register_master() to hook up to this SPI bus glue.
1309  *
1310  * SPI controllers use board specific (often SOC specific) bus numbers,
1311  * and board-specific addressing for SPI devices combines those numbers
1312  * with chip select numbers.  Since SPI does not directly support dynamic
1313  * device identification, boards need configuration tables telling which
1314  * chip is at which address.
1315  *
1316  * This must be called from context that can sleep.  It returns zero on
1317  * success, else a negative error code (dropping the master's refcount).
1318  * After a successful return, the caller is responsible for calling
1319  * spi_unregister_master().
1320  */
1321 int spi_register_master(struct spi_master *master)
1322 {
1323 	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
1324 	struct device		*dev = master->dev.parent;
1325 	struct boardinfo	*bi;
1326 	int			status = -ENODEV;
1327 	int			dynamic = 0;
1328 
1329 	if (!dev)
1330 		return -ENODEV;
1331 
1332 	status = of_spi_register_master(master);
1333 	if (status)
1334 		return status;
1335 
1336 	/* even if it's just one always-selected device, there must
1337 	 * be at least one chipselect
1338 	 */
1339 	if (master->num_chipselect == 0)
1340 		return -EINVAL;
1341 
1342 	if ((master->bus_num < 0) && master->dev.of_node)
1343 		master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
1344 
1345 	/* convention:  dynamically assigned bus IDs count down from the max */
1346 	if (master->bus_num < 0) {
1347 		/* FIXME switch to an IDR based scheme, something like
1348 		 * I2C now uses, so we can't run out of "dynamic" IDs
1349 		 */
1350 		master->bus_num = atomic_dec_return(&dyn_bus_id);
1351 		dynamic = 1;
1352 	}
1353 
1354 	spin_lock_init(&master->bus_lock_spinlock);
1355 	mutex_init(&master->bus_lock_mutex);
1356 	master->bus_lock_flag = 0;
1357 	init_completion(&master->xfer_completion);
1358 
1359 	/* register the device, then userspace will see it.
1360 	 * registration fails if the bus ID is in use.
1361 	 */
1362 	dev_set_name(&master->dev, "spi%u", master->bus_num);
1363 	status = device_add(&master->dev);
1364 	if (status < 0)
1365 		goto done;
1366 	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
1367 			dynamic ? " (dynamic)" : "");
1368 
1369 	/* If we're using a queued driver, start the queue */
1370 	if (master->transfer)
1371 		dev_info(dev, "master is unqueued, this is deprecated\n");
1372 	else {
1373 		status = spi_master_initialize_queue(master);
1374 		if (status) {
1375 			device_del(&master->dev);
1376 			goto done;
1377 		}
1378 	}
1379 
1380 	mutex_lock(&board_lock);
1381 	list_add_tail(&master->list, &spi_master_list);
1382 	list_for_each_entry(bi, &board_list, list)
1383 		spi_match_master_to_boardinfo(master, &bi->board_info);
1384 	mutex_unlock(&board_lock);
1385 
1386 	/* Register devices from the device tree and ACPI */
1387 	of_register_spi_devices(master);
1388 	acpi_register_spi_devices(master);
1389 done:
1390 	return status;
1391 }
1392 EXPORT_SYMBOL_GPL(spi_register_master);
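
/*
 * Example: the usual shape of a controller driver's probe() built on
 * spi_alloc_master() and spi_register_master().  Illustrative sketch; the
 * mychip names and the chipselect count are hypothetical.
 *
 *	static int mychip_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		int ret;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(struct mychip));
 *		if (!master)
 *			return -ENOMEM;
 *
 *		master->bus_num = pdev->id;
 *		master->num_chipselect = 4;
 *		master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 *		master->transfer_one = mychip_transfer_one;
 *		master->dev.of_node = pdev->dev.of_node;
 *
 *		ret = spi_register_master(master);
 *		if (ret)
 *			spi_master_put(master);	// drop the allocation
 *		return ret;
 *	}
 */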
1393 
1394 static void devm_spi_unregister(struct device *dev, void *res)
1395 {
1396 	spi_unregister_master(*(struct spi_master **)res);
1397 }
1398 
1399 /**
1400  * devm_spi_register_master - register managed SPI master controller
1401  * @dev:    device managing SPI master
1402  * @master: initialized master, originally from spi_alloc_master()
1403  * Context: can sleep
1404  *
1405  * Register an SPI master as with spi_register_master(); the master is
1406  * automatically unregistered when @dev is unbound from its driver.
1407  */
1408 int devm_spi_register_master(struct device *dev, struct spi_master *master)
1409 {
1410 	struct spi_master **ptr;
1411 	int ret;
1412 
1413 	ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
1414 	if (!ptr)
1415 		return -ENOMEM;
1416 
1417 	ret = spi_register_master(master);
1418 	if (!ret) {
1419 		*ptr = master;
1420 		devres_add(dev, ptr);
1421 	} else {
1422 		devres_free(ptr);
1423 	}
1424 
1425 	return ret;
1426 }
1427 EXPORT_SYMBOL_GPL(devm_spi_register_master);
1428 
1429 static int __unregister(struct device *dev, void *null)
1430 {
1431 	spi_unregister_device(to_spi_device(dev));
1432 	return 0;
1433 }
1434 
1435 /**
1436  * spi_unregister_master - unregister SPI master controller
1437  * @master: the master being unregistered
1438  * Context: can sleep
1439  *
1440  * This call is used only by SPI master controller drivers, which are the
1441  * only ones directly touching chip registers.
1442  *
1443  * This must be called from context that can sleep.
1444  */
1445 void spi_unregister_master(struct spi_master *master)
1446 {
1447 	int dummy;
1448 
1449 	if (master->queued) {
1450 		if (spi_destroy_queue(master))
1451 			dev_err(&master->dev, "queue remove failed\n");
1452 	}
1453 
1454 	mutex_lock(&board_lock);
1455 	list_del(&master->list);
1456 	mutex_unlock(&board_lock);
1457 
1458 	dummy = device_for_each_child(&master->dev, NULL, __unregister);
1459 	device_unregister(&master->dev);
1460 }
1461 EXPORT_SYMBOL_GPL(spi_unregister_master);
1462 
1463 int spi_master_suspend(struct spi_master *master)
1464 {
1465 	int ret;
1466 
1467 	/* Basically no-ops for non-queued masters */
1468 	if (!master->queued)
1469 		return 0;
1470 
1471 	ret = spi_stop_queue(master);
1472 	if (ret)
1473 		dev_err(&master->dev, "queue stop failed\n");
1474 
1475 	return ret;
1476 }
1477 EXPORT_SYMBOL_GPL(spi_master_suspend);
1478 
1479 int spi_master_resume(struct spi_master *master)
1480 {
1481 	int ret;
1482 
1483 	if (!master->queued)
1484 		return 0;
1485 
1486 	ret = spi_start_queue(master);
1487 	if (ret)
1488 		dev_err(&master->dev, "queue restart failed\n");
1489 
1490 	return ret;
1491 }
1492 EXPORT_SYMBOL_GPL(spi_master_resume);
1493 
1494 static int __spi_master_match(struct device *dev, const void *data)
1495 {
1496 	struct spi_master *m;
1497 	const u16 *bus_num = data;
1498 
1499 	m = container_of(dev, struct spi_master, dev);
1500 	return m->bus_num == *bus_num;
1501 }
1502 
1503 /**
1504  * spi_busnum_to_master - look up master associated with bus_num
1505  * @bus_num: the master's bus number
1506  * Context: can sleep
1507  *
1508  * This call may be used with devices that are registered after
1509  * arch init time.  It returns a refcounted pointer to the relevant
1510  * spi_master (which the caller must release), or NULL if there is
1511  * no such master registered.
1512  */
1513 struct spi_master *spi_busnum_to_master(u16 bus_num)
1514 {
1515 	struct device		*dev;
1516 	struct spi_master	*master = NULL;
1517 
1518 	dev = class_find_device(&spi_master_class, NULL, &bus_num,
1519 				__spi_master_match);
1520 	if (dev)
1521 		master = container_of(dev, struct spi_master, dev);
1522 	/* reference got in class_find_device */
1523 	return master;
1524 }
1525 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
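
/*
 * Example: looking up a master by bus number and releasing the reference
 * taken by class_find_device() (illustrative sketch):
 *
 *	struct spi_master *master = spi_busnum_to_master(1);
 *
 *	if (master) {
 *		// ... e.g. hand it to spi_new_device() ...
 *		spi_master_put(master);
 *	}
 */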
1526 
1527 
1528 /*-------------------------------------------------------------------------*/
1529 
1530 /* Core methods for SPI master protocol drivers.  Some of the
1531  * other core methods are currently defined as inline functions.
1532  */
1533 
1534 /**
1535  * spi_setup - setup SPI mode and clock rate
1536  * @spi: the device whose settings are being modified
1537  * Context: can sleep, and no requests are queued to the device
1538  *
1539  * SPI protocol drivers may need to update the transfer mode if the
1540  * device doesn't work with its default.  They may likewise need
1541  * to update clock rates or word sizes from initial values.  This function
1542  * changes those settings, and must be called from a context that can sleep.
1543  * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
1544  * effect the next time the device is selected and data is transferred to
1545  * or from it.  When this function returns, the spi device is deselected.
1546  *
1547  * Note that this call will fail if the protocol driver specifies an option
1548  * that the underlying controller or its driver does not support.  For
1549  * example, not all hardware supports wire transfers using nine bit words,
1550  * LSB-first wire encoding, or active-high chipselects.
1551  */
1552 int spi_setup(struct spi_device *spi)
1553 {
1554 	unsigned	bad_bits;
1555 	int		status = 0;
1556 
1557 	/* Check that DUAL and QUAD are not both set at the same time
1558 	 */
1559 	if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
1560 		((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
1561 		dev_err(&spi->dev,
1562 		"setup: can not select dual and quad at the same time\n");
1563 		return -EINVAL;
1564 	}
1565 	/* In SPI_3WIRE mode, DUAL and QUAD are forbidden
1566 	 */
1567 	if ((spi->mode & SPI_3WIRE) && (spi->mode &
1568 		(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
1569 		return -EINVAL;
1570 	/* help drivers fail *cleanly* when they need options
1571 	 * that aren't supported with their current master
1572 	 */
1573 	bad_bits = spi->mode & ~spi->master->mode_bits;
1574 	if (bad_bits) {
1575 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1576 			bad_bits);
1577 		return -EINVAL;
1578 	}
1579 
1580 	if (!spi->bits_per_word)
1581 		spi->bits_per_word = 8;
1582 
1583 	if (spi->master->setup)
1584 		status = spi->master->setup(spi);
1585 
1586 	dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
1587 			(int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
1588 			(spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
1589 			(spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
1590 			(spi->mode & SPI_3WIRE) ? "3wire, " : "",
1591 			(spi->mode & SPI_LOOP) ? "loopback, " : "",
1592 			spi->bits_per_word, spi->max_speed_hz,
1593 			status);
1594 
1595 	return status;
1596 }
1597 EXPORT_SYMBOL_GPL(spi_setup);
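
/*
 * Example: a protocol driver adjusting its settings from probe(), then
 * applying them with spi_setup() (illustrative sketch; the values are
 * hypothetical):
 *
 *	spi->mode = SPI_MODE_3;
 *	spi->bits_per_word = 16;
 *	spi->max_speed_hz = 2000000;
 *
 *	status = spi_setup(spi);
 *	if (status < 0)
 *		dev_err(&spi->dev, "spi_setup failed: %d\n", status);
 */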
1598 
1599 static int __spi_async(struct spi_device *spi, struct spi_message *message)
1600 {
1601 	struct spi_master *master = spi->master;
1602 	struct spi_transfer *xfer;
1603 
1604 	message->spi = spi;
1605 
1606 	trace_spi_message_submit(message);
1607 
1608 	if (list_empty(&message->transfers))
1609 		return -EINVAL;
1610 	if (!message->complete)
1611 		return -EINVAL;
1612 
1613 	/* Half-duplex links include original MicroWire, and ones with
1614 	 * only one data pin like SPI_3WIRE (switches direction) or where
1615 	 * either MOSI or MISO is missing.  They can also be caused by
1616 	 * software limitations.
1617 	 */
1618 	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
1619 			|| (spi->mode & SPI_3WIRE)) {
1620 		unsigned flags = master->flags;
1621 
1622 		list_for_each_entry(xfer, &message->transfers, transfer_list) {
1623 			if (xfer->rx_buf && xfer->tx_buf)
1624 				return -EINVAL;
1625 			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
1626 				return -EINVAL;
1627 			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
1628 				return -EINVAL;
1629 		}
1630 	}
1631 
1632 	/*
1633 	 * Set transfer bits_per_word and max speed to the spi device defaults
1634 	 * if they are not set for this transfer.
1635 	 * Set transfer tx_nbits and rx_nbits to the single-transfer default
1636 	 * (SPI_NBITS_SINGLE) if they are not set for this transfer.
1637 	 */
1638 	list_for_each_entry(xfer, &message->transfers, transfer_list) {
1639 		message->frame_length += xfer->len;
1640 		if (!xfer->bits_per_word)
1641 			xfer->bits_per_word = spi->bits_per_word;
1642 		if (!xfer->speed_hz) {
1643 			xfer->speed_hz = spi->max_speed_hz;
1644 			if (master->max_speed_hz &&
1645 			    xfer->speed_hz > master->max_speed_hz)
1646 				xfer->speed_hz = master->max_speed_hz;
1647 		}
1648 
1649 		if (master->bits_per_word_mask) {
1650 			/* Only 32 bits fit in the mask */
1651 			if (xfer->bits_per_word > 32)
1652 				return -EINVAL;
1653 			if (!(master->bits_per_word_mask &
1654 					BIT(xfer->bits_per_word - 1)))
1655 				return -EINVAL;
1656 		}
1657 
1658 		if (xfer->speed_hz && master->min_speed_hz &&
1659 		    xfer->speed_hz < master->min_speed_hz)
1660 			return -EINVAL;
1661 		if (xfer->speed_hz && master->max_speed_hz &&
1662 		    xfer->speed_hz > master->max_speed_hz)
1663 			return -EINVAL;
1664 
1665 		if (xfer->tx_buf && !xfer->tx_nbits)
1666 			xfer->tx_nbits = SPI_NBITS_SINGLE;
1667 		if (xfer->rx_buf && !xfer->rx_nbits)
1668 			xfer->rx_nbits = SPI_NBITS_SINGLE;
1669 		/* check transfer tx/rx_nbits:
1670 		 * 1. the value must be one of single, dual or quad
1671 		 * 2. tx/rx_nbits must be allowed by the mode in spi_device
1672 		 * 3. if SPI_3WIRE, tx/rx_nbits must be single
1673 		 */
1674 		if (xfer->tx_buf) {
1675 			if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
1676 				xfer->tx_nbits != SPI_NBITS_DUAL &&
1677 				xfer->tx_nbits != SPI_NBITS_QUAD)
1678 				return -EINVAL;
1679 			if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
1680 				!(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
1681 				return -EINVAL;
1682 			if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
1683 				!(spi->mode & SPI_TX_QUAD))
1684 				return -EINVAL;
1685 			if ((spi->mode & SPI_3WIRE) &&
1686 				(xfer->tx_nbits != SPI_NBITS_SINGLE))
1687 				return -EINVAL;
1688 		}
1689 		/* check transfer rx_nbits */
1690 		if (xfer->rx_buf) {
1691 			if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
1692 				xfer->rx_nbits != SPI_NBITS_DUAL &&
1693 				xfer->rx_nbits != SPI_NBITS_QUAD)
1694 				return -EINVAL;
1695 			if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
1696 				!(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
1697 				return -EINVAL;
1698 			if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
1699 				!(spi->mode & SPI_RX_QUAD))
1700 				return -EINVAL;
1701 			if ((spi->mode & SPI_3WIRE) &&
1702 				(xfer->rx_nbits != SPI_NBITS_SINGLE))
1703 				return -EINVAL;
1704 		}
1705 	}
1706 
1707 	message->status = -EINPROGRESS;
1708 	return master->transfer(spi, message);
1709 }
1710 
1711 /**
1712  * spi_async - asynchronous SPI transfer
1713  * @spi: device with which data will be exchanged
1714  * @message: describes the data transfers, including completion callback
1715  * Context: any (irqs may be blocked, etc)
1716  *
1717  * This call may be used in_irq and other contexts which can't sleep,
1718  * as well as from task contexts which can sleep.
1719  *
1720  * The completion callback is invoked in a context which can't sleep.
1721  * Before that invocation, the value of message->status is undefined.
1722  * When the callback is issued, message->status holds either zero (to
1723  * indicate complete success) or a negative error code.  After that
1724  * callback returns, the driver which issued the transfer request may
1725  * deallocate the associated memory; it's no longer in use by any SPI
1726  * core or controller driver code.
1727  *
1728  * Note that although all messages to a spi_device are handled in
1729  * FIFO order, messages may go to different devices in other orders.
1730  * Some device might be higher priority, or have various "hard" access
1731  * time requirements, for example.
1732  *
1733  * On detection of any fault during the transfer, processing of
1734  * the entire message is aborted, and the device is deselected.
1735  * Until returning from the associated message completion callback,
1736  * no other spi_message queued to that device will be processed.
1737  * (This rule applies equally to all the synchronous transfer calls,
1738  * which are wrappers around this core asynchronous primitive.)
1739  */
1740 int spi_async(struct spi_device *spi, struct spi_message *message)
1741 {
1742 	struct spi_master *master = spi->master;
1743 	int ret;
1744 	unsigned long flags;
1745 
1746 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1747 
1748 	if (master->bus_lock_flag)
1749 		ret = -EBUSY;
1750 	else
1751 		ret = __spi_async(spi, message);
1752 
1753 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1754 
1755 	return ret;
1756 }
1757 EXPORT_SYMBOL_GPL(spi_async);
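
/*
 * Example: submitting a message with spi_async() and finishing up in the
 * completion callback, which may run in irq context.  Illustrative sketch;
 * "mydev" and its members are hypothetical:
 *
 *	static void mydev_complete(void *context)
 *	{
 *		struct mydev *mydev = context;
 *
 *		// mydev->msg.status is now valid; buffers may be reused
 *		complete(&mydev->done);
 *	}
 *
 *	spi_message_init(&mydev->msg);
 *	mydev->msg.complete = mydev_complete;
 *	mydev->msg.context = mydev;
 *	spi_message_add_tail(&mydev->xfer, &mydev->msg);
 *	status = spi_async(spi, &mydev->msg);
 */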
1758 
1759 /**
1760  * spi_async_locked - version of spi_async with exclusive bus usage
1761  * @spi: device with which data will be exchanged
1762  * @message: describes the data transfers, including completion callback
1763  * Context: any (irqs may be blocked, etc)
1764  *
1765  * This call may be used in_irq and other contexts which can't sleep,
1766  * as well as from task contexts which can sleep.
1767  *
1768  * The completion callback is invoked in a context which can't sleep.
1769  * Before that invocation, the value of message->status is undefined.
1770  * When the callback is issued, message->status holds either zero (to
1771  * indicate complete success) or a negative error code.  After that
1772  * callback returns, the driver which issued the transfer request may
1773  * deallocate the associated memory; it's no longer in use by any SPI
1774  * core or controller driver code.
1775  *
1776  * Note that although all messages to a spi_device are handled in
1777  * FIFO order, messages may go to different devices in other orders.
1778  * Some device might be higher priority, or have various "hard" access
1779  * time requirements, for example.
1780  *
1781  * On detection of any fault during the transfer, processing of
1782  * the entire message is aborted, and the device is deselected.
1783  * Until returning from the associated message completion callback,
1784  * no other spi_message queued to that device will be processed.
1785  * (This rule applies equally to all the synchronous transfer calls,
1786  * which are wrappers around this core asynchronous primitive.)
1787  */
1788 int spi_async_locked(struct spi_device *spi, struct spi_message *message)
1789 {
1790 	struct spi_master *master = spi->master;
1791 	int ret;
1792 	unsigned long flags;
1793 
1794 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1795 
1796 	ret = __spi_async(spi, message);
1797 
1798 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1799 
1800 	return ret;
1802 }
1803 EXPORT_SYMBOL_GPL(spi_async_locked);
1804 
1805 
1806 /*-------------------------------------------------------------------------*/
1807 
1808 /* Utility methods for SPI master protocol drivers, layered on
1809  * top of the core.  Some other utility methods are defined as
1810  * inline functions.
1811  */
1812 
1813 static void spi_complete(void *arg)
1814 {
1815 	complete(arg);
1816 }
1817 
1818 static int __spi_sync(struct spi_device *spi, struct spi_message *message,
1819 		      int bus_locked)
1820 {
1821 	DECLARE_COMPLETION_ONSTACK(done);
1822 	int status;
1823 	struct spi_master *master = spi->master;
1824 
1825 	message->complete = spi_complete;
1826 	message->context = &done;
1827 
1828 	if (!bus_locked)
1829 		mutex_lock(&master->bus_lock_mutex);
1830 
1831 	status = spi_async_locked(spi, message);
1832 
1833 	if (!bus_locked)
1834 		mutex_unlock(&master->bus_lock_mutex);
1835 
1836 	if (status == 0) {
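		/* block until the controller signals completion via spi_complete() */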
1837 		wait_for_completion(&done);
1838 		status = message->status;
1839 	}
1840 	message->context = NULL;
1841 	return status;
1842 }
1843 
1844 /**
1845  * spi_sync - blocking/synchronous SPI data transfers
1846  * @spi: device with which data will be exchanged
1847  * @message: describes the data transfers
1848  * Context: can sleep
1849  *
1850  * This call may only be used from a context that may sleep.  The sleep
1851  * is non-interruptible, and has no timeout.  Low-overhead controller
1852  * drivers may DMA directly into and out of the message buffers.
1853  *
1854  * Note that the SPI device's chip select is active during the message,
1855  * and then is normally disabled between messages.  Drivers for some
1856  * frequently-used devices may want to minimize costs of selecting a chip,
1857  * by leaving it selected in anticipation that the next message will go
1858  * to the same chip.  (That may increase power usage.)
1859  *
1860  * Also, the caller is guaranteeing that the memory associated with the
1861  * message will not be freed before this call returns.
1862  *
1863  * It returns zero on success, else a negative error code.
1864  */
1865 int spi_sync(struct spi_device *spi, struct spi_message *message)
1866 {
1867 	return __spi_sync(spi, message, 0);
1868 }
1869 EXPORT_SYMBOL_GPL(spi_sync);
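
/* Illustrative sketch (not built here): a minimal spi_sync() user builds
 * one full-duplex transfer on the stack and blocks until it completes.
 * my_full_duplex() and its parameters are hypothetical; the buffers must
 * stay valid until the call returns.
 */
static int my_full_duplex(struct spi_device *spi,
			  const void *tx, void *rx, unsigned len)
{
	struct spi_transfer	xfer = {
		.tx_buf	= tx,
		.rx_buf	= rx,
		.len	= len,
	};
	struct spi_message	msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* sleeps (uninterruptibly) until the message has completed */
	return spi_sync(spi, &msg);
}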
1870 
1871 /**
1872  * spi_sync_locked - version of spi_sync with exclusive bus usage
1873  * @spi: device with which data will be exchanged
1874  * @message: describes the data transfers
1875  * Context: can sleep
1876  *
1877  * This call may only be used from a context that may sleep.  The sleep
1878  * is non-interruptible, and has no timeout.  Low-overhead controller
1879  * drivers may DMA directly into and out of the message buffers.
1880  *
1881  * This call should be used by drivers that require exclusive access to the
1882  * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
1883  * be released by a spi_bus_unlock call when the exclusive access is over.
1884  *
1885  * It returns zero on success, else a negative error code.
1886  */
1887 int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
1888 {
1889 	return __spi_sync(spi, message, 1);
1890 }
1891 EXPORT_SYMBOL_GPL(spi_sync_locked);
1892 
1893 /**
1894  * spi_bus_lock - obtain a lock for exclusive SPI bus usage
1895  * @master: SPI bus master that should be locked for exclusive bus access
1896  * Context: can sleep
1897  *
1898  * This call may only be used from a context that may sleep.  The sleep
1899  * is non-interruptible, and has no timeout.
1900  *
1901  * This call should be used by drivers that require exclusive access to the
1902  * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
1903  * exclusive access is over. Data transfer must be done by spi_sync_locked
1904  * and spi_async_locked calls when the SPI bus lock is held.
1905  *
1906  * It returns zero on success, else a negative error code.
1907  */
1908 int spi_bus_lock(struct spi_master *master)
1909 {
1910 	unsigned long flags;
1911 
1912 	mutex_lock(&master->bus_lock_mutex);
1913 
1914 	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
1915 	master->bus_lock_flag = 1;
1916 	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
1917 
1918 	/* mutex remains locked until spi_bus_unlock is called */
1919 
1920 	return 0;
1921 }
1922 EXPORT_SYMBOL_GPL(spi_bus_lock);
1923 
1924 /**
1925  * spi_bus_unlock - release the lock for exclusive SPI bus usage
1926  * @master: SPI bus master that was locked for exclusive bus access
1927  * Context: can sleep
1928  *
1929  * This call may only be used from a context that may sleep.  The sleep
1930  * is non-interruptible, and has no timeout.
1931  *
1932  * This call releases an SPI bus lock previously obtained by an spi_bus_lock
1933  * call.
1934  *
1935  * It returns zero on success, else a negative error code.
1936  */
1937 int spi_bus_unlock(struct spi_master *master)
1938 {
1939 	master->bus_lock_flag = 0;
1940 
1941 	mutex_unlock(&master->bus_lock_mutex);
1942 
1943 	return 0;
1944 }
1945 EXPORT_SYMBOL_GPL(spi_bus_unlock);
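
/* Illustrative sketch (not built here): a client that must keep other
 * users off the bus between two messages brackets them with
 * spi_bus_lock()/spi_bus_unlock() and issues only spi_sync_locked() (or
 * spi_async_locked()) transfers in between.  my_locked_cmd_resp() and
 * its parameters are hypothetical.
 */
static int my_locked_cmd_resp(struct spi_device *spi,
			      const void *cmd, unsigned cmd_len,
			      void *resp, unsigned resp_len)
{
	struct spi_transfer	t_cmd = { .tx_buf = cmd, .len = cmd_len };
	struct spi_transfer	t_resp = { .rx_buf = resp, .len = resp_len };
	struct spi_message	m_cmd, m_resp;
	int			ret;

	spi_message_init(&m_cmd);
	spi_message_add_tail(&t_cmd, &m_cmd);
	spi_message_init(&m_resp);
	spi_message_add_tail(&t_resp, &m_resp);

	/* until the unlock, other clients' spi_async() returns -EBUSY */
	spi_bus_lock(spi->master);

	ret = spi_sync_locked(spi, &m_cmd);
	if (ret == 0)
		ret = spi_sync_locked(spi, &m_resp);

	spi_bus_unlock(spi->master);

	return ret;
}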
1946 
1947 /* portable code must never pass more than 32 bytes */
1948 #define	SPI_BUFSIZ	max(32, SMP_CACHE_BYTES)
1949 
1950 static u8	*buf;
1951 
1952 /**
1953  * spi_write_then_read - SPI synchronous write followed by read
1954  * @spi: device with which data will be exchanged
1955  * @txbuf: data to be written (need not be dma-safe)
1956  * @n_tx: size of txbuf, in bytes
1957  * @rxbuf: buffer into which data will be read (need not be dma-safe)
1958  * @n_rx: size of rxbuf, in bytes
1959  * Context: can sleep
1960  *
1961  * This performs a half duplex MicroWire style transaction with the
1962  * device, sending txbuf and then reading rxbuf.  The return value
1963  * is zero for success, else a negative errno status code.
1964  * This call may only be used from a context that may sleep.
1965  *
1966  * Parameters to this routine are always copied using a small buffer;
1967  * portable code should never use this for more than 32 bytes.
1968  * Performance-sensitive or bulk transfer code should instead use
1969  * spi_{async,sync}() calls with dma-safe buffers.
1970  */
1971 int spi_write_then_read(struct spi_device *spi,
1972 		const void *txbuf, unsigned n_tx,
1973 		void *rxbuf, unsigned n_rx)
1974 {
1975 	static DEFINE_MUTEX(lock);
1976 
1977 	int			status;
1978 	struct spi_message	message;
1979 	struct spi_transfer	x[2];
1980 	u8			*local_buf;
1981 
1982 	/* Use the preallocated DMA-safe buffer if we can.  We can't avoid
1983 	 * copying here (this routine is a pure convenience wrapper), but we
1984 	 * can keep heap costs out of the hot path unless someone else is
1985 	 * using the preallocated buffer or the transfer is too large.
1986 	 */
1987 	if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
1988 		local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
1989 				    GFP_KERNEL | GFP_DMA);
1990 		if (!local_buf)
1991 			return -ENOMEM;
1992 	} else {
1993 		local_buf = buf;
1994 	}
1995 
1996 	spi_message_init(&message);
1997 	memset(x, 0, sizeof(x));
1998 	if (n_tx) {
1999 		x[0].len = n_tx;
2000 		spi_message_add_tail(&x[0], &message);
2001 	}
2002 	if (n_rx) {
2003 		x[1].len = n_rx;
2004 		spi_message_add_tail(&x[1], &message);
2005 	}
2006 
2007 	memcpy(local_buf, txbuf, n_tx);
2008 	x[0].tx_buf = local_buf;
2009 	x[1].rx_buf = local_buf + n_tx;
2010 
2011 	/* do the i/o */
2012 	status = spi_sync(spi, &message);
2013 	if (status == 0)
2014 		memcpy(rxbuf, x[1].rx_buf, n_rx);
2015 
2016 	if (x[0].tx_buf == buf)
2017 		mutex_unlock(&lock);
2018 	else
2019 		kfree(local_buf);
2020 
2021 	return status;
2022 }
2023 EXPORT_SYMBOL_GPL(spi_write_then_read);
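
/* Illustrative sketch (not built here): a one-byte register read via
 * spi_write_then_read().  Stack buffers are fine because the helper
 * bounces through its own DMA-safe buffer; MY_CMD_READ_STATUS is a
 * hypothetical device opcode.
 */
#define MY_CMD_READ_STATUS	0x05	/* hypothetical opcode */

static int my_read_status(struct spi_device *spi, u8 *status)
{
	u8 cmd = MY_CMD_READ_STATUS;

	/* 2 bytes total: well under the 32-byte portability limit */
	return spi_write_then_read(spi, &cmd, 1, status, 1);
}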
2024 
2025 /*-------------------------------------------------------------------------*/
2026 
2027 static int __init spi_init(void)
2028 {
2029 	int	status;
2030 
2031 	buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
2032 	if (!buf) {
2033 		status = -ENOMEM;
2034 		goto err0;
2035 	}
2036 
2037 	status = bus_register(&spi_bus_type);
2038 	if (status < 0)
2039 		goto err1;
2040 
2041 	status = class_register(&spi_master_class);
2042 	if (status < 0)
2043 		goto err2;
2044 	return 0;
2045 
2046 err2:
2047 	bus_unregister(&spi_bus_type);
2048 err1:
2049 	kfree(buf);
2050 	buf = NULL;
2051 err0:
2052 	return status;
2053 }
2054 
2055 /* board_info is normally registered in arch_initcall(),
2056  * but even essential drivers wait till later.
2057  *
2058  * REVISIT: only boardinfo really needs static linking.  The rest (device
2059  * and driver registration) _could_ be dynamically linked (modular) ...
2060  * the costs include making the boardinfo data structures much more public.
2061  */
2062 postcore_initcall(spi_init);
2063 
2064