xref: /linux/drivers/scsi/scsi_scan.c (revision f990ad67f0febc51274adb604d5bdeab0d06d024)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * scsi_scan.c
4  *
5  * Copyright (C) 2000 Eric Youngdale,
6  * Copyright (C) 2002 Patrick Mansfield
7  *
8  * The general scanning/probing algorithm is as follows, exceptions are
9  * made to it depending on device specific flags, compilation options, and
10  * global variable (boot or module load time) settings.
11  *
12  * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13  * device attached, a scsi_device is allocated and setup for it.
14  *
15  * For every id of every channel on the given host:
16  *
17  * 	Scan LUN 0; if the target responds to LUN 0 (even if there is no
18  * 	device or storage attached to LUN 0):
19  *
20  * 		If LUN 0 has a device attached, allocate and setup a
21  * 		scsi_device for it.
22  *
23  * 		If target is SCSI-3 or up, issue a REPORT LUN, and scan
24  * 		all of the LUNs returned by the REPORT LUN; else,
25  * 		sequentially scan LUNs up until some maximum is reached,
26  * 		or a LUN is seen that cannot have a device attached to it.
27  */
28 
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <linux/init.h>
32 #include <linux/blkdev.h>
33 #include <linux/delay.h>
34 #include <linux/kthread.h>
35 #include <linux/spinlock.h>
36 #include <linux/async.h>
37 #include <linux/slab.h>
38 #include <linux/unaligned.h>
39 
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_cmnd.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_driver.h>
44 #include <scsi/scsi_devinfo.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_transport.h>
47 #include <scsi/scsi_dh.h>
48 #include <scsi/scsi_eh.h>
49 
50 #include "scsi_priv.h"
51 #include "scsi_logging.h"
52 
53 #define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
54 	" SCSI scanning, some SCSI devices might not be configured\n"
55 
56 /*
57  * Default timeout
58  */
59 #define SCSI_TIMEOUT (2*HZ)
60 #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61 
62 /*
63  * Prefix values for the SCSI id's (stored in sysfs name field)
64  */
65 #define SCSI_UID_SER_NUM 'S'
66 #define SCSI_UID_UNKNOWN 'Z'
67 
68 /*
69  * Return values of some of the scanning functions.
70  *
71  * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
72  * includes allocation or general failures preventing IO from being sent.
73  *
74  * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75  * on the given LUN.
76  *
77  * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78  * given LUN.
79  */
80 #define SCSI_SCAN_NO_RESPONSE		0
81 #define SCSI_SCAN_TARGET_PRESENT	1
82 #define SCSI_SCAN_LUN_PRESENT		2
83 
84 static const char *scsi_null_device_strs = "nullnullnullnull";
85 
86 #define MAX_SCSI_LUNS	512
87 
88 static u64 max_scsi_luns = MAX_SCSI_LUNS;
89 
90 module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91 MODULE_PARM_DESC(max_luns,
92 		 "last scsi LUN (should be between 1 and 2^64-1)");
93 
94 #ifdef CONFIG_SCSI_SCAN_ASYNC
95 #define SCSI_SCAN_TYPE_DEFAULT "async"
96 #else
97 #define SCSI_SCAN_TYPE_DEFAULT "sync"
98 #endif
99 
100 static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101 
102 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103 		    S_IRUGO|S_IWUSR);
104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105 		 "Setting to 'manual' disables automatic scanning, but allows "
106 		 "for manual device scan via the 'scan' sysfs attribute.");
107 
108 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109 
110 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111 MODULE_PARM_DESC(inq_timeout,
112 		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113 		 " Default is 20. Some devices may need more; most need less.");
114 
115 /* This lock protects only this list */
116 static DEFINE_SPINLOCK(async_scan_lock);
117 static LIST_HEAD(scanning_hosts);
118 
119 struct async_scan_data {
120 	struct list_head list;
121 	struct Scsi_Host *shost;
122 	struct completion prev_finished;
123 };
124 
125 /*
126  * scsi_enable_async_suspend - Enable async suspend and resume
127  */
128 void scsi_enable_async_suspend(struct device *dev)
129 {
130 	/*
131 	 * If a user has disabled async probing a likely reason is due to a
132 	 * storage enclosure that does not inject staggered spin-ups. For
133 	 * safety, make resume synchronous as well in that case.
134 	 */
135 	if (strncmp(scsi_scan_type, "async", 5) != 0)
136 		return;
137 	/* Enable asynchronous suspend and resume. */
138 	device_enable_async_suspend(dev);
139 }
140 
141 /**
142  * scsi_complete_async_scans - Wait for asynchronous scans to complete
143  *
144  * When this function returns, any host which started scanning before
145  * this function was called will have finished its scan.  Hosts which
146  * started scanning after this function was called may or may not have
147  * finished.
148  */
int scsi_complete_async_scans(void)
{
	struct async_scan_data *data;

	do {
		/* Fast path: nothing is scanning, nothing to wait for. */
		scoped_guard(spinlock, &async_scan_lock)
			if (list_empty(&scanning_hosts))
				return 0;
		/* If we can't get memory immediately, that's OK.  Just
		 * sleep a little.  Even if we never get memory, the async
		 * scans will finish eventually.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			msleep(1);
	} while (!data);

	/*
	 * This entry carries no host; it only serves as a placeholder in
	 * scanning_hosts so that the scan ahead of us can signal our
	 * completion when it finishes.
	 */
	data->shost = NULL;
	init_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	/* Check that there's still somebody else on the list */
	if (list_empty(&scanning_hosts))
		goto done;
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
	wait_for_completion(&data->prev_finished);

	spin_lock(&async_scan_lock);
	list_del(&data->list);
	/*
	 * Pass the baton: if anyone queued behind us, wake the next
	 * waiter so completions propagate down the list in order.
	 */
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
 done:
	spin_unlock(&async_scan_lock);

	kfree(data);
	return 0;
}
192 
193 /**
194  * scsi_unlock_floptical - unlock device via a special MODE SENSE command
195  * @sdev:	scsi device to send command to
196  * @result:	area to store the result of the MODE SENSE
197  *
198  * Description:
199  *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
200  *     Called for BLIST_KEY devices.
201  **/
202 static void scsi_unlock_floptical(struct scsi_device *sdev,
203 				  unsigned char *result)
204 {
205 	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
206 
207 	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
208 	scsi_cmd[0] = MODE_SENSE;
209 	scsi_cmd[1] = 0;
210 	scsi_cmd[2] = 0x2e;
211 	scsi_cmd[3] = 0;
212 	scsi_cmd[4] = 0x2a;     /* size */
213 	scsi_cmd[5] = 0;
214 	scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
215 			 SCSI_TIMEOUT, 3, NULL);
216 }
217 
/*
 * (Re)allocate the per-device budget sbitmap sized for the host's
 * .can_queue, using a shift derived from @depth.  Returns 0 on success
 * or a negative errno from sbitmap_init_node() on first allocation.
 */
static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
					unsigned int depth)
{
	int new_shift = sbitmap_calculate_shift(depth);
	bool need_alloc = !sdev->budget_map.map;
	bool need_free = false;
	unsigned int memflags;
	int ret;
	struct sbitmap sb_backup;

	depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));

	/*
	 * realloc if new shift is calculated, which is caused by setting
	 * up one new default queue depth after calling ->sdev_configure
	 */
	if (!need_alloc && new_shift != sdev->budget_map.shift)
		need_alloc = need_free = true;

	if (!need_alloc)
		return 0;

	/*
	 * Request queue has to be frozen for reallocating budget map,
	 * and here disk isn't added yet, so freezing is pretty fast
	 */
	if (need_free) {
		memflags = blk_mq_freeze_queue(sdev->request_queue);
		/* keep the old map so we can roll back on failure */
		sb_backup = sdev->budget_map;
	}
	ret = sbitmap_init_node(&sdev->budget_map,
				scsi_device_max_queue_depth(sdev),
				new_shift, GFP_NOIO,
				sdev->request_queue->node, false, true);
	if (!ret)
		sbitmap_resize(&sdev->budget_map, depth);

	if (need_free) {
		/*
		 * On the realloc path a failure is not fatal: restore the
		 * backup map and report success, since the old map is
		 * still usable.
		 */
		if (ret)
			sdev->budget_map = sb_backup;
		else
			sbitmap_free(&sb_backup);
		ret = 0;
		blk_mq_unfreeze_queue(sdev->request_queue, memflags);
	}
	return ret;
}
265 
266 /**
267  * scsi_alloc_sdev - allocate and setup a scsi_Device
268  * @starget: which target to allocate a &scsi_device for
269  * @lun: which lun
270  * @hostdata: usually NULL and set by ->sdev_init instead
271  *
272  * Description:
273  *     Allocate, initialize for io, and return a pointer to a scsi_Device.
274  *     Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
275  *     adds scsi_Device to the appropriate list.
276  *
277  * Return value:
278  *     scsi_Device pointer, or NULL on failure.
279  **/
static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
					   u64 lun, void *hostdata)
{
	unsigned int depth;
	struct scsi_device *sdev;
	struct request_queue *q;
	int display_failure_msg = 1, ret;
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct queue_limits lim;

	/* transport data lives in the same allocation, after the sdev */
	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
		       GFP_KERNEL);
	if (!sdev)
		goto out;

	sdev->vendor = scsi_null_device_strs;
	sdev->model = scsi_null_device_strs;
	sdev->rev = scsi_null_device_strs;
	sdev->host = shost;
	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
	sdev->id = starget->id;
	sdev->lun = lun;
	sdev->channel = starget->channel;
	mutex_init(&sdev->state_mutex);
	sdev->sdev_state = SDEV_CREATED;
	INIT_LIST_HEAD(&sdev->siblings);
	INIT_LIST_HEAD(&sdev->same_target_siblings);
	INIT_LIST_HEAD(&sdev->starved_entry);
	INIT_LIST_HEAD(&sdev->event_list);
	spin_lock_init(&sdev->list_lock);
	mutex_init(&sdev->inquiry_mutex);
	INIT_WORK(&sdev->event_work, scsi_evt_thread);
	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);

	/* hold a reference on the target for the sdev's lifetime */
	sdev->sdev_gendev.parent = get_device(&starget->dev);
	sdev->sdev_target = starget;

	/* usually NULL and set by ->sdev_init instead */
	sdev->hostdata = hostdata;

	/* if the device needs this changing, it may do so in the
	 * sdev_configure function */
	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;

	/*
	 * Some low level driver could use device->type
	 */
	sdev->type = -1;

	/*
	 * Assume that the device will have handshaking problems,
	 * and then fix this field later if it turns out it
	 * doesn't
	 */
	sdev->borken = 1;

	sdev->sg_reserved_size = INT_MAX;

	scsi_init_limits(shost, &lim);
	q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
	if (IS_ERR(q)) {
		/* release fn is set up in scsi_sysfs_device_initialise, so
		 * have to free and put manually here */
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}
	/* the queue pins the tag set until the sdev is torn down */
	kref_get(&sdev->host->tagset_refcnt);
	sdev->request_queue = q;

	scsi_sysfs_device_initialize(sdev);

	/* pseudo devices need no budget map, queue depth or LLD init */
	if (scsi_device_is_pseudo_dev(sdev))
		return sdev;

	depth = sdev->host->cmd_per_lun ?: 1;

	/*
	 * Use .can_queue as budget map's depth because we have to
	 * support adjusting queue depth from sysfs. Meantime use
	 * default device queue depth to figure out sbitmap shift
	 * since we use this queue depth most of times.
	 */
	if (scsi_realloc_sdev_budget_map(sdev, depth)) {
		kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
		put_device(&starget->dev);
		kfree(sdev);
		goto out;
	}

	scsi_change_queue_depth(sdev, depth);

	if (shost->hostt->sdev_init) {
		ret = shost->hostt->sdev_init(sdev);
		if (ret) {
			/*
			 * if LLDD reports slave not present, don't clutter
			 * console with alloc failure messages
			 */
			if (ret == -ENXIO)
				display_failure_msg = 0;
			goto out_device_destroy;
		}
	}

	return sdev;

out_device_destroy:
	/* device is fully initialized at this point; use the full teardown */
	__scsi_remove_device(sdev);
out:
	if (display_failure_msg)
		printk(ALLOC_FAILURE_MSG, __func__);
	return NULL;
}
394 
/*
 * Final teardown of a target: mark it deleted, give the transport and
 * the LLD a chance to clean up, unlink it from the host's target list
 * and drop the last device reference (which frees it via release).
 */
static void scsi_target_destroy(struct scsi_target *starget)
{
	struct device *dev = &starget->dev;
	struct Scsi_Host *shost = dev_to_shost(dev->parent);
	unsigned long flags;

	/* destroying twice would double-put; that is a logic error */
	BUG_ON(starget->state == STARGET_DEL);
	starget->state = STARGET_DEL;
	transport_destroy_device(dev);
	/* ->target_destroy and list removal run under the host lock */
	spin_lock_irqsave(shost->host_lock, flags);
	if (shost->hostt->target_destroy)
		shost->hostt->target_destroy(starget);
	list_del_init(&starget->siblings);
	spin_unlock_irqrestore(shost->host_lock, flags);
	put_device(dev);
}
411 
412 static void scsi_target_dev_release(struct device *dev)
413 {
414 	struct device *parent = dev->parent;
415 	struct scsi_target *starget = to_scsi_target(dev);
416 
417 	kfree(starget);
418 	put_device(parent);
419 }
420 
/* Device type shared by all scsi_target devices; identifies them in
 * sysfs and routes their final free through scsi_target_dev_release. */
static const struct device_type scsi_target_type = {
	.name =		"scsi_target",
	.release =	scsi_target_dev_release,
};
425 
/* Return non-zero if @dev is a scsi_target (by device_type identity). */
int scsi_is_target_device(const struct device *dev)
{
	return dev->type == &scsi_target_type;
}
EXPORT_SYMBOL(scsi_is_target_device);
431 
432 static struct scsi_target *__scsi_find_target(struct device *parent,
433 					      int channel, uint id)
434 {
435 	struct scsi_target *starget, *found_starget = NULL;
436 	struct Scsi_Host *shost = dev_to_shost(parent);
437 	/*
438 	 * Search for an existing target for this sdev.
439 	 */
440 	list_for_each_entry(starget, &shost->__targets, siblings) {
441 		if (starget->id == id &&
442 		    starget->channel == channel) {
443 			found_starget = starget;
444 			break;
445 		}
446 	}
447 	if (found_starget)
448 		get_device(&found_starget->dev);
449 
450 	return found_starget;
451 }
452 
453 /**
454  * scsi_target_reap_ref_release - remove target from visibility
455  * @kref: the reap_ref in the target being released
456  *
457  * Called on last put of reap_ref, which is the indication that no device
458  * under this target is visible anymore, so render the target invisible in
459  * sysfs.  Note: we have to be in user context here because the target reaps
460  * should be done in places where the scsi device visibility is being removed.
461  */
static void scsi_target_reap_ref_release(struct kref *kref)
{
	struct scsi_target *starget
		= container_of(kref, struct scsi_target, reap_ref);

	/*
	 * if we get here and the target is still in a CREATED state that
	 * means it was allocated but never made visible (because a scan
	 * turned up no LUNs), so don't call device_del() on it.
	 */
	if ((starget->state != STARGET_CREATED) &&
	    (starget->state != STARGET_CREATED_REMOVE)) {
		transport_remove_device(&starget->dev);
		device_del(&starget->dev);
	}
	/* unconditionally finish teardown; drops the last device ref */
	scsi_target_destroy(starget);
}
479 
/* Drop one reap reference; the last put tears the target down. */
static void scsi_target_reap_ref_put(struct scsi_target *starget)
{
	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
}
484 
485 /**
486  * scsi_alloc_target - allocate a new or find an existing target
487  * @parent:	parent of the target (need not be a scsi host)
488  * @channel:	target channel number (zero if no channels)
489  * @id:		target id number
490  *
491  * Return an existing target if one exists, provided it hasn't already
492  * gone into STARGET_DEL state, otherwise allocate a new target.
493  *
494  * The target is returned with an incremented reference, so the caller
495  * is responsible for both reaping and doing a last put
496  */
static struct scsi_target *scsi_alloc_target(struct device *parent,
					     int channel, uint id)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	struct device *dev = NULL;
	unsigned long flags;
	/* transport data is co-allocated right after the target struct */
	const int size = sizeof(struct scsi_target)
		+ shost->transportt->target_size;
	struct scsi_target *starget;
	struct scsi_target *found_target;
	int error, ref_got;

	starget = kzalloc(size, GFP_KERNEL);
	if (!starget) {
		printk(KERN_ERR "%s: allocation failure\n", __func__);
		return NULL;
	}
	dev = &starget->dev;
	device_initialize(dev);
	kref_init(&starget->reap_ref);
	dev->parent = get_device(parent);
	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
	dev->bus = &scsi_bus_type;
	dev->type = &scsi_target_type;
	scsi_enable_async_suspend(dev);
	starget->id = id;
	starget->channel = channel;
	starget->can_queue = 0;
	INIT_LIST_HEAD(&starget->siblings);
	INIT_LIST_HEAD(&starget->devices);
	starget->state = STARGET_CREATED;
	starget->scsi_level = SCSI_2;
	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
 retry:
	spin_lock_irqsave(shost->host_lock, flags);

	/* someone may have raced us and created this target already */
	found_target = __scsi_find_target(parent, channel, id);
	if (found_target)
		goto found;

	list_add_tail(&starget->siblings, &shost->__targets);
	spin_unlock_irqrestore(shost->host_lock, flags);
	/* allocate and add */
	transport_setup_device(dev);
	if (shost->hostt->target_alloc) {
		error = shost->hostt->target_alloc(starget);

		if(error) {
			if (error != -ENXIO)
				dev_err(dev, "target allocation failed, error %d\n", error);
			/* don't want scsi_target_reap to do the final
			 * put because it will be under the host lock */
			scsi_target_destroy(starget);
			return NULL;
		}
	}
	/* extra ref for the caller, balanced by its final put */
	get_device(dev);

	return starget;

 found:
	/*
	 * release routine already fired if kref is zero, so if we can still
	 * take the reference, the target must be alive.  If we can't, it must
	 * be dying and we need to wait for a new target
	 */
	ref_got = kref_get_unless_zero(&found_target->reap_ref);

	spin_unlock_irqrestore(shost->host_lock, flags);
	if (ref_got) {
		/* discard our freshly allocated, never-added target */
		put_device(dev);
		return found_target;
	}
	/*
	 * Unfortunately, we found a dying target; need to wait until it's
	 * dead before we can get a new one.  There is an anomaly here.  We
	 * *should* call scsi_target_reap() to balance the kref_get() of the
	 * reap_ref above.  However, since the target being released, it's
	 * already invisible and the reap_ref is irrelevant.  If we call
	 * scsi_target_reap() we might spuriously do another device_del() on
	 * an already invisible target.
	 */
	put_device(&found_target->dev);
	/*
	 * length of time is irrelevant here, we just want to yield the CPU
	 * for a tick to avoid busy waiting for the target to die.
	 */
	msleep(1);
	goto retry;
}
587 
588 /**
589  * scsi_target_reap - check to see if target is in use and destroy if not
590  * @starget: target to be checked
591  *
592  * This is used after removing a LUN or doing a last put of the target
593  * it checks atomically that nothing is using the target and removes
594  * it if so.
595  */
void scsi_target_reap(struct scsi_target *starget)
{
	/*
	 * serious problem if this triggers: STARGET_DEL is only set in the if
	 * the reap_ref drops to zero, so we're trying to do another final put
	 * on an already released kref
	 */
	BUG_ON(starget->state == STARGET_DEL);
	scsi_target_reap_ref_put(starget);
}
606 
607 /**
608  * scsi_sanitize_inquiry_string - remove non-graphical chars from an
609  *                                INQUIRY result string
610  * @s: INQUIRY result string to sanitize
611  * @len: length of the string
612  *
613  * Description:
614  *	The SCSI spec says that INQUIRY vendor, product, and revision
615  *	strings must consist entirely of graphic ASCII characters,
616  *	padded on the right with spaces.  Since not all devices obey
617  *	this rule, we will replace non-graphic or non-ASCII characters
618  *	with spaces.  Exception: a NUL character is interpreted as a
619  *	string terminator, so all the following characters are set to
620  *	spaces.
621  **/
void scsi_sanitize_inquiry_string(unsigned char *s, int len)
{
	bool seen_nul = false;

	while (len-- > 0) {
		/* a NUL terminates the string: blank it and all that follows */
		if (*s == '\0')
			seen_nul = true;
		/* replace non-graphic / non-ASCII bytes with a space */
		if (seen_nul || *s < 0x20 || *s > 0x7e)
			*s = ' ';
		s++;
	}
}
633 EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
634 
635 
636 /**
637  * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
638  * @sdev:	scsi_device to probe
639  * @inq_result:	area to store the INQUIRY result
640  * @result_len: len of inq_result
641  * @bflags:	store any bflags found here
642  *
643  * Description:
644  *     Probe the lun associated with @req using a standard SCSI INQUIRY;
645  *
646  *     If the INQUIRY is successful, zero is returned and the
647  *     INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
648  *     are copied to the scsi_device any flags value is stored in *@bflags.
649  **/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
			  int result_len, blist_flags_t *bflags)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
	int response_len = 0;
	int pass, count, result, resid;
	struct scsi_failure failure_defs[] = {
		/*
		 * not-ready to ready transition [asc/ascq=0x28/0x0] or
		 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
		 * should not yield UNIT_ATTENTION but many buggy devices do
		 * so anyway.
		 */
		{
			.sense = UNIT_ATTENTION,
			.asc = 0x28,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{
			.sense = UNIT_ATTENTION,
			.asc = 0x29,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		{
			/* allow one retry on a command timeout */
			.allowed = 1,
			.result = DID_TIME_OUT << 16,
		},
		{}
	};
	struct scsi_failures failures = {
		.total_allowed = 3,
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.resid = &resid,
		.failures = &failures,
	};

	*bflags = 0;

	/* Perform up to 3 passes.  The first pass uses a conservative
	 * transfer length of 36 unless sdev->inquiry_len specifies a
	 * different value. */
	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
	try_inquiry_len = first_inquiry_len;
	pass = 1;

 next_pass:
	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY pass %d length %d\n",
				pass, try_inquiry_len));

	/* Each pass gets up to three chances to ignore Unit Attention */
	scsi_failures_reset_retries(&failures);

	for (count = 0; count < 3; ++count) {
		/* build a 6-byte INQUIRY CDB for the current length */
		memset(scsi_cmd, 0, 6);
		scsi_cmd[0] = INQUIRY;
		scsi_cmd[4] = (unsigned char) try_inquiry_len;

		memset(inq_result, 0, try_inquiry_len);

		result = scsi_execute_cmd(sdev,  scsi_cmd, REQ_OP_DRV_IN,
					  inq_result, try_inquiry_len,
					  HZ / 2 + HZ * scsi_inq_timeout, 3,
					  &exec_args);

		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
				"scsi scan: INQUIRY %s with code 0x%x\n",
				result ? "failed" : "successful", result));

		if (result == 0) {
			/*
			 * if nothing was transferred, we try
			 * again. It's a workaround for some USB
			 * devices.
			 */
			if (resid == try_inquiry_len)
				continue;
		}
		break;
	}

	if (result == 0) {
		/* vendor (8 bytes), product (16 bytes), revision (4 bytes) */
		scsi_sanitize_inquiry_string(&inq_result[8], 8);
		scsi_sanitize_inquiry_string(&inq_result[16], 16);
		scsi_sanitize_inquiry_string(&inq_result[32], 4);

		/* additional length byte + the 5-byte fixed header */
		response_len = inq_result[4] + 5;
		if (response_len > 255)
			response_len = first_inquiry_len;	/* sanity */

		/*
		 * Get any flags for this device.
		 *
		 * XXX add a bflags to scsi_device, and replace the
		 * corresponding bit fields in scsi_device, so bflags
		 * need not be passed as an argument.
		 */
		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
				&inq_result[16]);

		/* When the first pass succeeds we gain information about
		 * what larger transfer lengths might work. */
		if (pass == 1) {
			if (BLIST_INQUIRY_36 & *bflags)
				next_inquiry_len = 36;
			/*
			 * LLD specified a maximum sdev->inquiry_len
			 * but device claims it has more data. Capping
			 * the length only makes sense for legacy
			 * devices. If a device supports SPC-4 (2014)
			 * or newer, assume that it is safe to ask for
			 * as much as the device says it supports.
			 */
			else if (sdev->inquiry_len &&
				 response_len > sdev->inquiry_len &&
				 (inq_result[2] & 0x7) < 6) /* SPC-4 */
				next_inquiry_len = sdev->inquiry_len;
			else
				next_inquiry_len = response_len;

			/* If more data is available perform the second pass */
			if (next_inquiry_len > try_inquiry_len) {
				try_inquiry_len = next_inquiry_len;
				pass = 2;
				goto next_pass;
			}
		}

	} else if (pass == 2) {
		sdev_printk(KERN_INFO, sdev,
			    "scsi scan: %d byte inquiry failed.  "
			    "Consider BLIST_INQUIRY_36 for this device\n",
			    try_inquiry_len);

		/* If this pass failed, the third pass goes back and transfers
		 * the same amount as we successfully got in the first pass. */
		try_inquiry_len = first_inquiry_len;
		pass = 3;
		goto next_pass;
	}

	/* If the last transfer attempt got an error, assume the
	 * peripheral doesn't exist or is dead. */
	if (result)
		return -EIO;

	/* Don't report any more data than the device says is valid */
	sdev->inquiry_len = min(try_inquiry_len, response_len);

	/*
	 * XXX Abort if the response length is less than 36? If less than
	 * 32, the lookup of the device flags (above) could be invalid,
	 * and it would be possible to take an incorrect action - we do
	 * not want to hang because of a short INQUIRY. On the flip side,
	 * if the device is spun down or becoming ready (and so it gives a
	 * short INQUIRY), an abort here prevents any further use of the
	 * device, including spin up.
	 *
	 * On the whole, the best approach seems to be to assume the first
	 * 36 bytes are valid no matter what the device says.  That's
	 * better than copying < 36 bytes to the inquiry-result buffer
	 * and displaying garbage for the Vendor, Product, or Revision
	 * strings.
	 */
	if (sdev->inquiry_len < 36) {
		/* warn only once per host to avoid log spam */
		if (!sdev->host->short_inquiry) {
			shost_printk(KERN_INFO, sdev->host,
				    "scsi scan: INQUIRY result too short (%d),"
				    " using 36\n", sdev->inquiry_len);
			sdev->host->short_inquiry = 1;
		}
		sdev->inquiry_len = 36;
	}

	/*
	 * Related to the above issue:
	 *
	 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
	 * and if not ready, sent a START_STOP to start (maybe spin up) and
	 * then send the INQUIRY again, since the INQUIRY can change after
	 * a device is initialized.
	 *
	 * Ideally, start a device if explicitly asked to do so.  This
	 * assumes that a device is spun up on power on, spun down on
	 * request, and then spun up on request.
	 */

	/*
	 * The scanning code needs to know the scsi_level, even if no
	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
	 * non-zero LUNs can be scanned.
	 */
	sdev->scsi_level = inq_result[2] & 0x0f;
	if (sdev->scsi_level >= 2 ||
	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
		sdev->scsi_level++;
	sdev->sdev_target->scsi_level = sdev->scsi_level;

	/*
	 * If SCSI-2 or lower, and if the transport requires it,
	 * store the LUN value in CDB[1].
	 */
	sdev->lun_in_cdb = 0;
	if (sdev->scsi_level <= SCSI_2 &&
	    sdev->scsi_level != SCSI_UNKNOWN &&
	    !sdev->host->no_scsi2_lun_in_cdb)
		sdev->lun_in_cdb = 1;

	return 0;
}
863 
864 /**
865  * scsi_add_lun - allocate and fully initialze a scsi_device
866  * @sdev:	holds information to be stored in the new scsi_device
867  * @inq_result:	holds the result of a previous INQUIRY to the LUN
868  * @bflags:	black/white list flag
869  * @async:	1 if this device is being scanned asynchronously
870  *
871  * Description:
872  *     Initialize the scsi_device @sdev.  Optionally set fields based
873  *     on values in *@bflags.
874  *
875  * Return:
876  *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
877  *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
878  **/
879 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
880 		blist_flags_t *bflags, int async)
881 {
882 	const struct scsi_host_template *hostt = sdev->host->hostt;
883 	struct queue_limits lim;
884 	int ret;
885 
886 	/*
887 	 * XXX do not save the inquiry, since it can change underneath us,
888 	 * save just vendor/model/rev.
889 	 *
890 	 * Rather than save it and have an ioctl that retrieves the saved
891 	 * value, have an ioctl that executes the same INQUIRY code used
892 	 * in scsi_probe_lun, let user level programs doing INQUIRY
893 	 * scanning run at their own risk, or supply a user level program
894 	 * that can correctly scan.
895 	 */
896 
897 	/*
898 	 * Copy at least 36 bytes of INQUIRY data, so that we don't
899 	 * dereference unallocated memory when accessing the Vendor,
900 	 * Product, and Revision strings.  Badly behaved devices may set
901 	 * the INQUIRY Additional Length byte to a small value, indicating
902 	 * these strings are invalid, but often they contain plausible data
903 	 * nonetheless.  It doesn't matter if the device sent < 36 bytes
904 	 * total, since scsi_probe_lun() initializes inq_result with 0s.
905 	 */
906 	sdev->inquiry = kmemdup(inq_result,
907 				max_t(size_t, sdev->inquiry_len, 36),
908 				GFP_KERNEL);
909 	if (sdev->inquiry == NULL)
910 		return SCSI_SCAN_NO_RESPONSE;
911 
912 	sdev->vendor = (char *) (sdev->inquiry + 8);
913 	sdev->model = (char *) (sdev->inquiry + 16);
914 	sdev->rev = (char *) (sdev->inquiry + 32);
915 
916 	sdev->is_ata = strncmp(sdev->vendor, "ATA     ", 8) == 0;
917 	if (sdev->is_ata) {
918 		/*
919 		 * sata emulation layer device.  This is a hack to work around
920 		 * the SATL power management specifications which state that
921 		 * when the SATL detects the device has gone into standby
922 		 * mode, it shall respond with NOT READY.
923 		 */
924 		sdev->allow_restart = 1;
925 	}
926 
927 	if (*bflags & BLIST_ISROM) {
928 		sdev->type = TYPE_ROM;
929 		sdev->removable = 1;
930 	} else {
931 		sdev->type = (inq_result[0] & 0x1f);
932 		sdev->removable = (inq_result[1] & 0x80) >> 7;
933 
934 		/*
935 		 * some devices may respond with wrong type for
936 		 * well-known logical units. Force well-known type
937 		 * to enumerate them correctly.
938 		 */
939 		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
940 			sdev_printk(KERN_WARNING, sdev,
941 				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
942 				__func__, sdev->type, (unsigned int)sdev->lun);
943 			sdev->type = TYPE_WLUN;
944 		}
945 
946 	}
947 
948 	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
949 		/* RBC and MMC devices can return SCSI-3 compliance and yet
950 		 * still not support REPORT LUNS, so make them act as
951 		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
952 		 * specifically set */
953 		if ((*bflags & BLIST_REPORTLUN2) == 0)
954 			*bflags |= BLIST_NOREPORTLUN;
955 	}
956 
957 	/*
958 	 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
959 	 * spec says: The device server is capable of supporting the
960 	 * specified peripheral device type on this logical unit. However,
961 	 * the physical device is not currently connected to this logical
962 	 * unit.
963 	 *
964 	 * The above is vague, as it implies that we could treat 001 and
965 	 * 011 the same. Stay compatible with previous code, and create a
966 	 * scsi_device for a PQ of 1
967 	 *
968 	 * Don't set the device offline here; rather let the upper
969 	 * level drivers eval the PQ to decide whether they should
970 	 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
971 	 */
972 
973 	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
974 	sdev->lockable = sdev->removable;
975 	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
976 
977 	if (sdev->scsi_level >= SCSI_3 ||
978 			(sdev->inquiry_len > 56 && inq_result[56] & 0x04))
979 		sdev->ppr = 1;
980 	if (inq_result[7] & 0x60)
981 		sdev->wdtr = 1;
982 	if (inq_result[7] & 0x10)
983 		sdev->sdtr = 1;
984 
985 	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
986 			"ANSI: %d%s\n", scsi_device_type(sdev->type),
987 			sdev->vendor, sdev->model, sdev->rev,
988 			sdev->inq_periph_qual, inq_result[2] & 0x07,
989 			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");
990 
991 	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
992 	    !(*bflags & BLIST_NOTQ)) {
993 		sdev->tagged_supported = 1;
994 		sdev->simple_tags = 1;
995 	}
996 
997 	/*
998 	 * Some devices (Texel CD ROM drives) have handshaking problems
999 	 * when used with the Seagate controllers. borken is initialized
1000 	 * to 1, and then set it to 0 here.
1001 	 */
1002 	if ((*bflags & BLIST_BORKEN) == 0)
1003 		sdev->borken = 0;
1004 
1005 	if (*bflags & BLIST_NO_ULD_ATTACH)
1006 		sdev->no_uld_attach = 1;
1007 
1008 	/*
1009 	 * Apparently some really broken devices (contrary to the SCSI
1010 	 * standards) need to be selected without asserting ATN
1011 	 */
1012 	if (*bflags & BLIST_SELECT_NO_ATN)
1013 		sdev->select_no_atn = 1;
1014 
1015 	/*
1016 	 * Some devices may not want to have a start command automatically
1017 	 * issued when a device is added.
1018 	 */
1019 	if (*bflags & BLIST_NOSTARTONADD)
1020 		sdev->no_start_on_add = 1;
1021 
1022 	if (*bflags & BLIST_SINGLELUN)
1023 		scsi_target(sdev)->single_lun = 1;
1024 
1025 	sdev->use_10_for_rw = 1;
1026 
1027 	/* some devices don't like REPORT SUPPORTED OPERATION CODES
1028 	 * and will simply timeout causing sd_mod init to take a very
1029 	 * very long time */
1030 	if (*bflags & BLIST_NO_RSOC)
1031 		sdev->no_report_opcodes = 1;
1032 
1033 	/* set the device running here so that slave configure
1034 	 * may do I/O */
1035 	mutex_lock(&sdev->state_mutex);
1036 	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1037 	if (ret)
1038 		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1039 	mutex_unlock(&sdev->state_mutex);
1040 
1041 	if (ret) {
1042 		sdev_printk(KERN_ERR, sdev,
1043 			    "in wrong state %s to complete scan\n",
1044 			    scsi_device_state_name(sdev->sdev_state));
1045 		return SCSI_SCAN_NO_RESPONSE;
1046 	}
1047 
1048 	if (*bflags & BLIST_NOT_LOCKABLE)
1049 		sdev->lockable = 0;
1050 
1051 	if (*bflags & BLIST_RETRY_HWERROR)
1052 		sdev->retry_hwerror = 1;
1053 
1054 	if (*bflags & BLIST_NO_DIF)
1055 		sdev->no_dif = 1;
1056 
1057 	if (*bflags & BLIST_UNMAP_LIMIT_WS)
1058 		sdev->unmap_limit_for_ws = 1;
1059 
1060 	if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1061 		sdev->ignore_media_change = 1;
1062 
1063 	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1064 
1065 	if (*bflags & BLIST_TRY_VPD_PAGES)
1066 		sdev->try_vpd_pages = 1;
1067 	else if (*bflags & BLIST_SKIP_VPD_PAGES)
1068 		sdev->skip_vpd_pages = 1;
1069 
1070 	if (*bflags & BLIST_NO_VPD_SIZE)
1071 		sdev->no_vpd_size = 1;
1072 
1073 	transport_configure_device(&sdev->sdev_gendev);
1074 
1075 	sdev->sdev_bflags = *bflags;
1076 
1077 	if (scsi_device_is_pseudo_dev(sdev))
1078 		return SCSI_SCAN_LUN_PRESENT;
1079 
1080 	/*
1081 	 * No need to freeze the queue as it isn't reachable to anyone else yet.
1082 	 */
1083 	lim = queue_limits_start_update(sdev->request_queue);
1084 	if (*bflags & BLIST_MAX_512)
1085 		lim.max_hw_sectors = 512;
1086 	else if (*bflags & BLIST_MAX_1024)
1087 		lim.max_hw_sectors = 1024;
1088 
1089 	if (hostt->sdev_configure)
1090 		ret = hostt->sdev_configure(sdev, &lim);
1091 	if (ret) {
1092 		queue_limits_cancel_update(sdev->request_queue);
1093 		/*
1094 		 * If the LLDD reports device not present, don't clutter the
1095 		 * console with failure messages.
1096 		 */
1097 		if (ret != -ENXIO)
1098 			sdev_printk(KERN_ERR, sdev,
1099 				"failed to configure device\n");
1100 		return SCSI_SCAN_NO_RESPONSE;
1101 	}
1102 
1103 	ret = queue_limits_commit_update(sdev->request_queue, &lim);
1104 	if (ret) {
1105 		sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
1106 		return SCSI_SCAN_NO_RESPONSE;
1107 	}
1108 
1109 	/*
1110 	 * The queue_depth is often changed in ->sdev_configure.
1111 	 *
1112 	 * Set up budget map again since memory consumption of the map depends
1113 	 * on actual queue depth.
1114 	 */
1115 	if (hostt->sdev_configure)
1116 		scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1117 
1118 	if (sdev->scsi_level >= SCSI_3)
1119 		scsi_attach_vpd(sdev);
1120 
1121 	scsi_cdl_check(sdev);
1122 
1123 	sdev->max_queue_depth = sdev->queue_depth;
1124 	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1125 
1126 	/*
1127 	 * Ok, the device is now all set up, we can
1128 	 * register it and tell the rest of the kernel
1129 	 * about it.
1130 	 */
1131 	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1132 		return SCSI_SCAN_NO_RESPONSE;
1133 
1134 	return SCSI_SCAN_LUN_PRESENT;
1135 }
1136 
1137 #ifdef CONFIG_SCSI_LOGGING
1138 /**
1139  * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
1140  * @buf:   Output buffer with at least end-first+1 bytes of space
1141  * @inq:   Inquiry buffer (input)
1142  * @first: Offset of string into inq
1143  * @end:   Index after last character in inq
1144  */
static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
				   unsigned first, unsigned end)
{
	/*
	 * Copy bytes [first, end) of the INQUIRY response into buf.
	 * Bytes <= ' ' are replaced with spaces, the copy stops early if
	 * the Additional Length field (inq[4], data ends at inq[4] + 5)
	 * says the response is shorter, and the result is NUL-terminated
	 * right after the last non-space character.
	 */
	unsigned last_nonspace = 0;
	unsigned out = 0;
	unsigned pos;

	for (pos = first; pos < end && pos < (unsigned)inq[4] + 5; pos++) {
		unsigned char c = inq[pos];

		if (c > ' ') {
			buf[out] = c;
			last_nonspace = out + 1;
		} else {
			buf[out] = ' ';
		}
		out++;
	}
	buf[last_nonspace] = 0;
	return buf;
}
1161 #endif
1162 
1163 /**
1164  * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1165  * @starget:	pointer to target device structure
1166  * @lun:	LUN of target device
1167  * @bflagsp:	store bflags here if not NULL
1168  * @sdevp:	probe the LUN corresponding to this scsi_device
1169  * @rescan:     if not equal to SCSI_SCAN_INITIAL skip some code only
1170  *              needed on first scan
1171  * @hostdata:	passed to scsi_alloc_sdev()
1172  *
1173  * Description:
1174  *     Call scsi_probe_lun, if a LUN with an attached device is found,
1175  *     allocate and set it up by calling scsi_add_lun.
1176  *
1177  * Return:
1178  *
1179  *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1180  *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1181  *         attached at the LUN
1182  *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1183  **/
1184 static int scsi_probe_and_add_lun(struct scsi_target *starget,
1185 				  u64 lun, blist_flags_t *bflagsp,
1186 				  struct scsi_device **sdevp,
1187 				  enum scsi_scan_mode rescan,
1188 				  void *hostdata)
1189 {
1190 	struct scsi_device *sdev;
1191 	unsigned char *result;
1192 	blist_flags_t bflags;
1193 	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1194 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1195 
1196 	/*
1197 	 * The rescan flag is used as an optimization, the first scan of a
1198 	 * host adapter calls into here with rescan == 0.
1199 	 */
1200 	sdev = scsi_device_lookup_by_target(starget, lun);
1201 	if (sdev) {
1202 		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1203 			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1204 				"scsi scan: device exists on %s\n",
1205 				dev_name(&sdev->sdev_gendev)));
1206 			if (sdevp)
1207 				*sdevp = sdev;
1208 			else
1209 				scsi_device_put(sdev);
1210 
1211 			if (bflagsp)
1212 				*bflagsp = scsi_get_device_flags(sdev,
1213 								 sdev->vendor,
1214 								 sdev->model);
1215 			return SCSI_SCAN_LUN_PRESENT;
1216 		}
1217 		scsi_device_put(sdev);
1218 	} else
1219 		sdev = scsi_alloc_sdev(starget, lun, hostdata);
1220 	if (!sdev)
1221 		goto out;
1222 
1223 	if (scsi_device_is_pseudo_dev(sdev)) {
1224 		if (bflagsp)
1225 			*bflagsp = BLIST_NOLUN;
1226 		return SCSI_SCAN_LUN_PRESENT;
1227 	}
1228 
1229 	result = kmalloc(result_len, GFP_KERNEL);
1230 	if (!result)
1231 		goto out_free_sdev;
1232 
1233 	if (scsi_probe_lun(sdev, result, result_len, &bflags))
1234 		goto out_free_result;
1235 
1236 	if (bflagsp)
1237 		*bflagsp = bflags;
1238 	/*
1239 	 * result contains valid SCSI INQUIRY data.
1240 	 */
1241 	if ((result[0] >> 5) == 3) {
1242 		/*
1243 		 * For a Peripheral qualifier 3 (011b), the SCSI
1244 		 * spec says: The device server is not capable of
1245 		 * supporting a physical device on this logical
1246 		 * unit.
1247 		 *
1248 		 * For disks, this implies that there is no
1249 		 * logical disk configured at sdev->lun, but there
1250 		 * is a target id responding.
1251 		 */
1252 		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1253 				   " peripheral qualifier of 3, device not"
1254 				   " added\n"))
1255 		if (lun == 0) {
1256 			SCSI_LOG_SCAN_BUS(1, {
1257 				unsigned char vend[9];
1258 				unsigned char mod[17];
1259 
1260 				sdev_printk(KERN_INFO, sdev,
1261 					"scsi scan: consider passing scsi_mod."
1262 					"dev_flags=%s:%s:0x240 or 0x1000240\n",
1263 					scsi_inq_str(vend, result, 8, 16),
1264 					scsi_inq_str(mod, result, 16, 32));
1265 			});
1266 
1267 		}
1268 
1269 		res = SCSI_SCAN_TARGET_PRESENT;
1270 		goto out_free_result;
1271 	}
1272 
1273 	/*
1274 	 * Some targets may set slight variations of PQ and PDT to signal
1275 	 * that no LUN is present, so don't add sdev in these cases.
1276 	 * Two specific examples are:
1277 	 * 1) NetApp targets: return PQ=1, PDT=0x1f
1278 	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1279 	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
1280 	 *
1281 	 * References:
1282 	 * 1) SCSI SPC-3, pp. 145-146
1283 	 * PQ=1: "A peripheral device having the specified peripheral
1284 	 * device type is not connected to this logical unit. However, the
1285 	 * device server is capable of supporting the specified peripheral
1286 	 * device type on this logical unit."
1287 	 * PDT=0x1f: "Unknown or no device type"
1288 	 * 2) USB UFI 1.0, p. 20
1289 	 * PDT=00h Direct-access device (floppy)
1290 	 * PDT=1Fh none (no FDD connected to the requested logical unit)
1291 	 */
1292 	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1293 	    (result[0] & 0x1f) == 0x1f &&
1294 	    !scsi_is_wlun(lun)) {
1295 		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1296 					"scsi scan: peripheral device type"
1297 					" of 31, no device added\n"));
1298 		res = SCSI_SCAN_TARGET_PRESENT;
1299 		goto out_free_result;
1300 	}
1301 
1302 	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1303 	if (res == SCSI_SCAN_LUN_PRESENT) {
1304 		if (bflags & BLIST_KEY) {
1305 			sdev->lockable = 0;
1306 			scsi_unlock_floptical(sdev, result);
1307 		}
1308 	}
1309 
1310  out_free_result:
1311 	kfree(result);
1312  out_free_sdev:
1313 	if (res == SCSI_SCAN_LUN_PRESENT) {
1314 		if (sdevp) {
1315 			if (scsi_device_get(sdev) == 0) {
1316 				*sdevp = sdev;
1317 			} else {
1318 				__scsi_remove_device(sdev);
1319 				res = SCSI_SCAN_NO_RESPONSE;
1320 			}
1321 		}
1322 	} else
1323 		__scsi_remove_device(sdev);
1324  out:
1325 	return res;
1326 }
1327 
1328 /**
1329  * scsi_sequential_lun_scan - sequentially scan a SCSI target
1330  * @starget:	pointer to target structure to scan
1331  * @bflags:	black/white list flag for LUN 0
1332  * @scsi_level: Which version of the standard does this device adhere to
 * @rescan:     passed to scsi_probe_and_add_lun()
1334  *
1335  * Description:
1336  *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1337  *     scanned) to some maximum lun until a LUN is found with no device
1338  *     attached. Use the bflags to figure out any oddities.
1339  *
1340  *     Modifies sdevscan->lun.
1341  **/
1342 static void scsi_sequential_lun_scan(struct scsi_target *starget,
1343 				     blist_flags_t bflags, int scsi_level,
1344 				     enum scsi_scan_mode rescan)
1345 {
1346 	uint max_dev_lun;
1347 	u64 sparse_lun, lun;
1348 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1349 
1350 	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1351 		"scsi scan: Sequential scan\n"));
1352 
1353 	max_dev_lun = min(max_scsi_luns, shost->max_lun);
1354 	/*
1355 	 * If this device is known to support sparse multiple units,
1356 	 * override the other settings, and scan all of them. Normally,
1357 	 * SCSI-3 devices should be scanned via the REPORT LUNS.
1358 	 */
1359 	if (bflags & BLIST_SPARSELUN) {
1360 		max_dev_lun = shost->max_lun;
1361 		sparse_lun = 1;
1362 	} else
1363 		sparse_lun = 0;
1364 
1365 	/*
1366 	 * If less than SCSI_1_CCS, and no special lun scanning, stop
1367 	 * scanning; this matches 2.4 behaviour, but could just be a bug
1368 	 * (to continue scanning a SCSI_1_CCS device).
1369 	 *
1370 	 * This test is broken.  We might not have any device on lun0 for
1371 	 * a sparselun device, and if that's the case then how would we
1372 	 * know the real scsi_level, eh?  It might make sense to just not
1373 	 * scan any SCSI_1 device for non-0 luns, but that check would best
1374 	 * go into scsi_alloc_sdev() and just have it return null when asked
1375 	 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1376 	 *
1377 	if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1378 	    ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1379 	     == 0))
1380 		return;
1381 	 */
1382 	/*
1383 	 * If this device is known to support multiple units, override
1384 	 * the other settings, and scan all of them.
1385 	 */
1386 	if (bflags & BLIST_FORCELUN)
1387 		max_dev_lun = shost->max_lun;
1388 	/*
1389 	 * REGAL CDC-4X: avoid hang after LUN 4
1390 	 */
1391 	if (bflags & BLIST_MAX5LUN)
1392 		max_dev_lun = min(5U, max_dev_lun);
1393 	/*
1394 	 * Do not scan SCSI-2 or lower device past LUN 7, unless
1395 	 * BLIST_LARGELUN.
1396 	 */
1397 	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1398 		max_dev_lun = min(8U, max_dev_lun);
1399 	else
1400 		max_dev_lun = min(256U, max_dev_lun);
1401 
1402 	/*
1403 	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1404 	 * until we reach the max, or no LUN is found and we are not
1405 	 * sparse_lun.
1406 	 */
1407 	for (lun = 1; lun < max_dev_lun; ++lun)
1408 		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1409 					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
1410 		    !sparse_lun)
1411 			return;
1412 }
1413 
1414 /**
1415  * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1416  * @starget: which target
1417  * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1418  * @rescan: nonzero if we can skip code only needed on first scan
1419  *
1420  * Description:
1421  *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1422  *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1423  *
 *   If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1425  *   LUNs even if it's older than SCSI-3.
1426  *   If BLIST_NOREPORTLUN is set, return 1 always.
1427  *   If BLIST_NOLUN is set, return 0 always.
1428  *   If starget->no_report_luns is set, return 1 always.
1429  *
1430  * Return:
1431  *     0: scan completed (or no memory, so further scanning is futile)
1432  *     1: could not scan with REPORT LUN
1433  **/
static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
				enum scsi_scan_mode rescan)
{
	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
	unsigned int length;
	u64 lun;
	unsigned int num_luns;
	int result;
	struct scsi_lun *lunp, *lun_data;
	struct scsi_device *sdev;
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct scsi_failure failure_defs[] = {
		/* Retry UNIT ATTENTIONs (e.g. power on/reset); see the
		 * comment block above scsi_failures_reset_retries() below */
		{
			.sense = UNIT_ATTENTION,
			.asc = SCMD_FAILURE_ASC_ANY,
			.ascq = SCMD_FAILURE_ASCQ_ANY,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		/* Fail all CCs except the UA above */
		{
			.sense = SCMD_FAILURE_SENSE_ANY,
			.result = SAM_STAT_CHECK_CONDITION,
		},
		/* Retry any other errors not listed above */
		{
			.result = SCMD_FAILURE_RESULT_ANY,
		},
		{}
	};
	struct scsi_failures failures = {
		.total_allowed = 3,
		.failure_definitions = failure_defs,
	};
	const struct scsi_exec_args exec_args = {
		.failures = &failures,
	};
	int ret = 0;

	/*
	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
	 * support more than 8 LUNs.
	 * Don't attempt if the target doesn't support REPORT LUNS.
	 */
	if (bflags & BLIST_NOREPORTLUN)
		return 1;
	if (starget->scsi_level < SCSI_2 &&
	    starget->scsi_level != SCSI_UNKNOWN)
		return 1;
	if (starget->scsi_level < SCSI_3 &&
	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
		return 1;
	if (bflags & BLIST_NOLUN)
		return 0;
	if (starget->no_report_luns)
		return 1;

	/*
	 * Issue the command through an existing LUN 0 sdev, or allocate a
	 * temporary one (removed again at "out:" below if it never shows
	 * up in the returned LUN list).
	 */
	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
		sdev = scsi_alloc_sdev(starget, 0, NULL);
		if (!sdev)
			return 0;
		if (scsi_device_get(sdev)) {
			__scsi_remove_device(sdev);
			return 0;
		}
	}

	/*
	 * Allocate enough to hold the header (the same size as one scsi_lun)
	 * plus the number of luns we are requesting.  511 was the default
	 * value of the now removed max_report_luns parameter.
	 */
	length = (511 + 1) * sizeof(struct scsi_lun);
retry:
	lun_data = kmalloc(length, GFP_KERNEL);
	if (!lun_data) {
		printk(ALLOC_FAILURE_MSG, __func__);
		goto out;
	}

	scsi_cmd[0] = REPORT_LUNS;

	/*
	 * bytes 1 - 5: reserved, set to zero.
	 */
	memset(&scsi_cmd[1], 0, 5);

	/*
	 * bytes 6 - 9: length of the command.
	 */
	put_unaligned_be32(length, &scsi_cmd[6]);

	scsi_cmd[10] = 0;	/* reserved */
	scsi_cmd[11] = 0;	/* control */

	/*
	 * We can get a UNIT ATTENTION, for example a power on/reset, so
	 * retry a few times (like sd.c does for TEST UNIT READY).
	 * Experience shows some combinations of adapter/devices get at
	 * least two power on/resets.
	 *
	 * Illegal requests (for devices that do not support REPORT LUNS)
	 * should come through as a check condition, and will not generate
	 * a retry.
	 */
	scsi_failures_reset_retries(&failures);

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
			  "scsi scan: Sending REPORT LUNS\n"));

	result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
				  length, SCSI_REPORT_LUNS_TIMEOUT, 3,
				  &exec_args);

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
			  "scsi scan: REPORT LUNS  %s result 0x%x\n",
			  result ?  "failed" : "successful", result));
	if (result) {
		/*
		 * The device probably does not support a REPORT LUN command
		 */
		ret = 1;
		goto out_err;
	}

	/*
	 * Get the length from the first four bytes of lun_data.
	 * If the target reports more LUN list data than our buffer can
	 * hold, grow the buffer and reissue the command.
	 */
	if (get_unaligned_be32(lun_data->scsi_lun) +
	    sizeof(struct scsi_lun) > length) {
		length = get_unaligned_be32(lun_data->scsi_lun) +
			 sizeof(struct scsi_lun);
		kfree(lun_data);
		goto retry;
	}
	length = get_unaligned_be32(lun_data->scsi_lun);

	num_luns = (length / sizeof(struct scsi_lun));

	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
		"scsi scan: REPORT LUN scan\n"));

	/*
	 * Scan the luns in lun_data. The entry at offset 0 is really
	 * the header, so start at 1 and go up to and including num_luns.
	 */
	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
		lun = scsilun_to_int(lunp);

		if (lun > sdev->host->max_lun) {
			sdev_printk(KERN_WARNING, sdev,
				    "lun%llu has a LUN larger than"
				    " allowed by the host adapter\n", lun);
		} else {
			int res;

			res = scsi_probe_and_add_lun(starget,
				lun, NULL, NULL, rescan, NULL);
			if (res == SCSI_SCAN_NO_RESPONSE) {
				/*
				 * Got some results, but now none, abort.
				 */
				sdev_printk(KERN_ERR, sdev,
					"Unexpected response"
					" from lun %llu while scanning, scan"
					" aborted\n", (unsigned long long)lun);
				break;
			}
		}
	}

 out_err:
	kfree(lun_data);
 out:
	if (scsi_device_created(sdev))
		/*
		 * the sdev we used didn't appear in the report luns scan
		 */
		__scsi_remove_device(sdev);
	scsi_device_put(sdev);
	return ret;
}
1616 
/*
 * __scsi_add_device - probe a single LUN and return its scsi_device
 * @shost:    host to probe on
 * @channel:  target channel number
 * @id:       target id
 * @lun:      LUN to probe
 * @hostdata: passed through to scsi_alloc_sdev()
 *
 * Returns the scsi_device with an extra reference held on success (the
 * caller must drop it with scsi_device_put(), as scsi_add_device()
 * does), or an ERR_PTR on failure.
 */
struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
				      uint id, u64 lun, void *hostdata)
{
	struct scsi_device *sdev = ERR_PTR(-ENODEV);
	struct device *parent = &shost->shost_gendev;
	struct scsi_target *starget;

	/* scsi_mod.scan=none disables scanning, even explicit requests. */
	if (strncmp(scsi_scan_type, "none", 4) == 0)
		return ERR_PTR(-ENODEV);

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return ERR_PTR(-ENOMEM);
	scsi_autopm_get_target(starget);

	mutex_lock(&shost->scan_mutex);
	if (!shost->async_scan)
		scsi_complete_async_scans();

	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
				       SCSI_SCAN_RESCAN, hostdata);
		scsi_autopm_put_host(shost);
	}
	mutex_unlock(&shost->scan_mutex);
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target().  Target will be destroyed unless
	 * scsi_probe_and_add_lun made an underlying device visible
	 */
	scsi_target_reap(starget);
	put_device(&starget->dev);

	return sdev;
}
1652 EXPORT_SYMBOL(__scsi_add_device);
1653 
1654 /**
1655  * scsi_add_device - creates a new SCSI (LU) instance
1656  * @host: the &Scsi_Host instance where the device is located
1657  * @channel: target channel number (rarely other than %0)
1658  * @target: target id number
1659  * @lun: LUN of target device
1660  *
1661  * Probe for a specific LUN and add it if found.
1662  *
1663  * Notes: This call is usually performed internally during a SCSI
1664  * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
1665  * should only be called if the HBA becomes aware of a new SCSI
1666  * device (LU) after scsi_scan_host() has completed. If successful
1667  * this call can lead to sdev_init() and sdev_configure() callbacks
1668  * into the LLD.
1669  *
1670  * Return: %0 on success or negative error code on failure
1671  */
1672 int scsi_add_device(struct Scsi_Host *host, uint channel,
1673 		    uint target, u64 lun)
1674 {
1675 	struct scsi_device *sdev =
1676 		__scsi_add_device(host, channel, target, lun, NULL);
1677 	if (IS_ERR(sdev))
1678 		return PTR_ERR(sdev);
1679 
1680 	scsi_device_put(sdev);
1681 	return 0;
1682 }
1683 EXPORT_SYMBOL(scsi_add_device);
1684 
/*
 * scsi_resume_device - invoke the upper-level driver's resume callback
 * @sdev: the device to resume
 *
 * Returns 0 on success (including when no driver or no ->resume()
 * callback is bound), -EWOULDBLOCK when the device or its queue is not
 * running, or the value returned by the driver's ->resume() callback.
 */
int scsi_resume_device(struct scsi_device *sdev)
{
	struct device *dev = &sdev->sdev_gendev;
	int ret = 0;

	device_lock(dev);

	/*
	 * Bail out if the device or its queue are not running. Otherwise,
	 * the rescan may block waiting for commands to be executed, with us
	 * holding the device lock. This can result in a potential deadlock
	 * in the power management core code when system resume is on-going.
	 */
	if (sdev->sdev_state != SDEV_RUNNING ||
	    blk_queue_pm_only(sdev->request_queue)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	/* Pin the driver module while invoking its callback. */
	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->resume)
			ret = drv->resume(dev);
		module_put(dev->driver->owner);
	}

unlock:
	device_unlock(dev);

	return ret;
}
1717 EXPORT_SYMBOL(scsi_resume_device);
1718 
/*
 * scsi_rescan_device - re-examine an existing device for changes
 * @sdev: the device to rescan
 *
 * Re-reads the VPD pages, re-runs scsi_cdl_check() (command duration
 * limits), and invokes the device handler's and upper-level driver's
 * rescan callbacks, if any.  Returns 0 on success or -EWOULDBLOCK when
 * the device or its queue is not running.
 */
int scsi_rescan_device(struct scsi_device *sdev)
{
	struct device *dev = &sdev->sdev_gendev;
	int ret = 0;

	device_lock(dev);

	/*
	 * Bail out if the device or its queue are not running. Otherwise,
	 * the rescan may block waiting for commands to be executed, with us
	 * holding the device lock. This can result in a potential deadlock
	 * in the power management core code when system resume is on-going.
	 */
	if (sdev->sdev_state != SDEV_RUNNING ||
	    blk_queue_pm_only(sdev->request_queue)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	scsi_attach_vpd(sdev);
	scsi_cdl_check(sdev);

	/* Let the device handler (e.g. multipath) react first. */
	if (sdev->handler && sdev->handler->rescan)
		sdev->handler->rescan(sdev);

	/* Pin the ULD module while invoking its rescan callback. */
	if (dev->driver && try_module_get(dev->driver->owner)) {
		struct scsi_driver *drv = to_scsi_driver(dev->driver);

		if (drv->rescan)
			drv->rescan(dev);
		module_put(dev->driver->owner);
	}

unlock:
	device_unlock(dev);

	return ret;
}
1757 EXPORT_SYMBOL(scsi_rescan_device);
1758 
/*
 * __scsi_scan_target - scan a single target id (one LUN or all LUNs)
 * @parent:  parent device of the target (the host's shost_gendev)
 * @channel: channel to scan
 * @id:      target id to scan
 * @lun:     specific LUN, or SCAN_WILD_CARD to scan the whole target
 * @rescan:  scan mode, passed through to the LUN scanning routines
 *
 * All callers in this file invoke this with shost->scan_mutex held.
 */
static void __scsi_scan_target(struct device *parent, unsigned int channel,
		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
{
	struct Scsi_Host *shost = dev_to_shost(parent);
	blist_flags_t bflags = 0;
	int res;
	struct scsi_target *starget;

	if (shost->this_id == id)
		/*
		 * Don't scan the host adapter
		 */
		return;

	starget = scsi_alloc_target(parent, channel, id);
	if (!starget)
		return;
	scsi_autopm_get_target(starget);

	if (lun != SCAN_WILD_CARD) {
		/*
		 * Scan for a specific host/chan/id/lun.
		 */
		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
		goto out_reap;
	}

	/*
	 * Scan LUN 0, if there is some response, scan further. Ideally, we
	 * would not configure LUN 0 until all LUNs are scanned.
	 */
	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
			/*
			 * The REPORT LUN did not scan the target,
			 * do a sequential scan.
			 */
			scsi_sequential_lun_scan(starget, bflags,
						 starget->scsi_level, rescan);
	}

 out_reap:
	scsi_autopm_put_target(starget);
	/*
	 * paired with scsi_alloc_target(): determine if the target has
	 * any children at all and if not, nuke it
	 */
	scsi_target_reap(starget);

	put_device(&starget->dev);
}
1811 
1812 /**
1813  * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1814  * @parent:	host to scan
1815  * @channel:	channel to scan
1816  * @id:		target id to scan
1817  * @lun:	Specific LUN to scan or SCAN_WILD_CARD
1818  * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1819  *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1820  *              and SCSI_SCAN_MANUAL to force scanning even if
1821  *              'scan=manual' is set.
1822  *
1823  * Description:
1824  *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1825  *     and possibly all LUNs on the target id.
1826  *
1827  *     First try a REPORT LUN scan, if that does not scan the target, do a
1828  *     sequential scan of LUNs on the target id.
1829  **/
1830 void scsi_scan_target(struct device *parent, unsigned int channel,
1831 		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1832 {
1833 	struct Scsi_Host *shost = dev_to_shost(parent);
1834 
1835 	if (strncmp(scsi_scan_type, "none", 4) == 0)
1836 		return;
1837 
1838 	if (rescan != SCSI_SCAN_MANUAL &&
1839 	    strncmp(scsi_scan_type, "manual", 6) == 0)
1840 		return;
1841 
1842 	mutex_lock(&shost->scan_mutex);
1843 	if (!shost->async_scan)
1844 		scsi_complete_async_scans();
1845 
1846 	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1847 		__scsi_scan_target(parent, channel, id, lun, rescan);
1848 		scsi_autopm_put_host(shost);
1849 	}
1850 	mutex_unlock(&shost->scan_mutex);
1851 }
1852 EXPORT_SYMBOL(scsi_scan_target);
1853 
1854 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1855 			      unsigned int id, u64 lun,
1856 			      enum scsi_scan_mode rescan)
1857 {
1858 	uint order_id;
1859 
1860 	if (id == SCAN_WILD_CARD)
1861 		for (id = 0; id < shost->max_id; ++id) {
1862 			/*
1863 			 * XXX adapter drivers when possible (FCP, iSCSI)
1864 			 * could modify max_id to match the current max,
1865 			 * not the absolute max.
1866 			 *
1867 			 * XXX add a shost id iterator, so for example,
1868 			 * the FC ID can be the same as a target id
1869 			 * without a huge overhead of sparse id's.
1870 			 */
1871 			if (shost->reverse_ordering)
1872 				/*
1873 				 * Scan from high to low id.
1874 				 */
1875 				order_id = shost->max_id - id - 1;
1876 			else
1877 				order_id = id;
1878 			__scsi_scan_target(&shost->shost_gendev, channel,
1879 					order_id, lun, rescan);
1880 		}
1881 	else
1882 		__scsi_scan_target(&shost->shost_gendev, channel,
1883 				id, lun, rescan);
1884 }
1885 
1886 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1887 			    unsigned int id, u64 lun,
1888 			    enum scsi_scan_mode rescan)
1889 {
1890 	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1891 		"%s: <%u:%u:%llu>\n",
1892 		__func__, channel, id, lun));
1893 
1894 	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1895 	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1896 	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1897 		return -EINVAL;
1898 
1899 	mutex_lock(&shost->scan_mutex);
1900 	if (!shost->async_scan)
1901 		scsi_complete_async_scans();
1902 
1903 	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1904 		if (channel == SCAN_WILD_CARD)
1905 			for (channel = 0; channel <= shost->max_channel;
1906 			     channel++)
1907 				scsi_scan_channel(shost, channel, id, lun,
1908 						  rescan);
1909 		else
1910 			scsi_scan_channel(shost, channel, id, lun, rescan);
1911 		scsi_autopm_put_host(shost);
1912 	}
1913 	mutex_unlock(&shost->scan_mutex);
1914 
1915 	return 0;
1916 }
1917 EXPORT_SYMBOL(scsi_scan_host_selected);
1918 static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1919 {
1920 	struct scsi_device *sdev;
1921 	shost_for_each_device(sdev, shost) {
1922 		/* target removed before the device could be added */
1923 		if (sdev->sdev_state == SDEV_DEL)
1924 			continue;
1925 		/* If device is already visible, skip adding it to sysfs */
1926 		if (sdev->is_visible)
1927 			continue;
1928 		if (!scsi_host_scan_allowed(shost) ||
1929 		    scsi_sysfs_add_sdev(sdev) != 0)
1930 			__scsi_remove_device(sdev);
1931 	}
1932 }
1933 
/**
 * scsi_prep_async_scan - prepare for an async scan
 * @shost: the host which will be scanned
 * Returns: a cookie to be passed to scsi_finish_async_scan()
 *
 * Tells the midlayer this host is going to do an asynchronous scan.
 * It reserves the host's position in the scanning list and ensures
 * that other asynchronous scans started after this one won't affect the
 * ordering of the discovered devices.
 *
 * Returns NULL when scanning must be synchronous: the "sync" scan type
 * was selected, an async scan is already in progress on @shost, or an
 * allocation failed.  The caller then falls back to a synchronous scan.
 */
static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
{
	struct async_scan_data *data = NULL;
	unsigned long flags;

	/* The scsi_scan_type module parameter can force synchronous scans. */
	if (strncmp(scsi_scan_type, "sync", 4) == 0)
		return NULL;

	mutex_lock(&shost->scan_mutex);
	if (shost->async_scan) {
		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
		goto err;
	}

	data = kmalloc_obj(*data);
	if (!data)
		goto err;
	/* Hold a host reference for the lifetime of the async scan. */
	data->shost = scsi_host_get(shost);
	if (!data->shost)
		goto err;
	/* Completed once all scans started before this one have finished. */
	init_completion(&data->prev_finished);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 1;
	spin_unlock_irqrestore(shost->host_lock, flags);
	mutex_unlock(&shost->scan_mutex);

	/*
	 * Queue this scan on the global list; when nobody is ahead of us
	 * the completion fires immediately so scsi_finish_async_scan()
	 * won't block.
	 */
	spin_lock(&async_scan_lock);
	if (list_empty(&scanning_hosts))
		complete(&data->prev_finished);
	list_add_tail(&data->list, &scanning_hosts);
	spin_unlock(&async_scan_lock);

	return data;

 err:
	/* kfree(NULL) is a no-op, so the pre-allocation failures are fine. */
	mutex_unlock(&shost->scan_mutex);
	kfree(data);
	return NULL;
}
1984 
/**
 * scsi_finish_async_scan - asynchronous scan has finished
 * @data: cookie returned from earlier call to scsi_prep_async_scan()
 *
 * All the devices currently attached to this host have been found.
 * This function announces all the devices it has found to the rest
 * of the system.
 *
 * A NULL @data (synchronous scan) is a no-op.
 */
static void scsi_finish_async_scan(struct async_scan_data *data)
{
	struct Scsi_Host *shost;
	unsigned long flags;

	if (!data)
		return;

	shost = data->shost;

	mutex_lock(&shost->scan_mutex);

	/* Guard against being called twice for the same scan. */
	if (!shost->async_scan) {
		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
		dump_stack();
		mutex_unlock(&shost->scan_mutex);
		return;
	}

	/*
	 * Preserve discovery ordering: block until every scan started
	 * before this one has announced its devices.
	 */
	wait_for_completion(&data->prev_finished);

	scsi_sysfs_add_devices(shost);

	spin_lock_irqsave(shost->host_lock, flags);
	shost->async_scan = 0;
	spin_unlock_irqrestore(shost->host_lock, flags);

	mutex_unlock(&shost->scan_mutex);

	/* Leave the scanning list and wake the next queued scan, if any. */
	spin_lock(&async_scan_lock);
	list_del(&data->list);
	if (!list_empty(&scanning_hosts)) {
		struct async_scan_data *next = list_entry(scanning_hosts.next,
				struct async_scan_data, list);
		complete(&next->prev_finished);
	}
	spin_unlock(&async_scan_lock);

	/*
	 * Drop the runtime-PM and host references taken in scsi_scan_host()
	 * and scsi_prep_async_scan() respectively.
	 */
	scsi_autopm_put_host(shost);
	scsi_host_put(shost);
	kfree(data);
}
2035 
2036 static void do_scsi_scan_host(struct Scsi_Host *shost)
2037 {
2038 	if (shost->hostt->scan_finished) {
2039 		unsigned long start = jiffies;
2040 		if (shost->hostt->scan_start)
2041 			shost->hostt->scan_start(shost);
2042 
2043 		while (!shost->hostt->scan_finished(shost, jiffies - start))
2044 			msleep(10);
2045 	} else {
2046 		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
2047 				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2048 	}
2049 }
2050 
2051 static void do_scan_async(void *_data, async_cookie_t c)
2052 {
2053 	struct async_scan_data *data = _data;
2054 	struct Scsi_Host *shost = data->shost;
2055 
2056 	do_scsi_scan_host(shost);
2057 	scsi_finish_async_scan(data);
2058 }
2059 
2060 /**
2061  * scsi_scan_host - scan the given adapter
2062  * @shost:	adapter to scan
2063  *
2064  * Notes: Should be called after scsi_add_host()
2065  **/
2066 void scsi_scan_host(struct Scsi_Host *shost)
2067 {
2068 	struct async_scan_data *data;
2069 
2070 	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
2071 	    strncmp(scsi_scan_type, "manual", 6) == 0)
2072 		return;
2073 	if (scsi_autopm_get_host(shost) < 0)
2074 		return;
2075 
2076 	data = scsi_prep_async_scan(shost);
2077 	if (!data) {
2078 		do_scsi_scan_host(shost);
2079 		scsi_autopm_put_host(shost);
2080 		return;
2081 	}
2082 
2083 	/* register with the async subsystem so wait_for_device_probe()
2084 	 * will flush this work
2085 	 */
2086 	async_schedule(do_scan_async, data);
2087 
2088 	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
2089 }
2090 EXPORT_SYMBOL(scsi_scan_host);
2091 
/*
 * Remove every SCSI device attached to @shost.  The host lock cannot be
 * held across __scsi_remove_device(), so the list walk restarts from the
 * top after each removal; already-deleted and pseudo devices are skipped.
 */
void scsi_forget_host(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

 restart:
	spin_lock_irqsave(shost->host_lock, flags);
	list_for_each_entry(sdev, &shost->__devices, siblings) {
		if (scsi_device_is_pseudo_dev(sdev) ||
		    sdev->sdev_state == SDEV_DEL)
			continue;
		/* Drop the lock before removing, then rescan the list. */
		spin_unlock_irqrestore(shost->host_lock, flags);
		__scsi_remove_device(sdev);
		goto restart;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Remove the pseudo device last since it may be needed during removal
	 * of other SCSI devices.
	 */
	if (shost->pseudo_sdev)
		__scsi_remove_device(shost->pseudo_sdev);
}
2116 
/**
 * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
 * @shost: Host that needs a pseudo SCSI device
 *
 * Lock status: None assumed.
 *
 * Returns:     The scsi_device or NULL
 *
 * Notes:
 *	Attach a single scsi_device to the Scsi_Host. The primary aim for this
 *	device is to serve as a container from which SCSI commands can be
 *	allocated. Each SCSI command will carry a command tag allocated by the
 *	block layer. These SCSI commands can be used by the LLDD to send
 *	internal or passthrough commands without having to manage tag allocation
 *	inside the LLDD.
 */
struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
{
	struct scsi_device *sdev = NULL;
	struct scsi_target *starget;

	/* scan_mutex is released automatically on every return path. */
	guard(mutex)(&shost->scan_mutex);

	if (!scsi_host_scan_allowed(shost))
		goto out;

	/*
	 * id == max_id lies outside the range valid for real targets
	 * (real ids satisfy id < max_id), so the pseudo target cannot
	 * clash with a scanned one.
	 */
	starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
	if (!starget)
		goto out;

	/* LUN U64_MAX marks the device as the host's pseudo device. */
	sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
	if (!sdev) {
		scsi_target_reap(starget);
		goto put_target;
	}

	/* Clear the legacy "broken device" flag for this internal device. */
	sdev->borken = 0;

put_target:
	/* See also the get_device(dev) call in scsi_alloc_target(). */
	put_device(&starget->dev);

out:
	return sdev;
}
2162