1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * scsi_scan.c
4 *
5 * Copyright (C) 2000 Eric Youngdale,
6 * Copyright (C) 2002 Patrick Mansfield
7 *
8 * The general scanning/probing algorithm is as follows, exceptions are
9 * made to it depending on device specific flags, compilation options, and
10 * global variable (boot or module load time) settings.
11 *
12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13 * device attached, a scsi_device is allocated and setup for it.
14 *
15 * For every id of every channel on the given host:
16 *
17 * Scan LUN 0; if the target responds to LUN 0 (even if there is no
18 * device or storage attached to LUN 0):
19 *
20 * If LUN 0 has a device attached, allocate and setup a
21 * scsi_device for it.
22 *
23 * If target is SCSI-3 or up, issue a REPORT LUN, and scan
24 * all of the LUNs returned by the REPORT LUN; else,
25 * sequentially scan LUNs up until some maximum is reached,
26 * or a LUN is seen that cannot have a device attached to it.
27 */
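/*
 * A rough pseudocode sketch of the per-target flow described above (the
 * real control flow lives in __scsi_scan_target() and its helpers):
 *
 *	probe LUN 0 with INQUIRY;
 *	if (the target responded at LUN 0) {
 *		if (target is SCSI-3+ and REPORT LUNS succeeds)
 *			probe each LUN returned by REPORT LUNS;
 *		else
 *			probe LUNs 1..max sequentially, stopping at the
 *			first LUN that cannot have a device attached;
 *	}
 */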
28
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <linux/init.h>
32 #include <linux/blkdev.h>
33 #include <linux/delay.h>
34 #include <linux/kthread.h>
35 #include <linux/spinlock.h>
36 #include <linux/async.h>
37 #include <linux/slab.h>
38 #include <linux/unaligned.h>
39
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_cmnd.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_driver.h>
44 #include <scsi/scsi_devinfo.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_transport.h>
47 #include <scsi/scsi_dh.h>
48 #include <scsi/scsi_eh.h>
49
50 #include "scsi_priv.h"
51 #include "scsi_logging.h"
52
53 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
54 " SCSI scanning, some SCSI devices might not be configured\n"
55
56 /*
57 * Default timeout
58 */
59 #define SCSI_TIMEOUT (2*HZ)
60 #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61
62 /*
63 * Prefix values for the SCSI id's (stored in sysfs name field)
64 */
65 #define SCSI_UID_SER_NUM 'S'
66 #define SCSI_UID_UNKNOWN 'Z'
67
68 /*
69 * Return values of some of the scanning functions.
70 *
71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
72 * includes allocation or general failures preventing IO from being sent.
73 *
74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75 * on the given LUN.
76 *
77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78 * given LUN.
79 */
80 #define SCSI_SCAN_NO_RESPONSE 0
81 #define SCSI_SCAN_TARGET_PRESENT 1
82 #define SCSI_SCAN_LUN_PRESENT 2
83
84 static const char *scsi_null_device_strs = "nullnullnullnull";
85
86 #define MAX_SCSI_LUNS 512
87
88 static u64 max_scsi_luns = MAX_SCSI_LUNS;
89
90 module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91 MODULE_PARM_DESC(max_luns,
92 "last scsi LUN (should be between 1 and 2^64-1)");
93
94 #ifdef CONFIG_SCSI_SCAN_ASYNC
95 #define SCSI_SCAN_TYPE_DEFAULT "async"
96 #else
97 #define SCSI_SCAN_TYPE_DEFAULT "sync"
98 #endif
99
100 static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101
102 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103 S_IRUGO|S_IWUSR);
104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105 "Setting to 'manual' disables automatic scanning, but allows "
106 "for manual device scan via the 'scan' sysfs attribute.");
107
108 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109
110 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111 MODULE_PARM_DESC(inq_timeout,
112 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113 " Default is 20. Some devices may need more; most need less.");
114
115 /* This lock protects only this list */
116 static DEFINE_SPINLOCK(async_scan_lock);
117 static LIST_HEAD(scanning_hosts);
118
119 struct async_scan_data {
120 struct list_head list;
121 struct Scsi_Host *shost;
122 struct completion prev_finished;
123 };
124
125 /*
126 * scsi_enable_async_suspend - Enable async suspend and resume
127 */
128 void scsi_enable_async_suspend(struct device *dev)
129 {
130 /*
131 * If a user has disabled async probing a likely reason is due to a
132 * storage enclosure that does not inject staggered spin-ups. For
133 * safety, make resume synchronous as well in that case.
134 */
135 if (strncmp(scsi_scan_type, "async", 5) != 0)
136 return;
137 /* Enable asynchronous suspend and resume. */
138 device_enable_async_suspend(dev);
139 }
140
141 /**
142 * scsi_complete_async_scans - Wait for asynchronous scans to complete
143 *
144 * When this function returns, any host which started scanning before
145 * this function was called will have finished its scan. Hosts which
146 * started scanning after this function was called may or may not have
147 * finished.
148 */
149 int scsi_complete_async_scans(void)
150 {
151 struct async_scan_data *data;
152
153 do {
154 scoped_guard(spinlock, &async_scan_lock)
155 if (list_empty(&scanning_hosts))
156 return 0;
157 /* If we can't get memory immediately, that's OK. Just
158 * sleep a little. Even if we never get memory, the async
159 * scans will finish eventually.
160 */
161 data = kmalloc(sizeof(*data), GFP_KERNEL);
162 if (!data)
163 msleep(1);
164 } while (!data);
165
166 data->shost = NULL;
167 init_completion(&data->prev_finished);
168
169 spin_lock(&async_scan_lock);
170 /* Check that there's still somebody else on the list */
171 if (list_empty(&scanning_hosts))
172 goto done;
173 list_add_tail(&data->list, &scanning_hosts);
174 spin_unlock(&async_scan_lock);
175
176 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
177 wait_for_completion(&data->prev_finished);
178
179 spin_lock(&async_scan_lock);
180 list_del(&data->list);
181 if (!list_empty(&scanning_hosts)) {
182 struct async_scan_data *next = list_entry(scanning_hosts.next,
183 struct async_scan_data, list);
184 complete(&next->prev_finished);
185 }
186 done:
187 spin_unlock(&async_scan_lock);
188
189 kfree(data);
190 return 0;
191 }
192
193 /**
194 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
195 * @sdev: scsi device to send command to
196 * @result: area to store the result of the MODE SENSE
197 *
198 * Description:
199 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
200 * Called for BLIST_KEY devices.
201 **/
202 static void scsi_unlock_floptical(struct scsi_device *sdev,
203 unsigned char *result)
204 {
205 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
206
207 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
208 scsi_cmd[0] = MODE_SENSE;
209 scsi_cmd[1] = 0;
210 scsi_cmd[2] = 0x2e;
211 scsi_cmd[3] = 0;
212 scsi_cmd[4] = 0x2a; /* size */
213 scsi_cmd[5] = 0;
214 scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
215 SCSI_TIMEOUT, 3, NULL);
216 }
217
218 static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
219 unsigned int depth)
220 {
221 int new_shift = sbitmap_calculate_shift(depth);
222 bool need_alloc = !sdev->budget_map.map;
223 bool need_free = false;
224 unsigned int memflags;
225 int ret;
226 struct sbitmap sb_backup;
227
228 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
229
230 /*
231 * realloc if new shift is calculated, which is caused by setting
232 * up one new default queue depth after calling ->sdev_configure
233 */
234 if (!need_alloc && new_shift != sdev->budget_map.shift)
235 need_alloc = need_free = true;
236
237 if (!need_alloc)
238 return 0;
239
240 /*
241 * Request queue has to be frozen for reallocating budget map,
242 * and here disk isn't added yet, so freezing is pretty fast
243 */
244 if (need_free) {
245 memflags = blk_mq_freeze_queue(sdev->request_queue);
246 sb_backup = sdev->budget_map;
247 }
248 ret = sbitmap_init_node(&sdev->budget_map,
249 scsi_device_max_queue_depth(sdev),
250 new_shift, GFP_NOIO,
251 sdev->request_queue->node, false, true);
252 if (!ret)
253 sbitmap_resize(&sdev->budget_map, depth);
254
255 if (need_free) {
256 if (ret)
257 sdev->budget_map = sb_backup;
258 else
259 sbitmap_free(&sb_backup);
260 ret = 0;
261 blk_mq_unfreeze_queue(sdev->request_queue, memflags);
262 }
263 return ret;
264 }
265
266 /**
267 * scsi_alloc_sdev - allocate and setup a scsi_Device
268 * @starget: which target to allocate a &scsi_device for
269 * @lun: which lun
270 * @hostdata: usually NULL and set by ->sdev_init instead
271 *
272 * Description:
273 * Allocate, initialize for io, and return a pointer to a scsi_Device.
274 * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
275 * adds scsi_Device to the appropriate list.
276 *
277 * Return value:
278 * scsi_Device pointer, or NULL on failure.
279 **/
280 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
281 u64 lun, void *hostdata)
282 {
283 unsigned int depth;
284 struct scsi_device *sdev;
285 struct request_queue *q;
286 int display_failure_msg = 1, ret;
287 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
288 struct queue_limits lim;
289
290 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
291 GFP_KERNEL);
292 if (!sdev)
293 goto out;
294
295 sdev->vendor = scsi_null_device_strs;
296 sdev->model = scsi_null_device_strs;
297 sdev->rev = scsi_null_device_strs;
298 sdev->host = shost;
299 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
300 sdev->id = starget->id;
301 sdev->lun = lun;
302 sdev->channel = starget->channel;
303 mutex_init(&sdev->state_mutex);
304 sdev->sdev_state = SDEV_CREATED;
305 INIT_LIST_HEAD(&sdev->siblings);
306 INIT_LIST_HEAD(&sdev->same_target_siblings);
307 INIT_LIST_HEAD(&sdev->starved_entry);
308 INIT_LIST_HEAD(&sdev->event_list);
309 spin_lock_init(&sdev->list_lock);
310 mutex_init(&sdev->inquiry_mutex);
311 INIT_WORK(&sdev->event_work, scsi_evt_thread);
312 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
313
314 sdev->sdev_gendev.parent = get_device(&starget->dev);
315 sdev->sdev_target = starget;
316
317 /* usually NULL and set by ->sdev_init instead */
318 sdev->hostdata = hostdata;
319
320 /* if the device needs this changing, it may do so in the
321 * sdev_configure function */
322 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
323
324 /*
325 * Some low level driver could use device->type
326 */
327 sdev->type = -1;
328
329 /*
330 * Assume that the device will have handshaking problems,
331 * and then fix this field later if it turns out it
332 * doesn't
333 */
334 sdev->borken = 1;
335
336 sdev->sg_reserved_size = INT_MAX;
337
338 scsi_init_limits(shost, &lim);
339 q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
340 if (IS_ERR(q)) {
341 /* release fn is set up in scsi_sysfs_device_initialize, so
342 * have to free and put manually here */
343 put_device(&starget->dev);
344 kfree(sdev);
345 goto out;
346 }
347 kref_get(&sdev->host->tagset_refcnt);
348 sdev->request_queue = q;
349
350 scsi_sysfs_device_initialize(sdev);
351
352 if (scsi_device_is_pseudo_dev(sdev))
353 return sdev;
354
355 depth = sdev->host->cmd_per_lun ?: 1;
356
357 /*
358 * Use .can_queue as budget map's depth because we have to
359 * support adjusting queue depth from sysfs. Meantime use
360 * default device queue depth to figure out sbitmap shift
361 * since we use this queue depth most of times.
362 */
363 if (scsi_realloc_sdev_budget_map(sdev, depth)) {
364 put_device(&starget->dev);
365 kfree(sdev);
366 goto out;
367 }
368
369 scsi_change_queue_depth(sdev, depth);
370
371 if (shost->hostt->sdev_init) {
372 ret = shost->hostt->sdev_init(sdev);
373 if (ret) {
374 /*
375 * if the LLDD reports device not present, don't clutter
376 * console with alloc failure messages
377 */
378 if (ret == -ENXIO)
379 display_failure_msg = 0;
380 goto out_device_destroy;
381 }
382 }
383
384 return sdev;
385
386 out_device_destroy:
387 __scsi_remove_device(sdev);
388 out:
389 if (display_failure_msg)
390 printk(ALLOC_FAILURE_MSG, __func__);
391 return NULL;
392 }
393
394 static void scsi_target_destroy(struct scsi_target *starget)
395 {
396 struct device *dev = &starget->dev;
397 struct Scsi_Host *shost = dev_to_shost(dev->parent);
398 unsigned long flags;
399
400 BUG_ON(starget->state == STARGET_DEL);
401 starget->state = STARGET_DEL;
402 transport_destroy_device(dev);
403 spin_lock_irqsave(shost->host_lock, flags);
404 if (shost->hostt->target_destroy)
405 shost->hostt->target_destroy(starget);
406 list_del_init(&starget->siblings);
407 spin_unlock_irqrestore(shost->host_lock, flags);
408 put_device(dev);
409 }
410
411 static void scsi_target_dev_release(struct device *dev)
412 {
413 struct device *parent = dev->parent;
414 struct scsi_target *starget = to_scsi_target(dev);
415
416 kfree(starget);
417 put_device(parent);
418 }
419
420 static const struct device_type scsi_target_type = {
421 .name = "scsi_target",
422 .release = scsi_target_dev_release,
423 };
424
425 int scsi_is_target_device(const struct device *dev)
426 {
427 return dev->type == &scsi_target_type;
428 }
429 EXPORT_SYMBOL(scsi_is_target_device);
430
431 static struct scsi_target *__scsi_find_target(struct device *parent,
432 int channel, uint id)
433 {
434 struct scsi_target *starget, *found_starget = NULL;
435 struct Scsi_Host *shost = dev_to_shost(parent);
436 /*
437 * Search for an existing target for this sdev.
438 */
439 list_for_each_entry(starget, &shost->__targets, siblings) {
440 if (starget->id == id &&
441 starget->channel == channel) {
442 found_starget = starget;
443 break;
444 }
445 }
446 if (found_starget)
447 get_device(&found_starget->dev);
448
449 return found_starget;
450 }
451
452 /**
453 * scsi_target_reap_ref_release - remove target from visibility
454 * @kref: the reap_ref in the target being released
455 *
456 * Called on last put of reap_ref, which is the indication that no device
457 * under this target is visible anymore, so render the target invisible in
458 * sysfs. Note: we have to be in user context here because the target reaps
459 * should be done in places where the scsi device visibility is being removed.
460 */
461 static void scsi_target_reap_ref_release(struct kref *kref)
462 {
463 struct scsi_target *starget
464 = container_of(kref, struct scsi_target, reap_ref);
465
466 /*
467 * if we get here and the target is still in a CREATED state that
468 * means it was allocated but never made visible (because a scan
469 * turned up no LUNs), so don't call device_del() on it.
470 */
471 if ((starget->state != STARGET_CREATED) &&
472 (starget->state != STARGET_CREATED_REMOVE)) {
473 transport_remove_device(&starget->dev);
474 device_del(&starget->dev);
475 }
476 scsi_target_destroy(starget);
477 }
478
479 static void scsi_target_reap_ref_put(struct scsi_target *starget)
480 {
481 kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
482 }
483
484 /**
485 * scsi_alloc_target - allocate a new or find an existing target
486 * @parent: parent of the target (need not be a scsi host)
487 * @channel: target channel number (zero if no channels)
488 * @id: target id number
489 *
490 * Return an existing target if one exists, provided it hasn't already
491 * gone into STARGET_DEL state, otherwise allocate a new target.
492 *
493 * The target is returned with an incremented reference, so the caller
494 * is responsible for both reaping and doing a last put
495 */
496 static struct scsi_target *scsi_alloc_target(struct device *parent,
497 int channel, uint id)
498 {
499 struct Scsi_Host *shost = dev_to_shost(parent);
500 struct device *dev = NULL;
501 unsigned long flags;
502 const int size = sizeof(struct scsi_target)
503 + shost->transportt->target_size;
504 struct scsi_target *starget;
505 struct scsi_target *found_target;
506 int error, ref_got;
507
508 starget = kzalloc(size, GFP_KERNEL);
509 if (!starget) {
510 printk(KERN_ERR "%s: allocation failure\n", __func__);
511 return NULL;
512 }
513 dev = &starget->dev;
514 device_initialize(dev);
515 kref_init(&starget->reap_ref);
516 dev->parent = get_device(parent);
517 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
518 dev->bus = &scsi_bus_type;
519 dev->type = &scsi_target_type;
520 scsi_enable_async_suspend(dev);
521 starget->id = id;
522 starget->channel = channel;
523 starget->can_queue = 0;
524 INIT_LIST_HEAD(&starget->siblings);
525 INIT_LIST_HEAD(&starget->devices);
526 starget->state = STARGET_CREATED;
527 starget->scsi_level = SCSI_2;
528 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
529 retry:
530 spin_lock_irqsave(shost->host_lock, flags);
531
532 found_target = __scsi_find_target(parent, channel, id);
533 if (found_target)
534 goto found;
535
536 list_add_tail(&starget->siblings, &shost->__targets);
537 spin_unlock_irqrestore(shost->host_lock, flags);
538 /* allocate and add */
539 transport_setup_device(dev);
540 if (shost->hostt->target_alloc) {
541 error = shost->hostt->target_alloc(starget);
542
543 if (error) {
544 if (error != -ENXIO)
545 dev_err(dev, "target allocation failed, error %d\n", error);
546 /* don't want scsi_target_reap to do the final
547 * put because it will be under the host lock */
548 scsi_target_destroy(starget);
549 return NULL;
550 }
551 }
552 get_device(dev);
553
554 return starget;
555
556 found:
557 /*
558 * release routine already fired if kref is zero, so if we can still
559 * take the reference, the target must be alive. If we can't, it must
560 * be dying and we need to wait for a new target
561 */
562 ref_got = kref_get_unless_zero(&found_target->reap_ref);
563
564 spin_unlock_irqrestore(shost->host_lock, flags);
565 if (ref_got) {
566 put_device(dev);
567 return found_target;
568 }
569 /*
570 * Unfortunately, we found a dying target; need to wait until it's
571 * dead before we can get a new one. There is an anomaly here. We
572 * *should* call scsi_target_reap() to balance the kref_get() of the
573 * reap_ref above. However, since the target is being released, it's
574 * already invisible and the reap_ref is irrelevant. If we call
575 * scsi_target_reap() we might spuriously do another device_del() on
576 * an already invisible target.
577 */
578 put_device(&found_target->dev);
579 /*
580 * length of time is irrelevant here, we just want to yield the CPU
581 * for a tick to avoid busy waiting for the target to die.
582 */
583 msleep(1);
584 goto retry;
585 }
586
587 /**
588 * scsi_target_reap - check to see if target is in use and destroy if not
589 * @starget: target to be checked
590 *
591 * This is used after removing a LUN or doing a last put of the target
592 * it checks atomically that nothing is using the target and removes
593 * it if so.
594 */
595 void scsi_target_reap(struct scsi_target *starget)
596 {
597 /*
598 * serious problem if this triggers: STARGET_DEL is only set if the
599 * reap_ref drops to zero, so we're trying to do another final put
600 * on an already released kref
601 */
602 BUG_ON(starget->state == STARGET_DEL);
603 scsi_target_reap_ref_put(starget);
604 }
605
606 /**
607 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
608 * INQUIRY result string
609 * @s: INQUIRY result string to sanitize
610 * @len: length of the string
611 *
612 * Description:
613 * The SCSI spec says that INQUIRY vendor, product, and revision
614 * strings must consist entirely of graphic ASCII characters,
615 * padded on the right with spaces. Since not all devices obey
616 * this rule, we will replace non-graphic or non-ASCII characters
617 * with spaces. Exception: a NUL character is interpreted as a
618 * string terminator, so all the following characters are set to
619 * spaces.
620 **/
621 void scsi_sanitize_inquiry_string(unsigned char *s, int len)
622 {
623 int terminated = 0;
624
625 for (; len > 0; (--len, ++s)) {
626 if (*s == 0)
627 terminated = 1;
628 if (terminated || *s < 0x20 || *s > 0x7e)
629 *s = ' ';
630 }
631 }
632 EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
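/*
 * Example: scsi_probe_lun() below applies this to the fixed-width fields
 * of a standard INQUIRY response (bytes 8-15 vendor, 16-31 model,
 * 32-35 revision):
 *
 *	scsi_sanitize_inquiry_string(&inq_result[8], 8);
 *	scsi_sanitize_inquiry_string(&inq_result[16], 16);
 *	scsi_sanitize_inquiry_string(&inq_result[32], 4);
 */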
633
634
635 /**
636 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
637 * @sdev: scsi_device to probe
638 * @inq_result: area to store the INQUIRY result
639 * @result_len: len of inq_result
640 * @bflags: store any bflags found here
641 *
642 * Description:
643 * Probe the lun associated with @sdev using a standard SCSI INQUIRY;
644 *
645 * If the INQUIRY is successful, zero is returned and the
646 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
647 * are copied to the scsi_device, and any flags value is stored in *@bflags.
648 **/
649 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
650 int result_len, blist_flags_t *bflags)
651 {
652 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
653 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
654 int response_len = 0;
655 int pass, count, result, resid;
656 struct scsi_failure failure_defs[] = {
657 /*
658 * not-ready to ready transition [asc/ascq=0x28/0x0] or
659 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
660 * should not yield UNIT_ATTENTION but many buggy devices do
661 * so anyway.
662 */
663 {
664 .sense = UNIT_ATTENTION,
665 .asc = 0x28,
666 .result = SAM_STAT_CHECK_CONDITION,
667 },
668 {
669 .sense = UNIT_ATTENTION,
670 .asc = 0x29,
671 .result = SAM_STAT_CHECK_CONDITION,
672 },
673 {
674 .allowed = 1,
675 .result = DID_TIME_OUT << 16,
676 },
677 {}
678 };
679 struct scsi_failures failures = {
680 .total_allowed = 3,
681 .failure_definitions = failure_defs,
682 };
683 const struct scsi_exec_args exec_args = {
684 .resid = &resid,
685 .failures = &failures,
686 };
687
688 *bflags = 0;
689
690 /* Perform up to 3 passes. The first pass uses a conservative
691 * transfer length of 36 unless sdev->inquiry_len specifies a
692 * different value. */
693 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
694 try_inquiry_len = first_inquiry_len;
695 pass = 1;
696
697 next_pass:
698 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
699 "scsi scan: INQUIRY pass %d length %d\n",
700 pass, try_inquiry_len));
701
702 /* Each pass gets up to three chances to ignore Unit Attention */
703 scsi_failures_reset_retries(&failures);
704
705 for (count = 0; count < 3; ++count) {
706 memset(scsi_cmd, 0, 6);
707 scsi_cmd[0] = INQUIRY;
708 scsi_cmd[4] = (unsigned char) try_inquiry_len;
709
710 memset(inq_result, 0, try_inquiry_len);
711
712 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
713 inq_result, try_inquiry_len,
714 HZ / 2 + HZ * scsi_inq_timeout, 3,
715 &exec_args);
716
717 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
718 "scsi scan: INQUIRY %s with code 0x%x\n",
719 result ? "failed" : "successful", result));
720
721 if (result == 0) {
722 /*
723 * if nothing was transferred, we try
724 * again. It's a workaround for some USB
725 * devices.
726 */
727 if (resid == try_inquiry_len)
728 continue;
729 }
730 break;
731 }
732
733 if (result == 0) {
734 scsi_sanitize_inquiry_string(&inq_result[8], 8);
735 scsi_sanitize_inquiry_string(&inq_result[16], 16);
736 scsi_sanitize_inquiry_string(&inq_result[32], 4);
737
738 response_len = inq_result[4] + 5;
739 if (response_len > 255)
740 response_len = first_inquiry_len; /* sanity */
741
742 /*
743 * Get any flags for this device.
744 *
745 * XXX add a bflags to scsi_device, and replace the
746 * corresponding bit fields in scsi_device, so bflags
747 * need not be passed as an argument.
748 */
749 *bflags = scsi_get_device_flags(sdev, &inq_result[8],
750 &inq_result[16]);
751
752 /* When the first pass succeeds we gain information about
753 * what larger transfer lengths might work. */
754 if (pass == 1) {
755 if (BLIST_INQUIRY_36 & *bflags)
756 next_inquiry_len = 36;
757 /*
758 * LLD specified a maximum sdev->inquiry_len
759 * but device claims it has more data. Capping
760 * the length only makes sense for legacy
761 * devices. If a device supports SPC-4 (2014)
762 * or newer, assume that it is safe to ask for
763 * as much as the device says it supports.
764 */
765 else if (sdev->inquiry_len &&
766 response_len > sdev->inquiry_len &&
767 (inq_result[2] & 0x7) < 6) /* SPC-4 */
768 next_inquiry_len = sdev->inquiry_len;
769 else
770 next_inquiry_len = response_len;
771
772 /* If more data is available perform the second pass */
773 if (next_inquiry_len > try_inquiry_len) {
774 try_inquiry_len = next_inquiry_len;
775 pass = 2;
776 goto next_pass;
777 }
778 }
779
780 } else if (pass == 2) {
781 sdev_printk(KERN_INFO, sdev,
782 "scsi scan: %d byte inquiry failed. "
783 "Consider BLIST_INQUIRY_36 for this device\n",
784 try_inquiry_len);
785
786 /* If this pass failed, the third pass goes back and transfers
787 * the same amount as we successfully got in the first pass. */
788 try_inquiry_len = first_inquiry_len;
789 pass = 3;
790 goto next_pass;
791 }
792
793 /* If the last transfer attempt got an error, assume the
794 * peripheral doesn't exist or is dead. */
795 if (result)
796 return -EIO;
797
798 /* Don't report any more data than the device says is valid */
799 sdev->inquiry_len = min(try_inquiry_len, response_len);
800
801 /*
802 * XXX Abort if the response length is less than 36? If less than
803 * 32, the lookup of the device flags (above) could be invalid,
804 * and it would be possible to take an incorrect action - we do
805 * not want to hang because of a short INQUIRY. On the flip side,
806 * if the device is spun down or becoming ready (and so it gives a
807 * short INQUIRY), an abort here prevents any further use of the
808 * device, including spin up.
809 *
810 * On the whole, the best approach seems to be to assume the first
811 * 36 bytes are valid no matter what the device says. That's
812 * better than copying < 36 bytes to the inquiry-result buffer
813 * and displaying garbage for the Vendor, Product, or Revision
814 * strings.
815 */
816 if (sdev->inquiry_len < 36) {
817 if (!sdev->host->short_inquiry) {
818 shost_printk(KERN_INFO, sdev->host,
819 "scsi scan: INQUIRY result too short (%d),"
820 " using 36\n", sdev->inquiry_len);
821 sdev->host->short_inquiry = 1;
822 }
823 sdev->inquiry_len = 36;
824 }
825
826 /*
827 * Related to the above issue:
828 *
829 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
830 * and if not ready, sent a START_STOP to start (maybe spin up) and
831 * then send the INQUIRY again, since the INQUIRY can change after
832 * a device is initialized.
833 *
834 * Ideally, start a device if explicitly asked to do so. This
835 * assumes that a device is spun up on power on, spun down on
836 * request, and then spun up on request.
837 */
838
839 /*
840 * The scanning code needs to know the scsi_level, even if no
841 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
842 * non-zero LUNs can be scanned.
843 */
844 sdev->scsi_level = inq_result[2] & 0x0f;
845 if (sdev->scsi_level >= 2 ||
846 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
847 sdev->scsi_level++;
848 sdev->sdev_target->scsi_level = sdev->scsi_level;
849
850 /*
851 * If SCSI-2 or lower, and if the transport requires it,
852 * store the LUN value in CDB[1].
853 */
854 sdev->lun_in_cdb = 0;
855 if (sdev->scsi_level <= SCSI_2 &&
856 sdev->scsi_level != SCSI_UNKNOWN &&
857 !sdev->host->no_scsi2_lun_in_cdb)
858 sdev->lun_in_cdb = 1;
859
860 return 0;
861 }
862
863 /**
864 * scsi_add_lun - allocate and fully initialize a scsi_device
865 * @sdev: holds information to be stored in the new scsi_device
866 * @inq_result: holds the result of a previous INQUIRY to the LUN
867 * @bflags: black/white list flag
868 * @async: 1 if this device is being scanned asynchronously
869 *
870 * Description:
871 * Initialize the scsi_device @sdev. Optionally set fields based
872 * on values in *@bflags.
873 *
874 * Return:
875 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
876 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
877 **/
878 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
879 blist_flags_t *bflags, int async)
880 {
881 const struct scsi_host_template *hostt = sdev->host->hostt;
882 struct queue_limits lim;
883 int ret;
884
885 /*
886 * XXX do not save the inquiry, since it can change underneath us,
887 * save just vendor/model/rev.
888 *
889 * Rather than save it and have an ioctl that retrieves the saved
890 * value, have an ioctl that executes the same INQUIRY code used
891 * in scsi_probe_lun, let user level programs doing INQUIRY
892 * scanning run at their own risk, or supply a user level program
893 * that can correctly scan.
894 */
895
896 /*
897 * Copy at least 36 bytes of INQUIRY data, so that we don't
898 * dereference unallocated memory when accessing the Vendor,
899 * Product, and Revision strings. Badly behaved devices may set
900 * the INQUIRY Additional Length byte to a small value, indicating
901 * these strings are invalid, but often they contain plausible data
902 * nonetheless. It doesn't matter if the device sent < 36 bytes
903 * total, since scsi_probe_lun() initializes inq_result with 0s.
904 */
905 sdev->inquiry = kmemdup(inq_result,
906 max_t(size_t, sdev->inquiry_len, 36),
907 GFP_KERNEL);
908 if (sdev->inquiry == NULL)
909 return SCSI_SCAN_NO_RESPONSE;
910
911 sdev->vendor = (char *) (sdev->inquiry + 8);
912 sdev->model = (char *) (sdev->inquiry + 16);
913 sdev->rev = (char *) (sdev->inquiry + 32);
914
915 sdev->is_ata = strncmp(sdev->vendor, "ATA     ", 8) == 0;
916 if (sdev->is_ata) {
917 /*
918 * sata emulation layer device. This is a hack to work around
919 * the SATL power management specifications which state that
920 * when the SATL detects the device has gone into standby
921 * mode, it shall respond with NOT READY.
922 */
923 sdev->allow_restart = 1;
924 }
925
926 if (*bflags & BLIST_ISROM) {
927 sdev->type = TYPE_ROM;
928 sdev->removable = 1;
929 } else {
930 sdev->type = (inq_result[0] & 0x1f);
931 sdev->removable = (inq_result[1] & 0x80) >> 7;
932
933 /*
934 * some devices may respond with wrong type for
935 * well-known logical units. Force well-known type
936 * to enumerate them correctly.
937 */
938 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
939 sdev_printk(KERN_WARNING, sdev,
940 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
941 __func__, sdev->type, (unsigned int)sdev->lun);
942 sdev->type = TYPE_WLUN;
943 }
944
945 }
946
947 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
948 /* RBC and MMC devices can return SCSI-3 compliance and yet
949 * still not support REPORT LUNS, so make them act as
950 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
951 * specifically set */
952 if ((*bflags & BLIST_REPORTLUN2) == 0)
953 *bflags |= BLIST_NOREPORTLUN;
954 }
955
956 /*
957 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
958 * spec says: The device server is capable of supporting the
959 * specified peripheral device type on this logical unit. However,
960 * the physical device is not currently connected to this logical
961 * unit.
962 *
963 * The above is vague, as it implies that we could treat 001 and
964 * 011 the same. Stay compatible with previous code, and create a
965 * scsi_device for a PQ of 1
966 *
967 * Don't set the device offline here; rather let the upper
968 * level drivers eval the PQ to decide whether they should
969 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
970 */
971
972 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
973 sdev->lockable = sdev->removable;
974 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
975
976 if (sdev->scsi_level >= SCSI_3 ||
977 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
978 sdev->ppr = 1;
979 if (inq_result[7] & 0x60)
980 sdev->wdtr = 1;
981 if (inq_result[7] & 0x10)
982 sdev->sdtr = 1;
983
984 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
985 "ANSI: %d%s\n", scsi_device_type(sdev->type),
986 sdev->vendor, sdev->model, sdev->rev,
987 sdev->inq_periph_qual, inq_result[2] & 0x07,
988 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
989
990 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
991 !(*bflags & BLIST_NOTQ)) {
992 sdev->tagged_supported = 1;
993 sdev->simple_tags = 1;
994 }
995
996 /*
997 * Some devices (Texel CD ROM drives) have handshaking problems
998 * when used with the Seagate controllers. borken is initialized
999 * to 1, and is cleared to 0 here.
1000 */
1001 if ((*bflags & BLIST_BORKEN) == 0)
1002 sdev->borken = 0;
1003
1004 if (*bflags & BLIST_NO_ULD_ATTACH)
1005 sdev->no_uld_attach = 1;
1006
1007 /*
1008 * Apparently some really broken devices (contrary to the SCSI
1009 * standards) need to be selected without asserting ATN
1010 */
1011 if (*bflags & BLIST_SELECT_NO_ATN)
1012 sdev->select_no_atn = 1;
1013
1014 /*
1015 * Some devices may not want to have a start command automatically
1016 * issued when a device is added.
1017 */
1018 if (*bflags & BLIST_NOSTARTONADD)
1019 sdev->no_start_on_add = 1;
1020
1021 if (*bflags & BLIST_SINGLELUN)
1022 scsi_target(sdev)->single_lun = 1;
1023
1024 sdev->use_10_for_rw = 1;
1025
1026 /* some devices don't like REPORT SUPPORTED OPERATION CODES
1027 * and will simply timeout causing sd_mod init to take a very
1028 * very long time */
1029 if (*bflags & BLIST_NO_RSOC)
1030 sdev->no_report_opcodes = 1;
1031
1032 /* set the device running here so that ->sdev_configure
1033 * may do I/O */
1034 mutex_lock(&sdev->state_mutex);
1035 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1036 if (ret)
1037 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1038 mutex_unlock(&sdev->state_mutex);
1039
1040 if (ret) {
1041 sdev_printk(KERN_ERR, sdev,
1042 "in wrong state %s to complete scan\n",
1043 scsi_device_state_name(sdev->sdev_state));
1044 return SCSI_SCAN_NO_RESPONSE;
1045 }
1046
1047 if (*bflags & BLIST_NOT_LOCKABLE)
1048 sdev->lockable = 0;
1049
1050 if (*bflags & BLIST_RETRY_HWERROR)
1051 sdev->retry_hwerror = 1;
1052
1053 if (*bflags & BLIST_NO_DIF)
1054 sdev->no_dif = 1;
1055
1056 if (*bflags & BLIST_UNMAP_LIMIT_WS)
1057 sdev->unmap_limit_for_ws = 1;
1058
1059 if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1060 sdev->ignore_media_change = 1;
1061
1062 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1063
1064 if (*bflags & BLIST_TRY_VPD_PAGES)
1065 sdev->try_vpd_pages = 1;
1066 else if (*bflags & BLIST_SKIP_VPD_PAGES)
1067 sdev->skip_vpd_pages = 1;
1068
1069 if (*bflags & BLIST_NO_VPD_SIZE)
1070 sdev->no_vpd_size = 1;
1071
1072 transport_configure_device(&sdev->sdev_gendev);
1073
1074 sdev->sdev_bflags = *bflags;
1075
1076 if (scsi_device_is_pseudo_dev(sdev))
1077 return SCSI_SCAN_LUN_PRESENT;
1078
1079 /*
1080 * No need to freeze the queue as it isn't reachable to anyone else yet.
1081 */
1082 lim = queue_limits_start_update(sdev->request_queue);
1083 if (*bflags & BLIST_MAX_512)
1084 lim.max_hw_sectors = 512;
1085 else if (*bflags & BLIST_MAX_1024)
1086 lim.max_hw_sectors = 1024;
1087
1088 if (hostt->sdev_configure)
1089 ret = hostt->sdev_configure(sdev, &lim);
1090 if (ret) {
1091 queue_limits_cancel_update(sdev->request_queue);
1092 /*
1093 * If the LLDD reports device not present, don't clutter the
1094 * console with failure messages.
1095 */
1096 if (ret != -ENXIO)
1097 sdev_printk(KERN_ERR, sdev,
1098 "failed to configure device\n");
1099 return SCSI_SCAN_NO_RESPONSE;
1100 }
1101
1102 ret = queue_limits_commit_update(sdev->request_queue, &lim);
1103 if (ret) {
1104 sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
1105 return SCSI_SCAN_NO_RESPONSE;
1106 }
1107
1108 /*
1109 * The queue_depth is often changed in ->sdev_configure.
1110 *
1111 * Set up budget map again since memory consumption of the map depends
1112 * on actual queue depth.
1113 */
1114 if (hostt->sdev_configure)
1115 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1116
1117 if (sdev->scsi_level >= SCSI_3)
1118 scsi_attach_vpd(sdev);
1119
1120 scsi_cdl_check(sdev);
1121
1122 sdev->max_queue_depth = sdev->queue_depth;
1123 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1124
1125 /*
1126 * Ok, the device is now all set up, we can
1127 * register it and tell the rest of the kernel
1128 * about it.
1129 */
1130 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1131 return SCSI_SCAN_NO_RESPONSE;
1132
1133 return SCSI_SCAN_LUN_PRESENT;
1134 }
1135
1136 #ifdef CONFIG_SCSI_LOGGING
1137 /**
1138 * scsi_inq_str - print INQUIRY data from first to end index, strip trailing whitespace
1139 * @buf: Output buffer with at least end-first+1 bytes of space
1140 * @inq: Inquiry buffer (input)
1141 * @first: Offset of string into inq
1142 * @end: Index after last character in inq
1143 */
1144 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1145 unsigned first, unsigned end)
1146 {
1147 unsigned term = 0, idx;
1148
1149 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1150 if (inq[idx+first] > ' ') {
1151 buf[idx] = inq[idx+first];
1152 term = idx+1;
1153 } else {
1154 buf[idx] = ' ';
1155 }
1156 }
1157 buf[term] = 0;
1158 return buf;
1159 }
1160 #endif
1161
1162 /**
1163 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1164 * @starget: pointer to target device structure
1165 * @lun: LUN of target device
1166 * @bflagsp: store bflags here if not NULL
1167 * @sdevp: if not NULL, a pointer to the resulting scsi_device is stored here
1168 * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only
1169 * needed on first scan
1170 * @hostdata: passed to scsi_alloc_sdev()
1171 *
1172 * Description:
1173 * Call scsi_probe_lun, if a LUN with an attached device is found,
1174 * allocate and set it up by calling scsi_add_lun.
1175 *
1176 * Return:
1177 *
1178 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1179 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1180 * attached at the LUN
1181 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1182 **/
1183 static int scsi_probe_and_add_lun(struct scsi_target *starget,
1184 u64 lun, blist_flags_t *bflagsp,
1185 struct scsi_device **sdevp,
1186 enum scsi_scan_mode rescan,
1187 void *hostdata)
1188 {
1189 struct scsi_device *sdev;
1190 unsigned char *result;
1191 blist_flags_t bflags;
1192 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1193 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1194
1195 /*
1196 * The rescan flag is used as an optimization; the first scan of a
1197 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
1198 */
1199 sdev = scsi_device_lookup_by_target(starget, lun);
1200 if (sdev) {
1201 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1202 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1203 "scsi scan: device exists on %s\n",
1204 dev_name(&sdev->sdev_gendev)));
1205 if (sdevp)
1206 *sdevp = sdev;
1207 else
1208 scsi_device_put(sdev);
1209
1210 if (bflagsp)
1211 *bflagsp = scsi_get_device_flags(sdev,
1212 sdev->vendor,
1213 sdev->model);
1214 return SCSI_SCAN_LUN_PRESENT;
1215 }
1216 scsi_device_put(sdev);
1217 } else
1218 sdev = scsi_alloc_sdev(starget, lun, hostdata);
1219 if (!sdev)
1220 goto out;
1221
1222 if (scsi_device_is_pseudo_dev(sdev)) {
1223 if (bflagsp)
1224 *bflagsp = BLIST_NOLUN;
1225 return SCSI_SCAN_LUN_PRESENT;
1226 }
1227
1228 result = kmalloc(result_len, GFP_KERNEL);
1229 if (!result)
1230 goto out_free_sdev;
1231
1232 if (scsi_probe_lun(sdev, result, result_len, &bflags))
1233 goto out_free_result;
1234
1235 if (bflagsp)
1236 *bflagsp = bflags;
1237 /*
1238 * result contains valid SCSI INQUIRY data.
1239 */
1240 if ((result[0] >> 5) == 3) {
1241 /*
1242 * For a Peripheral qualifier 3 (011b), the SCSI
1243 * spec says: The device server is not capable of
1244 * supporting a physical device on this logical
1245 * unit.
1246 *
1247 * For disks, this implies that there is no
1248 * logical disk configured at sdev->lun, but there
1249 * is a target id responding.
1250 */
1251 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1252 " peripheral qualifier of 3, device not"
1253 " added\n"))
1254 if (lun == 0) {
1255 SCSI_LOG_SCAN_BUS(1, {
1256 unsigned char vend[9];
1257 unsigned char mod[17];
1258
1259 sdev_printk(KERN_INFO, sdev,
1260 "scsi scan: consider passing scsi_mod."
1261 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1262 scsi_inq_str(vend, result, 8, 16),
1263 scsi_inq_str(mod, result, 16, 32));
1264 });
1265
1266 }
1267
1268 res = SCSI_SCAN_TARGET_PRESENT;
1269 goto out_free_result;
1270 }
1271
1272 /*
1273 * Some targets may set slight variations of PQ and PDT to signal
1274 * that no LUN is present, so don't add sdev in these cases.
1275 * Two specific examples are:
1276 * 1) NetApp targets: return PQ=1, PDT=0x1f
1277 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1278 * in the UFI 1.0 spec (we cannot rely on reserved bits).
1279 *
1280 * References:
1281 * 1) SCSI SPC-3, pp. 145-146
1282 * PQ=1: "A peripheral device having the specified peripheral
1283 * device type is not connected to this logical unit. However, the
1284 * device server is capable of supporting the specified peripheral
1285 * device type on this logical unit."
1286 * PDT=0x1f: "Unknown or no device type"
1287 * 2) USB UFI 1.0, p. 20
1288 * PDT=00h Direct-access device (floppy)
1289 * PDT=1Fh none (no FDD connected to the requested logical unit)
1290 */
1291 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1292 (result[0] & 0x1f) == 0x1f &&
1293 !scsi_is_wlun(lun)) {
1294 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1295 "scsi scan: peripheral device type"
1296 " of 31, no device added\n"));
1297 res = SCSI_SCAN_TARGET_PRESENT;
1298 goto out_free_result;
1299 }
1300
1301 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1302 if (res == SCSI_SCAN_LUN_PRESENT) {
1303 if (bflags & BLIST_KEY) {
1304 sdev->lockable = 0;
1305 scsi_unlock_floptical(sdev, result);
1306 }
1307 }
1308
1309 out_free_result:
1310 kfree(result);
1311 out_free_sdev:
1312 if (res == SCSI_SCAN_LUN_PRESENT) {
1313 if (sdevp) {
1314 if (scsi_device_get(sdev) == 0) {
1315 *sdevp = sdev;
1316 } else {
1317 __scsi_remove_device(sdev);
1318 res = SCSI_SCAN_NO_RESPONSE;
1319 }
1320 }
1321 } else
1322 __scsi_remove_device(sdev);
1323 out:
1324 return res;
1325 }
1326
1327 /**
1328 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1329 * @starget: pointer to target structure to scan
1330 * @bflags: black/white list flag for LUN 0
1331 * @scsi_level: Which version of the standard does this device adhere to
1332 * @rescan: passed to scsi_probe_and_add_lun()
1333 *
1334 * Description:
1335 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1336 * scanned) to some maximum lun until a LUN is found with no device
1337 * attached. Use the bflags to figure out any oddities.
1338 *
1339 * Modifies sdevscan->lun.
1340 **/
1341 static void scsi_sequential_lun_scan(struct scsi_target *starget,
1342 blist_flags_t bflags, int scsi_level,
1343 enum scsi_scan_mode rescan)
1344 {
1345 uint max_dev_lun;
1346 u64 sparse_lun, lun;
1347 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1348
1349 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1350 "scsi scan: Sequential scan\n"));
1351
1352 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1353 /*
1354 * If this device is known to support sparse multiple units,
1355 * override the other settings, and scan all of them. Normally,
1356 * SCSI-3 devices should be scanned via the REPORT LUNS.
1357 */
1358 if (bflags & BLIST_SPARSELUN) {
1359 max_dev_lun = shost->max_lun;
1360 sparse_lun = 1;
1361 } else
1362 sparse_lun = 0;
1363
1364 /*
1365 * If less than SCSI_1_CCS, and no special lun scanning, stop
1366 * scanning; this matches 2.4 behaviour, but could just be a bug
1367 * (to continue scanning a SCSI_1_CCS device).
1368 *
1369 * This test is broken. We might not have any device on lun0 for
1370 * a sparselun device, and if that's the case then how would we
1371 * know the real scsi_level, eh? It might make sense to just not
1372 * scan any SCSI_1 device for non-0 luns, but that check would best
1373 * go into scsi_alloc_sdev() and just have it return null when asked
1374 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1375 *
1376 if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1377 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1378 == 0))
1379 return;
1380 */
1381 /*
1382 * If this device is known to support multiple units, override
1383 * the other settings, and scan all of them.
1384 */
1385 if (bflags & BLIST_FORCELUN)
1386 max_dev_lun = shost->max_lun;
1387 /*
1388 * REGAL CDC-4X: avoid hang after LUN 4
1389 */
1390 if (bflags & BLIST_MAX5LUN)
1391 max_dev_lun = min(5U, max_dev_lun);
1392 /*
1393 * Do not scan SCSI-2 or lower device past LUN 7, unless
1394 * BLIST_LARGELUN.
1395 */
1396 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1397 max_dev_lun = min(8U, max_dev_lun);
1398 else
1399 max_dev_lun = min(256U, max_dev_lun);
1400
1401 /*
1402 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1403 * until we reach the max, or no LUN is found and we are not
1404 * sparse_lun.
1405 */
1406 for (lun = 1; lun < max_dev_lun; ++lun)
1407 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1408 NULL) != SCSI_SCAN_LUN_PRESENT) &&
1409 !sparse_lun)
1410 return;
1411 }
1412
1413 /**
1414 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1415 * @starget: which target
1416 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1417 * @rescan: if not SCSI_SCAN_INITIAL, skip code only needed on the first scan
1418 *
1419 * Description:
1420 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1421 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1422 *
1423 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1424 * LUNs even if it's older than SCSI-3.
1425 * If BLIST_NOREPORTLUN is set, return 1 always.
1426 * If BLIST_NOLUN is set, return 0 always.
1427 * If starget->no_report_luns is set, return 1 always.
1428 *
1429 * Return:
1430 * 0: scan completed (or no memory, so further scanning is futile)
1431 * 1: could not scan with REPORT LUN
1432 **/
1433 static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1434 enum scsi_scan_mode rescan)
1435 {
1436 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1437 unsigned int length;
1438 u64 lun;
1439 unsigned int num_luns;
1440 int result;
1441 struct scsi_lun *lunp, *lun_data;
1442 struct scsi_device *sdev;
1443 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1444 struct scsi_failure failure_defs[] = {
1445 {
1446 .sense = UNIT_ATTENTION,
1447 .asc = SCMD_FAILURE_ASC_ANY,
1448 .ascq = SCMD_FAILURE_ASCQ_ANY,
1449 .result = SAM_STAT_CHECK_CONDITION,
1450 },
1451 /* Fail all CCs except the UA above */
1452 {
1453 .sense = SCMD_FAILURE_SENSE_ANY,
1454 .result = SAM_STAT_CHECK_CONDITION,
1455 },
1456 /* Retry any other errors not listed above */
1457 {
1458 .result = SCMD_FAILURE_RESULT_ANY,
1459 },
1460 {}
1461 };
1462 struct scsi_failures failures = {
1463 .total_allowed = 3,
1464 .failure_definitions = failure_defs,
1465 };
1466 const struct scsi_exec_args exec_args = {
1467 .failures = &failures,
1468 };
1469 int ret = 0;
1470
1471 /*
1472 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1473 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
1474 * support more than 8 LUNs.
1475 * Don't attempt if the target doesn't support REPORT LUNS.
1476 */
1477 if (bflags & BLIST_NOREPORTLUN)
1478 return 1;
1479 if (starget->scsi_level < SCSI_2 &&
1480 starget->scsi_level != SCSI_UNKNOWN)
1481 return 1;
1482 if (starget->scsi_level < SCSI_3 &&
1483 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1484 return 1;
1485 if (bflags & BLIST_NOLUN)
1486 return 0;
1487 if (starget->no_report_luns)
1488 return 1;
1489
1490 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1491 sdev = scsi_alloc_sdev(starget, 0, NULL);
1492 if (!sdev)
1493 return 0;
1494 if (scsi_device_get(sdev)) {
1495 __scsi_remove_device(sdev);
1496 return 0;
1497 }
1498 }
1499
1500 /*
1501 * Allocate enough to hold the header (the same size as one scsi_lun)
1502 * plus the number of luns we are requesting. 511 was the default
1503 * value of the now removed max_report_luns parameter.
1504 */
1505 length = (511 + 1) * sizeof(struct scsi_lun);
1506 retry:
1507 lun_data = kmalloc(length, GFP_KERNEL);
1508 if (!lun_data) {
1509 printk(ALLOC_FAILURE_MSG, __func__);
1510 goto out;
1511 }
1512
1513 scsi_cmd[0] = REPORT_LUNS;
1514
1515 /*
1516 * bytes 1 - 5: reserved, set to zero.
1517 */
1518 memset(&scsi_cmd[1], 0, 5);
1519
1520 /*
1521 * bytes 6 - 9: length of the command.
1522 */
1523 put_unaligned_be32(length, &scsi_cmd[6]);
1524
1525 scsi_cmd[10] = 0; /* reserved */
1526 scsi_cmd[11] = 0; /* control */
1527
1528 /*
1529 * We can get a UNIT ATTENTION, for example a power on/reset, so
1530 * retry a few times (like sd.c does for TEST UNIT READY).
1531 * Experience shows some combinations of adapter/devices get at
1532 * least two power on/resets.
1533 *
1534 * Illegal requests (for devices that do not support REPORT LUNS)
1535 * should come through as a check condition, and will not generate
1536 * a retry.
1537 */
1538 scsi_failures_reset_retries(&failures);
1539
1540 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1541 "scsi scan: Sending REPORT LUNS\n"));
1542
1543 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
1544 length, SCSI_REPORT_LUNS_TIMEOUT, 3,
1545 &exec_args);
1546
1547 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1548 "scsi scan: REPORT LUNS %s result 0x%x\n",
1549 result ? "failed" : "successful", result));
1550 if (result) {
1551 /*
1552 * The device probably does not support a REPORT LUN command
1553 */
1554 ret = 1;
1555 goto out_err;
1556 }
1557
1558 /*
1559 * Get the length from the first four bytes of lun_data.
1560 */
1561 if (get_unaligned_be32(lun_data->scsi_lun) +
1562 sizeof(struct scsi_lun) > length) {
1563 length = get_unaligned_be32(lun_data->scsi_lun) +
1564 sizeof(struct scsi_lun);
1565 kfree(lun_data);
1566 goto retry;
1567 }
1568 length = get_unaligned_be32(lun_data->scsi_lun);
1569
1570 num_luns = (length / sizeof(struct scsi_lun));
1571
1572 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1573 "scsi scan: REPORT LUN scan\n"));
1574
1575 /*
1576 * Scan the luns in lun_data. The entry at offset 0 is really
1577 * the header, so start at 1 and go up to and including num_luns.
1578 */
1579 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1580 lun = scsilun_to_int(lunp);
1581
1582 if (lun > sdev->host->max_lun) {
1583 sdev_printk(KERN_WARNING, sdev,
1584 "lun%llu has a LUN larger than"
1585 " allowed by the host adapter\n", lun);
1586 } else {
1587 int res;
1588
1589 res = scsi_probe_and_add_lun(starget,
1590 lun, NULL, NULL, rescan, NULL);
1591 if (res == SCSI_SCAN_NO_RESPONSE) {
1592 /*
1593 * Got some results, but now none, abort.
1594 */
1595 sdev_printk(KERN_ERR, sdev,
1596 "Unexpected response"
1597 " from lun %llu while scanning, scan"
1598 " aborted\n", (unsigned long long)lun);
1599 break;
1600 }
1601 }
1602 }
1603
1604 out_err:
1605 kfree(lun_data);
1606 out:
1607 if (scsi_device_created(sdev))
1608 /*
1609 * the sdev we used didn't appear in the report luns scan
1610 */
1611 __scsi_remove_device(sdev);
1612 scsi_device_put(sdev);
1613 return ret;
1614 }
1615
1616 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1617 uint id, u64 lun, void *hostdata)
1618 {
1619 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1620 struct device *parent = &shost->shost_gendev;
1621 struct scsi_target *starget;
1622
1623 if (strncmp(scsi_scan_type, "none", 4) == 0)
1624 return ERR_PTR(-ENODEV);
1625
1626 starget = scsi_alloc_target(parent, channel, id);
1627 if (!starget)
1628 return ERR_PTR(-ENOMEM);
1629 scsi_autopm_get_target(starget);
1630
1631 mutex_lock(&shost->scan_mutex);
1632 if (!shost->async_scan)
1633 scsi_complete_async_scans();
1634
1635 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1636 scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
1637 SCSI_SCAN_RESCAN, hostdata);
1638 scsi_autopm_put_host(shost);
1639 }
1640 mutex_unlock(&shost->scan_mutex);
1641 scsi_autopm_put_target(starget);
1642 /*
1643 * paired with scsi_alloc_target(). Target will be destroyed unless
1644 * scsi_probe_and_add_lun made an underlying device visible
1645 */
1646 scsi_target_reap(starget);
1647 put_device(&starget->dev);
1648
1649 return sdev;
1650 }
1651 EXPORT_SYMBOL(__scsi_add_device);
1652
1653 /**
1654 * scsi_add_device - creates a new SCSI (LU) instance
1655 * @host: the &Scsi_Host instance where the device is located
1656 * @channel: target channel number (rarely other than %0)
1657 * @target: target id number
1658 * @lun: LUN of target device
1659 *
1660 * Probe for a specific LUN and add it if found.
1661 *
1662 * Notes: This call is usually performed internally during a SCSI
1663 * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
1664 * should only be called if the HBA becomes aware of a new SCSI
1665 * device (LU) after scsi_scan_host() has completed. If successful
1666 * this call can lead to sdev_init() and sdev_configure() callbacks
1667 * into the LLD.
1668 *
1669 * Return: %0 on success or negative error code on failure
1670 */
1671 int scsi_add_device(struct Scsi_Host *host, uint channel,
1672 uint target, u64 lun)
1673 {
1674 struct scsi_device *sdev =
1675 __scsi_add_device(host, channel, target, lun, NULL);
1676 if (IS_ERR(sdev))
1677 return PTR_ERR(sdev);
1678
1679 scsi_device_put(sdev);
1680 return 0;
1681 }
1682 EXPORT_SYMBOL(scsi_add_device);
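/*
 * Minimal usage sketch for an LLD that discovers a new logical unit after
 * scsi_scan_host() has completed ("target_id" and "new_lun" are
 * placeholders supplied by the caller):
 *
 *	ret = scsi_add_device(shost, 0, target_id, new_lun);
 *	if (ret)
 *		shost_printk(KERN_WARNING, shost,
 *			     "failed to add LUN %llu: %d\n", new_lun, ret);
 */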
1683
1684 int scsi_resume_device(struct scsi_device *sdev)
1685 {
1686 struct device *dev = &sdev->sdev_gendev;
1687 int ret = 0;
1688
1689 device_lock(dev);
1690
1691 /*
1692 * Bail out if the device or its queue are not running. Otherwise,
1693 * the rescan may block waiting for commands to be executed, with us
1694 * holding the device lock. This can result in a potential deadlock
1695 * in the power management core code when system resume is on-going.
1696 */
1697 if (sdev->sdev_state != SDEV_RUNNING ||
1698 blk_queue_pm_only(sdev->request_queue)) {
1699 ret = -EWOULDBLOCK;
1700 goto unlock;
1701 }
1702
1703 if (dev->driver && try_module_get(dev->driver->owner)) {
1704 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1705
1706 if (drv->resume)
1707 ret = drv->resume(dev);
1708 module_put(dev->driver->owner);
1709 }
1710
1711 unlock:
1712 device_unlock(dev);
1713
1714 return ret;
1715 }
1716 EXPORT_SYMBOL(scsi_resume_device);
1717
1718 int scsi_rescan_device(struct scsi_device *sdev)
1719 {
1720 struct device *dev = &sdev->sdev_gendev;
1721 int ret = 0;
1722
1723 device_lock(dev);
1724
1725 /*
1726 * Bail out if the device or its queue are not running. Otherwise,
1727 * the rescan may block waiting for commands to be executed, with us
1728 * holding the device lock. This can result in a potential deadlock
1729 * in the power management core code when system resume is on-going.
1730 */
1731 if (sdev->sdev_state != SDEV_RUNNING ||
1732 blk_queue_pm_only(sdev->request_queue)) {
1733 ret = -EWOULDBLOCK;
1734 goto unlock;
1735 }
1736
1737 scsi_attach_vpd(sdev);
1738 scsi_cdl_check(sdev);
1739
1740 if (sdev->handler && sdev->handler->rescan)
1741 sdev->handler->rescan(sdev);
1742
1743 if (dev->driver && try_module_get(dev->driver->owner)) {
1744 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1745
1746 if (drv->rescan)
1747 drv->rescan(dev);
1748 module_put(dev->driver->owner);
1749 }
1750
1751 unlock:
1752 device_unlock(dev);
1753
1754 return ret;
1755 }
1756 EXPORT_SYMBOL(scsi_rescan_device);
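
/*
 * Example (illustrative sketch): a driver or event handler that detects a
 * changed logical unit (e.g. after a capacity change unit attention) can
 * ask the midlayer to re-read its state.  As with scsi_resume_device(),
 * -EWOULDBLOCK only means the device was not running at the time.  The
 * handler name is hypothetical.
 *
 *	static void my_handle_lun_change(struct scsi_device *sdev)
 *	{
 *		if (scsi_rescan_device(sdev) == -EWOULDBLOCK)
 *			sdev_printk(KERN_INFO, sdev, "rescan deferred\n");
 *	}
 */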
1757
1758 static void __scsi_scan_target(struct device *parent, unsigned int channel,
1759 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1760 {
1761 struct Scsi_Host *shost = dev_to_shost(parent);
1762 blist_flags_t bflags = 0;
1763 int res;
1764 struct scsi_target *starget;
1765
1766 if (shost->this_id == id)
1767 /*
1768 * Don't scan the host adapter
1769 */
1770 return;
1771
1772 starget = scsi_alloc_target(parent, channel, id);
1773 if (!starget)
1774 return;
1775 scsi_autopm_get_target(starget);
1776
1777 if (lun != SCAN_WILD_CARD) {
1778 /*
1779 * Scan for a specific host/chan/id/lun.
1780 */
1781 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1782 goto out_reap;
1783 }
1784
1785 /*
1786 * Scan LUN 0; if there is some response, scan further. Ideally, we
1787 * would not configure LUN 0 until all LUNs have been scanned.
1788 */
1789 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1790 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1791 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1792 /*
1793 * The REPORT LUNS scan did not scan the target;
1794 * fall back to a sequential scan.
1795 */
1796 scsi_sequential_lun_scan(starget, bflags,
1797 starget->scsi_level, rescan);
1798 }
1799
1800 out_reap:
1801 scsi_autopm_put_target(starget);
1802 /*
1803 * paired with scsi_alloc_target(): determine if the target has
1804 * any children at all and if not, nuke it
1805 */
1806 scsi_target_reap(starget);
1807
1808 put_device(&starget->dev);
1809 }
1810
1811 /**
1812 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1813 * @parent: host to scan
1814 * @channel: channel to scan
1815 * @id: target id to scan
1816 * @lun: Specific LUN to scan or SCAN_WILD_CARD
1817 * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1818 * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1819 * and SCSI_SCAN_MANUAL to force scanning even if
1820 * 'scan=manual' is set.
1821 *
1822 * Description:
1823 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1824 * and possibly all LUNs on the target id.
1825 *
1826 * First try a REPORT LUNS scan; if that does not scan the target, do a
1827 * sequential scan of LUNs on the target id.
1828 **/
1829 void scsi_scan_target(struct device *parent, unsigned int channel,
1830 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1831 {
1832 struct Scsi_Host *shost = dev_to_shost(parent);
1833
1834 if (strncmp(scsi_scan_type, "none", 4) == 0)
1835 return;
1836
1837 if (rescan != SCSI_SCAN_MANUAL &&
1838 strncmp(scsi_scan_type, "manual", 6) == 0)
1839 return;
1840
1841 mutex_lock(&shost->scan_mutex);
1842 if (!shost->async_scan)
1843 scsi_complete_async_scans();
1844
1845 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1846 __scsi_scan_target(parent, channel, id, lun, rescan);
1847 scsi_autopm_put_host(shost);
1848 }
1849 mutex_unlock(&shost->scan_mutex);
1850 }
1851 EXPORT_SYMBOL(scsi_scan_target);
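
/*
 * Example (illustrative sketch): a transport class that has just discovered
 * a remote port typically scans every LUN behind a single target id by
 * passing SCAN_WILD_CARD as the LUN.  The wrapper name is hypothetical; the
 * call itself matches the interface documented above.
 *
 *	static void my_transport_scan_rport(struct device *target_parent,
 *					    unsigned int channel,
 *					    unsigned int id)
 *	{
 *		scsi_scan_target(target_parent, channel, id, SCAN_WILD_CARD,
 *				 SCSI_SCAN_INITIAL);
 *	}
 */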
1852
1853 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1854 unsigned int id, u64 lun,
1855 enum scsi_scan_mode rescan)
1856 {
1857 uint order_id;
1858
1859 if (id == SCAN_WILD_CARD)
1860 for (id = 0; id < shost->max_id; ++id) {
1861 /*
1862 * XXX: adapter drivers could, where possible (FCP, iSCSI),
1863 * modify max_id to match the current maximum rather than
1864 * the absolute maximum.
1865 *
1866 * XXX: add a shost id iterator so that, for example, the
1867 * FC ID can be the same as a target id without the huge
1868 * overhead of sparse ids.
1869 */
1870 if (shost->reverse_ordering)
1871 /*
1872 * Scan from high to low id.
1873 */
1874 order_id = shost->max_id - id - 1;
1875 else
1876 order_id = id;
1877 __scsi_scan_target(&shost->shost_gendev, channel,
1878 order_id, lun, rescan);
1879 }
1880 else
1881 __scsi_scan_target(&shost->shost_gendev, channel,
1882 id, lun, rescan);
1883 }
1884
1885 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1886 unsigned int id, u64 lun,
1887 enum scsi_scan_mode rescan)
1888 {
1889 SCSI_LOG_SCAN_BUS(3, shost_printk(KERN_INFO, shost,
1890 "%s: <%u:%u:%llu>\n",
1891 __func__, channel, id, lun));
1892
1893 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1894 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1895 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1896 return -EINVAL;
1897
1898 mutex_lock(&shost->scan_mutex);
1899 if (!shost->async_scan)
1900 scsi_complete_async_scans();
1901
1902 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1903 if (channel == SCAN_WILD_CARD)
1904 for (channel = 0; channel <= shost->max_channel;
1905 channel++)
1906 scsi_scan_channel(shost, channel, id, lun,
1907 rescan);
1908 else
1909 scsi_scan_channel(shost, channel, id, lun, rescan);
1910 scsi_autopm_put_host(shost);
1911 }
1912 mutex_unlock(&shost->scan_mutex);
1913
1914 return 0;
1915 }
1916 EXPORT_SYMBOL(scsi_scan_host_selected);
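
/*
 * Example (illustrative sketch): this is the interface behind user-triggered
 * scans such as the host "scan" sysfs attribute.  A caller that has already
 * parsed a channel/id/lun triple (using SCAN_WILD_CARD for wildcards) would
 * invoke it roughly as follows; the wrapper name is hypothetical.
 *
 *	static int my_trigger_scan(struct Scsi_Host *shost, unsigned int chan,
 *				   unsigned int id, u64 lun)
 *	{
 *		return scsi_scan_host_selected(shost, chan, id, lun,
 *					       SCSI_SCAN_MANUAL);
 *	}
 */
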
1917 static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1918 {
1919 struct scsi_device *sdev;
1920 shost_for_each_device(sdev, shost) {
1921 /* target removed before the device could be added */
1922 if (sdev->sdev_state == SDEV_DEL)
1923 continue;
1924 /* If device is already visible, skip adding it to sysfs */
1925 if (sdev->is_visible)
1926 continue;
1927 if (!scsi_host_scan_allowed(shost) ||
1928 scsi_sysfs_add_sdev(sdev) != 0)
1929 __scsi_remove_device(sdev);
1930 }
1931 }
1932
1933 /**
1934 * scsi_prep_async_scan - prepare for an async scan
1935 * @shost: the host which will be scanned
1936 * Returns: a cookie to be passed to scsi_finish_async_scan()
1937 *
1938 * Tells the midlayer this host is going to do an asynchronous scan.
1939 * It reserves the host's position in the scanning list and ensures
1940 * that other asynchronous scans started after this one won't affect the
1941 * ordering of the discovered devices.
1942 */
1943 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1944 {
1945 struct async_scan_data *data = NULL;
1946 unsigned long flags;
1947
1948 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1949 return NULL;
1950
1951 mutex_lock(&shost->scan_mutex);
1952 if (shost->async_scan) {
1953 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1954 goto err;
1955 }
1956
1957 data = kmalloc(sizeof(*data), GFP_KERNEL);
1958 if (!data)
1959 goto err;
1960 data->shost = scsi_host_get(shost);
1961 if (!data->shost)
1962 goto err;
1963 init_completion(&data->prev_finished);
1964
1965 spin_lock_irqsave(shost->host_lock, flags);
1966 shost->async_scan = 1;
1967 spin_unlock_irqrestore(shost->host_lock, flags);
1968 mutex_unlock(&shost->scan_mutex);
1969
1970 spin_lock(&async_scan_lock);
1971 if (list_empty(&scanning_hosts))
1972 complete(&data->prev_finished);
1973 list_add_tail(&data->list, &scanning_hosts);
1974 spin_unlock(&async_scan_lock);
1975
1976 return data;
1977
1978 err:
1979 mutex_unlock(&shost->scan_mutex);
1980 kfree(data);
1981 return NULL;
1982 }
1983
1984 /**
1985 * scsi_finish_async_scan - asynchronous scan has finished
1986 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1987 *
1988 * All the devices currently attached to this host have been found.
1989 * This function announces all the devices it has found to the rest
1990 * of the system.
1991 */
1992 static void scsi_finish_async_scan(struct async_scan_data *data)
1993 {
1994 struct Scsi_Host *shost;
1995 unsigned long flags;
1996
1997 if (!data)
1998 return;
1999
2000 shost = data->shost;
2001
2002 mutex_lock(&shost->scan_mutex);
2003
2004 if (!shost->async_scan) {
2005 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
2006 dump_stack();
2007 mutex_unlock(&shost->scan_mutex);
2008 return;
2009 }
2010
2011 wait_for_completion(&data->prev_finished);
2012
2013 scsi_sysfs_add_devices(shost);
2014
2015 spin_lock_irqsave(shost->host_lock, flags);
2016 shost->async_scan = 0;
2017 spin_unlock_irqrestore(shost->host_lock, flags);
2018
2019 mutex_unlock(&shost->scan_mutex);
2020
2021 spin_lock(&async_scan_lock);
2022 list_del(&data->list);
2023 if (!list_empty(&scanning_hosts)) {
2024 struct async_scan_data *next = list_entry(scanning_hosts.next,
2025 struct async_scan_data, list);
2026 complete(&next->prev_finished);
2027 }
2028 spin_unlock(&async_scan_lock);
2029
2030 scsi_autopm_put_host(shost);
2031 scsi_host_put(shost);
2032 kfree(data);
2033 }
2034
2035 static void do_scsi_scan_host(struct Scsi_Host *shost)
2036 {
2037 if (shost->hostt->scan_finished) {
2038 unsigned long start = jiffies;
2039 if (shost->hostt->scan_start)
2040 shost->hostt->scan_start(shost);
2041
2042 while (!shost->hostt->scan_finished(shost, jiffies - start))
2043 msleep(10);
2044 } else {
2045 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
2046 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2047 }
2048 }
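
/*
 * Example (illustrative sketch): a driver that performs its own discovery
 * can supply the scan_start/scan_finished host template hooks polled above
 * instead of relying on the generic wildcard scan.  The driver structure,
 * its fields and the discovery helper are hypothetical; other mandatory
 * template fields are omitted.
 *
 *	static void my_scan_start(struct Scsi_Host *shost)
 *	{
 *		struct my_hba *hba = shost_priv(shost);
 *
 *		my_hba_kick_discovery(hba);
 *	}
 *
 *	static int my_scan_finished(struct Scsi_Host *shost,
 *				    unsigned long elapsed)
 *	{
 *		struct my_hba *hba = shost_priv(shost);
 *
 *		return hba->discovery_done || elapsed > 60 * HZ;
 *	}
 *
 *	static const struct scsi_host_template my_sht = {
 *		.scan_start	= my_scan_start,
 *		.scan_finished	= my_scan_finished,
 *	};
 */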
2049
2050 static void do_scan_async(void *_data, async_cookie_t c)
2051 {
2052 struct async_scan_data *data = _data;
2053 struct Scsi_Host *shost = data->shost;
2054
2055 do_scsi_scan_host(shost);
2056 scsi_finish_async_scan(data);
2057 }
2058
2059 /**
2060 * scsi_scan_host - scan the given adapter
2061 * @shost: adapter to scan
2062 *
2063 * Notes: Should be called after scsi_add_host()
2064 **/
2065 void scsi_scan_host(struct Scsi_Host *shost)
2066 {
2067 struct async_scan_data *data;
2068
2069 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
2070 strncmp(scsi_scan_type, "manual", 6) == 0)
2071 return;
2072 if (scsi_autopm_get_host(shost) < 0)
2073 return;
2074
2075 data = scsi_prep_async_scan(shost);
2076 if (!data) {
2077 do_scsi_scan_host(shost);
2078 scsi_autopm_put_host(shost);
2079 return;
2080 }
2081
2082 /* register with the async subsystem so wait_for_device_probe()
2083 * will flush this work
2084 */
2085 async_schedule(do_scan_async, data);
2086
2087 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
2088 }
2089 EXPORT_SYMBOL(scsi_scan_host);
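
/*
 * Example (illustrative sketch): the usual ordering in a low-level driver's
 * probe routine.  Hardware setup, most error paths and the driver-specific
 * host template ("my_sht") are omitted or hypothetical; only the ordering
 * of scsi_host_alloc(), scsi_add_host() and scsi_scan_host() matters here.
 *
 *	static int my_hba_probe(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		struct Scsi_Host *shost;
 *		int ret;
 *
 *		shost = scsi_host_alloc(&my_sht, sizeof(struct my_hba));
 *		if (!shost)
 *			return -ENOMEM;
 *
 *		ret = scsi_add_host(shost, &pdev->dev);
 *		if (ret) {
 *			scsi_host_put(shost);
 *			return ret;
 *		}
 *
 *		scsi_scan_host(shost);
 *		return 0;
 *	}
 */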
2090
2091 void scsi_forget_host(struct Scsi_Host *shost)
2092 {
2093 struct scsi_device *sdev;
2094 unsigned long flags;
2095
2096 restart:
2097 spin_lock_irqsave(shost->host_lock, flags);
2098 list_for_each_entry(sdev, &shost->__devices, siblings) {
2099 if (scsi_device_is_pseudo_dev(sdev) ||
2100 sdev->sdev_state == SDEV_DEL)
2101 continue;
2102 spin_unlock_irqrestore(shost->host_lock, flags);
2103 __scsi_remove_device(sdev);
2104 goto restart;
2105 }
2106 spin_unlock_irqrestore(shost->host_lock, flags);
2107
2108 /*
2109 * Remove the pseudo device last since it may be needed during removal
2110 * of other SCSI devices.
2111 */
2112 if (shost->pseudo_sdev)
2113 __scsi_remove_device(shost->pseudo_sdev);
2114 }
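
/*
 * Example (illustrative sketch): LLDs do not call scsi_forget_host()
 * directly; it runs as part of scsi_remove_host() when the host is torn
 * down.  A typical remove path therefore looks roughly like the following
 * (the driver names are hypothetical):
 *
 *	static void my_hba_remove(struct pci_dev *pdev)
 *	{
 *		struct Scsi_Host *shost = pci_get_drvdata(pdev);
 *
 *		scsi_remove_host(shost);
 *		scsi_host_put(shost);
 *	}
 */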
2115
2116 /**
2117 * scsi_get_pseudo_sdev() - Attach a pseudo SCSI device to a SCSI host
2118 * @shost: Host that needs a pseudo SCSI device
2119 *
2120 * Lock status: None assumed.
2121 *
2122 * Returns: The scsi_device or NULL
2123 *
2124 * Notes:
2125 * Attach a single scsi_device to the Scsi_Host. The primary aim of this
2126 * device is to serve as a container from which SCSI commands can be
2127 * allocated. Each SCSI command will carry a command tag allocated by the
2128 * block layer. These SCSI commands can be used by the LLDD to send
2129 * internal or passthrough commands without having to manage tag allocation
2130 * inside the LLDD.
2131 */
2132 struct scsi_device *scsi_get_pseudo_sdev(struct Scsi_Host *shost)
2133 {
2134 struct scsi_device *sdev = NULL;
2135 struct scsi_target *starget;
2136
2137 guard(mutex)(&shost->scan_mutex);
2138
2139 if (!scsi_host_scan_allowed(shost))
2140 goto out;
2141
2142 starget = scsi_alloc_target(&shost->shost_gendev, 0, shost->max_id);
2143 if (!starget)
2144 goto out;
2145
2146 sdev = scsi_alloc_sdev(starget, U64_MAX, NULL);
2147 if (!sdev) {
2148 scsi_target_reap(starget);
2149 goto put_target;
2150 }
2151
2152 sdev->borken = 0;
2153
2154 put_target:
2155 /* See also the get_device(dev) call in scsi_alloc_target(). */
2156 put_device(&starget->dev);
2157
2158 out:
2159 return sdev;
2160 }
2161
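
/*
 * Example (illustrative sketch, based on the description above): an LLD
 * obtains the pseudo scsi_device once and then allocates block-layer-tagged
 * requests from its queue for driver-internal commands.  The "my_hba"
 * structure is hypothetical, and the use of scsi_alloc_request() /
 * blk_mq_rq_to_pdu() here is an assumption about how such commands would
 * typically be allocated, not something mandated by this file.
 *
 *	static int my_hba_setup_internal_cmds(struct my_hba *hba)
 *	{
 *		hba->pseudo_sdev = scsi_get_pseudo_sdev(hba->shost);
 *		return hba->pseudo_sdev ? 0 : -ENOMEM;
 *	}
 *
 *	static struct scsi_cmnd *my_hba_get_internal_cmd(struct my_hba *hba)
 *	{
 *		struct request *rq;
 *
 *		rq = scsi_alloc_request(hba->pseudo_sdev->request_queue,
 *					REQ_OP_DRV_OUT, 0);
 *		if (IS_ERR(rq))
 *			return NULL;
 *		return blk_mq_rq_to_pdu(rq);
 *	}
 */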