1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * scsi_scan.c
4 *
5 * Copyright (C) 2000 Eric Youngdale,
6 * Copyright (C) 2002 Patrick Mansfield
7 *
8 * The general scanning/probing algorithm is as follows, exceptions are
9 * made to it depending on device specific flags, compilation options, and
10 * global variable (boot or module load time) settings.
11 *
12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13 * device attached, a scsi_device is allocated and setup for it.
14 *
15 * For every id of every channel on the given host:
16 *
17 * Scan LUN 0; if the target responds to LUN 0 (even if there is no
18 * device or storage attached to LUN 0):
19 *
20 * If LUN 0 has a device attached, allocate and setup a
21 * scsi_device for it.
22 *
23 * If target is SCSI-3 or up, issue a REPORT LUN, and scan
24 * all of the LUNs returned by the REPORT LUN; else,
25 * sequentially scan LUNs up until some maximum is reached,
26 * or a LUN is seen that cannot have a device attached to it.
27 */
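/*
 * Editorial sketch (not compiled): the flow implemented below is roughly
 *
 *	for each <channel, id> on the host:
 *		scsi_probe_and_add_lun(starget, 0, ...);	// probe LUN 0
 *		if (the target responded at LUN 0)
 *			if (scsi_report_lun_scan(starget, ...) != 0)
 *				scsi_sequential_lun_scan(starget, ...);
 *
 * where the names refer to the helpers defined later in this file.
 */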
28
29 #include <linux/module.h>
30 #include <linux/moduleparam.h>
31 #include <linux/init.h>
32 #include <linux/blkdev.h>
33 #include <linux/delay.h>
34 #include <linux/kthread.h>
35 #include <linux/spinlock.h>
36 #include <linux/async.h>
37 #include <linux/slab.h>
38 #include <linux/unaligned.h>
39
40 #include <scsi/scsi.h>
41 #include <scsi/scsi_cmnd.h>
42 #include <scsi/scsi_device.h>
43 #include <scsi/scsi_driver.h>
44 #include <scsi/scsi_devinfo.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_transport.h>
47 #include <scsi/scsi_dh.h>
48 #include <scsi/scsi_eh.h>
49
50 #include "scsi_priv.h"
51 #include "scsi_logging.h"
52
53 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \
54 " SCSI scanning, some SCSI devices might not be configured\n"
55
56 /*
57 * Default timeout
58 */
59 #define SCSI_TIMEOUT (2*HZ)
60 #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61
62 /*
63 * Prefix values for the SCSI id's (stored in sysfs name field)
64 */
65 #define SCSI_UID_SER_NUM 'S'
66 #define SCSI_UID_UNKNOWN 'Z'
67
68 /*
69 * Return values of some of the scanning functions.
70 *
71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
72 * includes allocation or general failures preventing IO from being sent.
73 *
74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75 * on the given LUN.
76 *
77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78 * given LUN.
79 */
80 #define SCSI_SCAN_NO_RESPONSE 0
81 #define SCSI_SCAN_TARGET_PRESENT 1
82 #define SCSI_SCAN_LUN_PRESENT 2
83
84 static const char *scsi_null_device_strs = "nullnullnullnull";
85
86 #define MAX_SCSI_LUNS 512
87
88 static u64 max_scsi_luns = MAX_SCSI_LUNS;
89
90 module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91 MODULE_PARM_DESC(max_luns,
92 "last scsi LUN (should be between 1 and 2^64-1)");
93
94 #ifdef CONFIG_SCSI_SCAN_ASYNC
95 #define SCSI_SCAN_TYPE_DEFAULT "async"
96 #else
97 #define SCSI_SCAN_TYPE_DEFAULT "sync"
98 #endif
99
100 static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101
102 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103 S_IRUGO|S_IWUSR);
104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105 "Setting to 'manual' disables automatic scanning, but allows "
106 "for manual device scan via the 'scan' sysfs attribute.");
107
108 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109
110 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111 MODULE_PARM_DESC(inq_timeout,
112 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113 " Default is 20. Some devices may need more; most need less.");
114
115 /* This lock protects only this list */
116 static DEFINE_SPINLOCK(async_scan_lock);
117 static LIST_HEAD(scanning_hosts);
118
119 struct async_scan_data {
120 struct list_head list;
121 struct Scsi_Host *shost;
122 struct completion prev_finished;
123 };
124
125 /*
126 * scsi_enable_async_suspend - Enable async suspend and resume
127 */
128 void scsi_enable_async_suspend(struct device *dev)
129 {
130 /*
131 * If a user has disabled async probing a likely reason is due to a
132 * storage enclosure that does not inject staggered spin-ups. For
133 * safety, make resume synchronous as well in that case.
134 */
135 if (strncmp(scsi_scan_type, "async", 5) != 0)
136 return;
137 /* Enable asynchronous suspend and resume. */
138 device_enable_async_suspend(dev);
139 }
140
141 /**
142 * scsi_complete_async_scans - Wait for asynchronous scans to complete
143 *
144 * When this function returns, any host which started scanning before
145 * this function was called will have finished its scan. Hosts which
146 * started scanning after this function was called may or may not have
147 * finished.
148 */
149 int scsi_complete_async_scans(void)
150 {
151 struct async_scan_data *data;
152
153 do {
154 if (list_empty(&scanning_hosts))
155 return 0;
156 /* If we can't get memory immediately, that's OK. Just
157 * sleep a little. Even if we never get memory, the async
158 * scans will finish eventually.
159 */
160 data = kmalloc(sizeof(*data), GFP_KERNEL);
161 if (!data)
162 msleep(1);
163 } while (!data);
164
165 data->shost = NULL;
166 init_completion(&data->prev_finished);
167
168 spin_lock(&async_scan_lock);
169 /* Check that there's still somebody else on the list */
170 if (list_empty(&scanning_hosts))
171 goto done;
172 list_add_tail(&data->list, &scanning_hosts);
173 spin_unlock(&async_scan_lock);
174
175 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
176 wait_for_completion(&data->prev_finished);
177
178 spin_lock(&async_scan_lock);
179 list_del(&data->list);
180 if (!list_empty(&scanning_hosts)) {
181 struct async_scan_data *next = list_entry(scanning_hosts.next,
182 struct async_scan_data, list);
183 complete(&next->prev_finished);
184 }
185 done:
186 spin_unlock(&async_scan_lock);
187
188 kfree(data);
189 return 0;
190 }
191
192 /**
193 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
194 * @sdev: scsi device to send command to
195 * @result: area to store the result of the MODE SENSE
196 *
197 * Description:
198 * Send a vendor specific MODE SENSE (not a MODE SELECT) command.
199 * Called for BLIST_KEY devices.
200 **/
201 static void scsi_unlock_floptical(struct scsi_device *sdev,
202 unsigned char *result)
203 {
204 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
205
206 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
207 scsi_cmd[0] = MODE_SENSE;
208 scsi_cmd[1] = 0;
209 scsi_cmd[2] = 0x2e;
210 scsi_cmd[3] = 0;
211 scsi_cmd[4] = 0x2a; /* size */
212 scsi_cmd[5] = 0;
213 scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
214 SCSI_TIMEOUT, 3, NULL);
215 }
216
217 static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
218 unsigned int depth)
219 {
220 int new_shift = sbitmap_calculate_shift(depth);
221 bool need_alloc = !sdev->budget_map.map;
222 bool need_free = false;
223 unsigned int memflags;
224 int ret;
225 struct sbitmap sb_backup;
226
227 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
228
229 /*
230 * realloc if new shift is calculated, which is caused by setting
231 * up one new default queue depth after calling ->sdev_configure
232 */
233 if (!need_alloc && new_shift != sdev->budget_map.shift)
234 need_alloc = need_free = true;
235
236 if (!need_alloc)
237 return 0;
238
239 /*
240 * Request queue has to be frozen for reallocating budget map,
241 * and here disk isn't added yet, so freezing is pretty fast
242 */
243 if (need_free) {
244 memflags = blk_mq_freeze_queue(sdev->request_queue);
245 sb_backup = sdev->budget_map;
246 }
247 ret = sbitmap_init_node(&sdev->budget_map,
248 scsi_device_max_queue_depth(sdev),
249 new_shift, GFP_NOIO,
250 sdev->request_queue->node, false, true);
251 if (!ret)
252 sbitmap_resize(&sdev->budget_map, depth);
253
254 if (need_free) {
255 if (ret)
256 sdev->budget_map = sb_backup;
257 else
258 sbitmap_free(&sb_backup);
259 ret = 0;
260 blk_mq_unfreeze_queue(sdev->request_queue, memflags);
261 }
262 return ret;
263 }
264
265 /**
266 * scsi_alloc_sdev - allocate and setup a scsi_Device
267 * @starget: which target to allocate a &scsi_device for
268 * @lun: which lun
269 * @hostdata: usually NULL and set by ->sdev_init instead
270 *
271 * Description:
272 * Allocate, initialize for io, and return a pointer to a scsi_Device.
273 * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
274 * adds scsi_Device to the appropriate list.
275 *
276 * Return value:
277 * scsi_Device pointer, or NULL on failure.
278 **/
279 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
280 u64 lun, void *hostdata)
281 {
282 unsigned int depth;
283 struct scsi_device *sdev;
284 struct request_queue *q;
285 int display_failure_msg = 1, ret;
286 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
287 struct queue_limits lim;
288
289 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
290 GFP_KERNEL);
291 if (!sdev)
292 goto out;
293
294 sdev->vendor = scsi_null_device_strs;
295 sdev->model = scsi_null_device_strs;
296 sdev->rev = scsi_null_device_strs;
297 sdev->host = shost;
298 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
299 sdev->id = starget->id;
300 sdev->lun = lun;
301 sdev->channel = starget->channel;
302 mutex_init(&sdev->state_mutex);
303 sdev->sdev_state = SDEV_CREATED;
304 INIT_LIST_HEAD(&sdev->siblings);
305 INIT_LIST_HEAD(&sdev->same_target_siblings);
306 INIT_LIST_HEAD(&sdev->starved_entry);
307 INIT_LIST_HEAD(&sdev->event_list);
308 spin_lock_init(&sdev->list_lock);
309 mutex_init(&sdev->inquiry_mutex);
310 INIT_WORK(&sdev->event_work, scsi_evt_thread);
311 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
312
313 sdev->sdev_gendev.parent = get_device(&starget->dev);
314 sdev->sdev_target = starget;
315
316 /* usually NULL and set by ->sdev_init instead */
317 sdev->hostdata = hostdata;
318
319 /* if the device needs this changing, it may do so in the
320 * sdev_configure function */
321 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
322
323 /*
324 * Some low level driver could use device->type
325 */
326 sdev->type = -1;
327
328 /*
329 * Assume that the device will have handshaking problems,
330 * and then fix this field later if it turns out it
331 * doesn't
332 */
333 sdev->borken = 1;
334
335 sdev->sg_reserved_size = INT_MAX;
336
337 scsi_init_limits(shost, &lim);
338 q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
339 if (IS_ERR(q)) {
340 /* release fn is set up in scsi_sysfs_device_initialise, so
341 * have to free and put manually here */
342 put_device(&starget->dev);
343 kfree(sdev);
344 goto out;
345 }
346 kref_get(&sdev->host->tagset_refcnt);
347 sdev->request_queue = q;
348
349 depth = sdev->host->cmd_per_lun ?: 1;
350
351 /*
352 * Use .can_queue as budget map's depth because we have to
353 * support adjusting queue depth from sysfs. Meantime use
354 * default device queue depth to figure out sbitmap shift
355 * since we use this queue depth most of times.
356 */
357 if (scsi_realloc_sdev_budget_map(sdev, depth)) {
358 put_device(&starget->dev);
359 kfree(sdev);
360 goto out;
361 }
362
363 scsi_change_queue_depth(sdev, depth);
364
365 scsi_sysfs_device_initialize(sdev);
366
367 if (shost->hostt->sdev_init) {
368 ret = shost->hostt->sdev_init(sdev);
369 if (ret) {
370 /*
371 * if LLDD reports slave not present, don't clutter
372 * console with alloc failure messages
373 */
374 if (ret == -ENXIO)
375 display_failure_msg = 0;
376 goto out_device_destroy;
377 }
378 }
379
380 return sdev;
381
382 out_device_destroy:
383 __scsi_remove_device(sdev);
384 out:
385 if (display_failure_msg)
386 printk(ALLOC_FAILURE_MSG, __func__);
387 return NULL;
388 }
389
390 static void scsi_target_destroy(struct scsi_target *starget)
391 {
392 struct device *dev = &starget->dev;
393 struct Scsi_Host *shost = dev_to_shost(dev->parent);
394 unsigned long flags;
395
396 BUG_ON(starget->state == STARGET_DEL);
397 starget->state = STARGET_DEL;
398 transport_destroy_device(dev);
399 spin_lock_irqsave(shost->host_lock, flags);
400 if (shost->hostt->target_destroy)
401 shost->hostt->target_destroy(starget);
402 list_del_init(&starget->siblings);
403 spin_unlock_irqrestore(shost->host_lock, flags);
404 put_device(dev);
405 }
406
407 static void scsi_target_dev_release(struct device *dev)
408 {
409 struct device *parent = dev->parent;
410 struct scsi_target *starget = to_scsi_target(dev);
411
412 kfree(starget);
413 put_device(parent);
414 }
415
416 static const struct device_type scsi_target_type = {
417 .name = "scsi_target",
418 .release = scsi_target_dev_release,
419 };
420
421 int scsi_is_target_device(const struct device *dev)
422 {
423 return dev->type == &scsi_target_type;
424 }
425 EXPORT_SYMBOL(scsi_is_target_device);
426
427 static struct scsi_target *__scsi_find_target(struct device *parent,
428 int channel, uint id)
429 {
430 struct scsi_target *starget, *found_starget = NULL;
431 struct Scsi_Host *shost = dev_to_shost(parent);
432 /*
433 * Search for an existing target for this sdev.
434 */
435 list_for_each_entry(starget, &shost->__targets, siblings) {
436 if (starget->id == id &&
437 starget->channel == channel) {
438 found_starget = starget;
439 break;
440 }
441 }
442 if (found_starget)
443 get_device(&found_starget->dev);
444
445 return found_starget;
446 }
447
448 /**
449 * scsi_target_reap_ref_release - remove target from visibility
450 * @kref: the reap_ref in the target being released
451 *
452 * Called on last put of reap_ref, which is the indication that no device
453 * under this target is visible anymore, so render the target invisible in
454 * sysfs. Note: we have to be in user context here because the target reaps
455 * should be done in places where the scsi device visibility is being removed.
456 */
457 static void scsi_target_reap_ref_release(struct kref *kref)
458 {
459 struct scsi_target *starget
460 = container_of(kref, struct scsi_target, reap_ref);
461
462 /*
463 * if we get here and the target is still in a CREATED state that
464 * means it was allocated but never made visible (because a scan
465 * turned up no LUNs), so don't call device_del() on it.
466 */
467 if ((starget->state != STARGET_CREATED) &&
468 (starget->state != STARGET_CREATED_REMOVE)) {
469 transport_remove_device(&starget->dev);
470 device_del(&starget->dev);
471 }
472 scsi_target_destroy(starget);
473 }
474
475 static void scsi_target_reap_ref_put(struct scsi_target *starget)
476 {
477 kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
478 }
479
480 /**
481 * scsi_alloc_target - allocate a new or find an existing target
482 * @parent: parent of the target (need not be a scsi host)
483 * @channel: target channel number (zero if no channels)
484 * @id: target id number
485 *
486 * Return an existing target if one exists, provided it hasn't already
487 * gone into STARGET_DEL state, otherwise allocate a new target.
488 *
489 * The target is returned with an incremented reference, so the caller
490 * is responsible for both reaping and doing a last put
491 */
492 static struct scsi_target *scsi_alloc_target(struct device *parent,
493 int channel, uint id)
494 {
495 struct Scsi_Host *shost = dev_to_shost(parent);
496 struct device *dev = NULL;
497 unsigned long flags;
498 const int size = sizeof(struct scsi_target)
499 + shost->transportt->target_size;
500 struct scsi_target *starget;
501 struct scsi_target *found_target;
502 int error, ref_got;
503
504 starget = kzalloc(size, GFP_KERNEL);
505 if (!starget) {
506 printk(KERN_ERR "%s: allocation failure\n", __func__);
507 return NULL;
508 }
509 dev = &starget->dev;
510 device_initialize(dev);
511 kref_init(&starget->reap_ref);
512 dev->parent = get_device(parent);
513 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
514 dev->bus = &scsi_bus_type;
515 dev->type = &scsi_target_type;
516 scsi_enable_async_suspend(dev);
517 starget->id = id;
518 starget->channel = channel;
519 starget->can_queue = 0;
520 INIT_LIST_HEAD(&starget->siblings);
521 INIT_LIST_HEAD(&starget->devices);
522 starget->state = STARGET_CREATED;
523 starget->scsi_level = SCSI_2;
524 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
525 retry:
526 spin_lock_irqsave(shost->host_lock, flags);
527
528 found_target = __scsi_find_target(parent, channel, id);
529 if (found_target)
530 goto found;
531
532 list_add_tail(&starget->siblings, &shost->__targets);
533 spin_unlock_irqrestore(shost->host_lock, flags);
534 /* allocate and add */
535 transport_setup_device(dev);
536 if (shost->hostt->target_alloc) {
537 error = shost->hostt->target_alloc(starget);
538
539 if (error) {
540 if (error != -ENXIO)
541 dev_err(dev, "target allocation failed, error %d\n", error);
542 /* don't want scsi_target_reap to do the final
543 * put because it will be under the host lock */
544 scsi_target_destroy(starget);
545 return NULL;
546 }
547 }
548 get_device(dev);
549
550 return starget;
551
552 found:
553 /*
554 * release routine already fired if kref is zero, so if we can still
555 * take the reference, the target must be alive. If we can't, it must
556 * be dying and we need to wait for a new target
557 */
558 ref_got = kref_get_unless_zero(&found_target->reap_ref);
559
560 spin_unlock_irqrestore(shost->host_lock, flags);
561 if (ref_got) {
562 put_device(dev);
563 return found_target;
564 }
565 /*
566 * Unfortunately, we found a dying target; need to wait until it's
567 * dead before we can get a new one. There is an anomaly here. We
568 * *should* call scsi_target_reap() to balance the kref_get() of the
569 * reap_ref above. However, since the target is being released, it's
570 * already invisible and the reap_ref is irrelevant. If we call
571 * scsi_target_reap() we might spuriously do another device_del() on
572 * an already invisible target.
573 */
574 put_device(&found_target->dev);
575 /*
576 * length of time is irrelevant here, we just want to yield the CPU
577 * for a tick to avoid busy waiting for the target to die.
578 */
579 msleep(1);
580 goto retry;
581 }
582
583 /**
584 * scsi_target_reap - check to see if target is in use and destroy if not
585 * @starget: target to be checked
586 *
587 * This is used after removing a LUN or doing a last put of the target;
588 * it checks atomically that nothing is using the target and removes
589 * it if so.
590 */
591 void scsi_target_reap(struct scsi_target *starget)
592 {
593 /*
594 * serious problem if this triggers: STARGET_DEL is only set if
595 * the reap_ref drops to zero, so we're trying to do another final put
596 * on an already released kref
597 */
598 BUG_ON(starget->state == STARGET_DEL);
599 scsi_target_reap_ref_put(starget);
600 }
601
602 /**
603 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
604 * INQUIRY result string
605 * @s: INQUIRY result string to sanitize
606 * @len: length of the string
607 *
608 * Description:
609 * The SCSI spec says that INQUIRY vendor, product, and revision
610 * strings must consist entirely of graphic ASCII characters,
611 * padded on the right with spaces. Since not all devices obey
612 * this rule, we will replace non-graphic or non-ASCII characters
613 * with spaces. Exception: a NUL character is interpreted as a
614 * string terminator, so all the following characters are set to
615 * spaces.
616 **/
617 void scsi_sanitize_inquiry_string(unsigned char *s, int len)
618 {
619 int terminated = 0;
620
621 for (; len > 0; (--len, ++s)) {
622 if (*s == 0)
623 terminated = 1;
624 if (terminated || *s < 0x20 || *s > 0x7e)
625 *s = ' ';
626 }
627 }
628 EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
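/*
 * Hedged example (not used by the code above): a caller holding a raw
 * standard INQUIRY response could clean the fixed-width identification
 * fields before logging them. Offsets follow SPC: vendor at bytes 8-15,
 * product at 16-31, revision at 32-35. The helper name is hypothetical.
 */
static void __maybe_unused example_sanitize_inquiry(unsigned char *inq)
{
	scsi_sanitize_inquiry_string(&inq[8], 8);	/* T10 vendor identification */
	scsi_sanitize_inquiry_string(&inq[16], 16);	/* product identification */
	scsi_sanitize_inquiry_string(&inq[32], 4);	/* product revision level */
}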
629
630
631 /**
632 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
633 * @sdev: scsi_device to probe
634 * @inq_result: area to store the INQUIRY result
635 * @result_len: len of inq_result
636 * @bflags: store any bflags found here
637 *
638 * Description:
639 * Probe the lun associated with @sdev using a standard SCSI INQUIRY;
640 *
641 * If the INQUIRY is successful, zero is returned and the
642 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
643 * are copied to the scsi_device, and any flags value is stored in *@bflags.
644 **/
645 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
646 int result_len, blist_flags_t *bflags)
647 {
648 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
649 int first_inquiry_len, try_inquiry_len, next_inquiry_len;
650 int response_len = 0;
651 int pass, count, result, resid;
652 struct scsi_failure failure_defs[] = {
653 /*
654 * not-ready to ready transition [asc/ascq=0x28/0x0] or
655 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
656 * should not yield UNIT_ATTENTION but many buggy devices do
657 * so anyway.
658 */
659 {
660 .sense = UNIT_ATTENTION,
661 .asc = 0x28,
662 .result = SAM_STAT_CHECK_CONDITION,
663 },
664 {
665 .sense = UNIT_ATTENTION,
666 .asc = 0x29,
667 .result = SAM_STAT_CHECK_CONDITION,
668 },
669 {
670 .allowed = 1,
671 .result = DID_TIME_OUT << 16,
672 },
673 {}
674 };
675 struct scsi_failures failures = {
676 .total_allowed = 3,
677 .failure_definitions = failure_defs,
678 };
679 const struct scsi_exec_args exec_args = {
680 .resid = &resid,
681 .failures = &failures,
682 };
683
684 *bflags = 0;
685
686 /* Perform up to 3 passes. The first pass uses a conservative
687 * transfer length of 36 unless sdev->inquiry_len specifies a
688 * different value. */
689 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
690 try_inquiry_len = first_inquiry_len;
691 pass = 1;
692
693 next_pass:
694 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
695 "scsi scan: INQUIRY pass %d length %d\n",
696 pass, try_inquiry_len));
697
698 /* Each pass gets up to three chances to ignore Unit Attention */
699 scsi_failures_reset_retries(&failures);
700
701 for (count = 0; count < 3; ++count) {
702 memset(scsi_cmd, 0, 6);
703 scsi_cmd[0] = INQUIRY;
704 scsi_cmd[4] = (unsigned char) try_inquiry_len;
705
706 memset(inq_result, 0, try_inquiry_len);
707
708 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
709 inq_result, try_inquiry_len,
710 HZ / 2 + HZ * scsi_inq_timeout, 3,
711 &exec_args);
712
713 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
714 "scsi scan: INQUIRY %s with code 0x%x\n",
715 result ? "failed" : "successful", result));
716
717 if (result == 0) {
718 /*
719 * if nothing was transferred, we try
720 * again. It's a workaround for some USB
721 * devices.
722 */
723 if (resid == try_inquiry_len)
724 continue;
725 }
726 break;
727 }
728
729 if (result == 0) {
730 scsi_sanitize_inquiry_string(&inq_result[8], 8);
731 scsi_sanitize_inquiry_string(&inq_result[16], 16);
732 scsi_sanitize_inquiry_string(&inq_result[32], 4);
733
734 response_len = inq_result[4] + 5;
735 if (response_len > 255)
736 response_len = first_inquiry_len; /* sanity */
737
738 /*
739 * Get any flags for this device.
740 *
741 * XXX add a bflags to scsi_device, and replace the
742 * corresponding bit fields in scsi_device, so bflags
743 * need not be passed as an argument.
744 */
745 *bflags = scsi_get_device_flags(sdev, &inq_result[8],
746 &inq_result[16]);
747
748 /* When the first pass succeeds we gain information about
749 * what larger transfer lengths might work. */
750 if (pass == 1) {
751 if (BLIST_INQUIRY_36 & *bflags)
752 next_inquiry_len = 36;
753 /*
754 * LLD specified a maximum sdev->inquiry_len
755 * but device claims it has more data. Capping
756 * the length only makes sense for legacy
757 * devices. If a device supports SPC-4 (2014)
758 * or newer, assume that it is safe to ask for
759 * as much as the device says it supports.
760 */
761 else if (sdev->inquiry_len &&
762 response_len > sdev->inquiry_len &&
763 (inq_result[2] & 0x7) < 6) /* SPC-4 */
764 next_inquiry_len = sdev->inquiry_len;
765 else
766 next_inquiry_len = response_len;
767
768 /* If more data is available perform the second pass */
769 if (next_inquiry_len > try_inquiry_len) {
770 try_inquiry_len = next_inquiry_len;
771 pass = 2;
772 goto next_pass;
773 }
774 }
775
776 } else if (pass == 2) {
777 sdev_printk(KERN_INFO, sdev,
778 "scsi scan: %d byte inquiry failed. "
779 "Consider BLIST_INQUIRY_36 for this device\n",
780 try_inquiry_len);
781
782 /* If this pass failed, the third pass goes back and transfers
783 * the same amount as we successfully got in the first pass. */
784 try_inquiry_len = first_inquiry_len;
785 pass = 3;
786 goto next_pass;
787 }
788
789 /* If the last transfer attempt got an error, assume the
790 * peripheral doesn't exist or is dead. */
791 if (result)
792 return -EIO;
793
794 /* Don't report any more data than the device says is valid */
795 sdev->inquiry_len = min(try_inquiry_len, response_len);
796
797 /*
798 * XXX Abort if the response length is less than 36? If less than
799 * 32, the lookup of the device flags (above) could be invalid,
800 * and it would be possible to take an incorrect action - we do
801 * not want to hang because of a short INQUIRY. On the flip side,
802 * if the device is spun down or becoming ready (and so it gives a
803 * short INQUIRY), an abort here prevents any further use of the
804 * device, including spin up.
805 *
806 * On the whole, the best approach seems to be to assume the first
807 * 36 bytes are valid no matter what the device says. That's
808 * better than copying < 36 bytes to the inquiry-result buffer
809 * and displaying garbage for the Vendor, Product, or Revision
810 * strings.
811 */
812 if (sdev->inquiry_len < 36) {
813 if (!sdev->host->short_inquiry) {
814 shost_printk(KERN_INFO, sdev->host,
815 "scsi scan: INQUIRY result too short (%d),"
816 " using 36\n", sdev->inquiry_len);
817 sdev->host->short_inquiry = 1;
818 }
819 sdev->inquiry_len = 36;
820 }
821
822 /*
823 * Related to the above issue:
824 *
825 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
826 * and if not ready, sent a START_STOP to start (maybe spin up) and
827 * then send the INQUIRY again, since the INQUIRY can change after
828 * a device is initialized.
829 *
830 * Ideally, start a device if explicitly asked to do so. This
831 * assumes that a device is spun up on power on, spun down on
832 * request, and then spun up on request.
833 */
834
835 /*
836 * The scanning code needs to know the scsi_level, even if no
837 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
838 * non-zero LUNs can be scanned.
839 */
840 sdev->scsi_level = inq_result[2] & 0x0f;
841 if (sdev->scsi_level >= 2 ||
842 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
843 sdev->scsi_level++;
844 sdev->sdev_target->scsi_level = sdev->scsi_level;
845
846 /*
847 * If SCSI-2 or lower, and if the transport requires it,
848 * store the LUN value in CDB[1].
849 */
850 sdev->lun_in_cdb = 0;
851 if (sdev->scsi_level <= SCSI_2 &&
852 sdev->scsi_level != SCSI_UNKNOWN &&
853 !sdev->host->no_scsi2_lun_in_cdb)
854 sdev->lun_in_cdb = 1;
855
856 return 0;
857 }
858
859 /**
860 * scsi_add_lun - allocate and fully initialize a scsi_device
861 * @sdev: holds information to be stored in the new scsi_device
862 * @inq_result: holds the result of a previous INQUIRY to the LUN
863 * @bflags: black/white list flag
864 * @async: 1 if this device is being scanned asynchronously
865 *
866 * Description:
867 * Initialize the scsi_device @sdev. Optionally set fields based
868 * on values in *@bflags.
869 *
870 * Return:
871 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
872 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
873 **/
874 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
875 blist_flags_t *bflags, int async)
876 {
877 const struct scsi_host_template *hostt = sdev->host->hostt;
878 struct queue_limits lim;
879 int ret;
880
881 /*
882 * XXX do not save the inquiry, since it can change underneath us,
883 * save just vendor/model/rev.
884 *
885 * Rather than save it and have an ioctl that retrieves the saved
886 * value, have an ioctl that executes the same INQUIRY code used
887 * in scsi_probe_lun, let user level programs doing INQUIRY
888 * scanning run at their own risk, or supply a user level program
889 * that can correctly scan.
890 */
891
892 /*
893 * Copy at least 36 bytes of INQUIRY data, so that we don't
894 * dereference unallocated memory when accessing the Vendor,
895 * Product, and Revision strings. Badly behaved devices may set
896 * the INQUIRY Additional Length byte to a small value, indicating
897 * these strings are invalid, but often they contain plausible data
898 * nonetheless. It doesn't matter if the device sent < 36 bytes
899 * total, since scsi_probe_lun() initializes inq_result with 0s.
900 */
901 sdev->inquiry = kmemdup(inq_result,
902 max_t(size_t, sdev->inquiry_len, 36),
903 GFP_KERNEL);
904 if (sdev->inquiry == NULL)
905 return SCSI_SCAN_NO_RESPONSE;
906
907 sdev->vendor = (char *) (sdev->inquiry + 8);
908 sdev->model = (char *) (sdev->inquiry + 16);
909 sdev->rev = (char *) (sdev->inquiry + 32);
910
911 if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
912 /*
913 * sata emulation layer device. This is a hack to work around
914 * the SATL power management specifications which state that
915 * when the SATL detects the device has gone into standby
916 * mode, it shall respond with NOT READY.
917 */
918 sdev->allow_restart = 1;
919 }
920
921 if (*bflags & BLIST_ISROM) {
922 sdev->type = TYPE_ROM;
923 sdev->removable = 1;
924 } else {
925 sdev->type = (inq_result[0] & 0x1f);
926 sdev->removable = (inq_result[1] & 0x80) >> 7;
927
928 /*
929 * some devices may respond with wrong type for
930 * well-known logical units. Force well-known type
931 * to enumerate them correctly.
932 */
933 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
934 sdev_printk(KERN_WARNING, sdev,
935 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
936 __func__, sdev->type, (unsigned int)sdev->lun);
937 sdev->type = TYPE_WLUN;
938 }
939
940 }
941
942 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
943 /* RBC and MMC devices can return SCSI-3 compliance and yet
944 * still not support REPORT LUNS, so make them act as
945 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
946 * specifically set */
947 if ((*bflags & BLIST_REPORTLUN2) == 0)
948 *bflags |= BLIST_NOREPORTLUN;
949 }
950
951 /*
952 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
953 * spec says: The device server is capable of supporting the
954 * specified peripheral device type on this logical unit. However,
955 * the physical device is not currently connected to this logical
956 * unit.
957 *
958 * The above is vague, as it implies that we could treat 001 and
959 * 011 the same. Stay compatible with previous code, and create a
960 * scsi_device for a PQ of 1
961 *
962 * Don't set the device offline here; rather let the upper
963 * level drivers eval the PQ to decide whether they should
964 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
965 */
966
967 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
968 sdev->lockable = sdev->removable;
969 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
970
971 if (sdev->scsi_level >= SCSI_3 ||
972 (sdev->inquiry_len > 56 && inq_result[56] & 0x04))
973 sdev->ppr = 1;
974 if (inq_result[7] & 0x60)
975 sdev->wdtr = 1;
976 if (inq_result[7] & 0x10)
977 sdev->sdtr = 1;
978
979 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
980 "ANSI: %d%s\n", scsi_device_type(sdev->type),
981 sdev->vendor, sdev->model, sdev->rev,
982 sdev->inq_periph_qual, inq_result[2] & 0x07,
983 (inq_result[3] & 0x0f) == 1 ? " CCS" : "");
984
985 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
986 !(*bflags & BLIST_NOTQ)) {
987 sdev->tagged_supported = 1;
988 sdev->simple_tags = 1;
989 }
990
991 /*
992 * Some devices (Texel CD ROM drives) have handshaking problems
993 * when used with the Seagate controllers. borken is initialized
994 * to 1, and then set it to 0 here.
995 */
996 if ((*bflags & BLIST_BORKEN) == 0)
997 sdev->borken = 0;
998
999 if (*bflags & BLIST_NO_ULD_ATTACH)
1000 sdev->no_uld_attach = 1;
1001
1002 /*
1003 * Apparently some really broken devices (contrary to the SCSI
1004 * standards) need to be selected without asserting ATN
1005 */
1006 if (*bflags & BLIST_SELECT_NO_ATN)
1007 sdev->select_no_atn = 1;
1008
1009 /*
1010 * Some devices may not want to have a start command automatically
1011 * issued when a device is added.
1012 */
1013 if (*bflags & BLIST_NOSTARTONADD)
1014 sdev->no_start_on_add = 1;
1015
1016 if (*bflags & BLIST_SINGLELUN)
1017 scsi_target(sdev)->single_lun = 1;
1018
1019 sdev->use_10_for_rw = 1;
1020
1021 /* some devices don't like REPORT SUPPORTED OPERATION CODES
1022 * and will simply timeout causing sd_mod init to take a very
1023 * very long time */
1024 if (*bflags & BLIST_NO_RSOC)
1025 sdev->no_report_opcodes = 1;
1026
1027 /* set the device running here so that slave configure
1028 * may do I/O */
1029 mutex_lock(&sdev->state_mutex);
1030 ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1031 if (ret)
1032 ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1033 mutex_unlock(&sdev->state_mutex);
1034
1035 if (ret) {
1036 sdev_printk(KERN_ERR, sdev,
1037 "in wrong state %s to complete scan\n",
1038 scsi_device_state_name(sdev->sdev_state));
1039 return SCSI_SCAN_NO_RESPONSE;
1040 }
1041
1042 if (*bflags & BLIST_NOT_LOCKABLE)
1043 sdev->lockable = 0;
1044
1045 if (*bflags & BLIST_RETRY_HWERROR)
1046 sdev->retry_hwerror = 1;
1047
1048 if (*bflags & BLIST_NO_DIF)
1049 sdev->no_dif = 1;
1050
1051 if (*bflags & BLIST_UNMAP_LIMIT_WS)
1052 sdev->unmap_limit_for_ws = 1;
1053
1054 if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1055 sdev->ignore_media_change = 1;
1056
1057 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1058
1059 if (*bflags & BLIST_TRY_VPD_PAGES)
1060 sdev->try_vpd_pages = 1;
1061 else if (*bflags & BLIST_SKIP_VPD_PAGES)
1062 sdev->skip_vpd_pages = 1;
1063
1064 if (*bflags & BLIST_NO_VPD_SIZE)
1065 sdev->no_vpd_size = 1;
1066
1067 transport_configure_device(&sdev->sdev_gendev);
1068
1069 /*
1070 * No need to freeze the queue as it isn't reachable by anyone else yet.
1071 */
1072 lim = queue_limits_start_update(sdev->request_queue);
1073 if (*bflags & BLIST_MAX_512)
1074 lim.max_hw_sectors = 512;
1075 else if (*bflags & BLIST_MAX_1024)
1076 lim.max_hw_sectors = 1024;
1077
1078 if (hostt->sdev_configure)
1079 ret = hostt->sdev_configure(sdev, &lim);
1080 if (ret) {
1081 queue_limits_cancel_update(sdev->request_queue);
1082 /*
1083 * If the LLDD reports device not present, don't clutter the
1084 * console with failure messages.
1085 */
1086 if (ret != -ENXIO)
1087 sdev_printk(KERN_ERR, sdev,
1088 "failed to configure device\n");
1089 return SCSI_SCAN_NO_RESPONSE;
1090 }
1091
1092 ret = queue_limits_commit_update(sdev->request_queue, &lim);
1093 if (ret) {
1094 sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
1095 return SCSI_SCAN_NO_RESPONSE;
1096 }
1097
1098 /*
1099 * The queue_depth is often changed in ->sdev_configure.
1100 *
1101 * Set up budget map again since memory consumption of the map depends
1102 * on actual queue depth.
1103 */
1104 if (hostt->sdev_configure)
1105 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1106
1107 if (sdev->scsi_level >= SCSI_3)
1108 scsi_attach_vpd(sdev);
1109
1110 scsi_cdl_check(sdev);
1111
1112 sdev->max_queue_depth = sdev->queue_depth;
1113 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1114 sdev->sdev_bflags = *bflags;
1115
1116 /*
1117 * Ok, the device is now all set up, we can
1118 * register it and tell the rest of the kernel
1119 * about it.
1120 */
1121 if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1122 return SCSI_SCAN_NO_RESPONSE;
1123
1124 return SCSI_SCAN_LUN_PRESENT;
1125 }
1126
1127 #ifdef CONFIG_SCSI_LOGGING
1128 /**
1129 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
1130 * @buf: Output buffer with at least end-first+1 bytes of space
1131 * @inq: Inquiry buffer (input)
1132 * @first: Offset of string into inq
1133 * @end: Index after last character in inq
1134 */
1135 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1136 unsigned first, unsigned end)
1137 {
1138 unsigned term = 0, idx;
1139
1140 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1141 if (inq[idx+first] > ' ') {
1142 buf[idx] = inq[idx+first];
1143 term = idx+1;
1144 } else {
1145 buf[idx] = ' ';
1146 }
1147 }
1148 buf[term] = 0;
1149 return buf;
1150 }
1151 #endif
1152
1153 /**
1154 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1155 * @starget: pointer to target device structure
1156 * @lun: LUN of target device
1157 * @bflagsp: store bflags here if not NULL
1158 * @sdevp: probe the LUN corresponding to this scsi_device
1159 * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only
1160 * needed on first scan
1161 * @hostdata: passed to scsi_alloc_sdev()
1162 *
1163 * Description:
1164 * Call scsi_probe_lun, if a LUN with an attached device is found,
1165 * allocate and set it up by calling scsi_add_lun.
1166 *
1167 * Return:
1168 *
1169 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1170 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1171 * attached at the LUN
1172 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1173 **/
1174 static int scsi_probe_and_add_lun(struct scsi_target *starget,
1175 u64 lun, blist_flags_t *bflagsp,
1176 struct scsi_device **sdevp,
1177 enum scsi_scan_mode rescan,
1178 void *hostdata)
1179 {
1180 struct scsi_device *sdev;
1181 unsigned char *result;
1182 blist_flags_t bflags;
1183 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1184 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1185
1186 /*
1187 * The rescan flag is used as an optimization: the first scan of a
1188 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
1189 */
1190 sdev = scsi_device_lookup_by_target(starget, lun);
1191 if (sdev) {
1192 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1193 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1194 "scsi scan: device exists on %s\n",
1195 dev_name(&sdev->sdev_gendev)));
1196 if (sdevp)
1197 *sdevp = sdev;
1198 else
1199 scsi_device_put(sdev);
1200
1201 if (bflagsp)
1202 *bflagsp = scsi_get_device_flags(sdev,
1203 sdev->vendor,
1204 sdev->model);
1205 return SCSI_SCAN_LUN_PRESENT;
1206 }
1207 scsi_device_put(sdev);
1208 } else
1209 sdev = scsi_alloc_sdev(starget, lun, hostdata);
1210 if (!sdev)
1211 goto out;
1212
1213 result = kmalloc(result_len, GFP_KERNEL);
1214 if (!result)
1215 goto out_free_sdev;
1216
1217 if (scsi_probe_lun(sdev, result, result_len, &bflags))
1218 goto out_free_result;
1219
1220 if (bflagsp)
1221 *bflagsp = bflags;
1222 /*
1223 * result contains valid SCSI INQUIRY data.
1224 */
1225 if ((result[0] >> 5) == 3) {
1226 /*
1227 * For a Peripheral qualifier 3 (011b), the SCSI
1228 * spec says: The device server is not capable of
1229 * supporting a physical device on this logical
1230 * unit.
1231 *
1232 * For disks, this implies that there is no
1233 * logical disk configured at sdev->lun, but there
1234 * is a target id responding.
1235 */
1236 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1237 " peripheral qualifier of 3, device not"
1238 " added\n"));
1239 if (lun == 0) {
1240 SCSI_LOG_SCAN_BUS(1, {
1241 unsigned char vend[9];
1242 unsigned char mod[17];
1243
1244 sdev_printk(KERN_INFO, sdev,
1245 "scsi scan: consider passing scsi_mod."
1246 "dev_flags=%s:%s:0x240 or 0x1000240\n",
1247 scsi_inq_str(vend, result, 8, 16),
1248 scsi_inq_str(mod, result, 16, 32));
1249 });
1250
1251 }
1252
1253 res = SCSI_SCAN_TARGET_PRESENT;
1254 goto out_free_result;
1255 }
1256
1257 /*
1258 * Some targets may set slight variations of PQ and PDT to signal
1259 * that no LUN is present, so don't add sdev in these cases.
1260 * Two specific examples are:
1261 * 1) NetApp targets: return PQ=1, PDT=0x1f
1262 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1263 * in the UFI 1.0 spec (we cannot rely on reserved bits).
1264 *
1265 * References:
1266 * 1) SCSI SPC-3, pp. 145-146
1267 * PQ=1: "A peripheral device having the specified peripheral
1268 * device type is not connected to this logical unit. However, the
1269 * device server is capable of supporting the specified peripheral
1270 * device type on this logical unit."
1271 * PDT=0x1f: "Unknown or no device type"
1272 * 2) USB UFI 1.0, p. 20
1273 * PDT=00h Direct-access device (floppy)
1274 * PDT=1Fh none (no FDD connected to the requested logical unit)
1275 */
1276 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1277 (result[0] & 0x1f) == 0x1f &&
1278 !scsi_is_wlun(lun)) {
1279 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1280 "scsi scan: peripheral device type"
1281 " of 31, no device added\n"));
1282 res = SCSI_SCAN_TARGET_PRESENT;
1283 goto out_free_result;
1284 }
1285
1286 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1287 if (res == SCSI_SCAN_LUN_PRESENT) {
1288 if (bflags & BLIST_KEY) {
1289 sdev->lockable = 0;
1290 scsi_unlock_floptical(sdev, result);
1291 }
1292 }
1293
1294 out_free_result:
1295 kfree(result);
1296 out_free_sdev:
1297 if (res == SCSI_SCAN_LUN_PRESENT) {
1298 if (sdevp) {
1299 if (scsi_device_get(sdev) == 0) {
1300 *sdevp = sdev;
1301 } else {
1302 __scsi_remove_device(sdev);
1303 res = SCSI_SCAN_NO_RESPONSE;
1304 }
1305 }
1306 } else
1307 __scsi_remove_device(sdev);
1308 out:
1309 return res;
1310 }
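/*
 * Editorial aside: the hint printed above for a LUN 0 that reports a
 * peripheral qualifier of 3 refers to the blacklist interface implemented in
 * scsi_devinfo.c. Its format is "vendor:model:flags", where flags is a
 * bitwise OR of BLIST_* values from include/scsi/scsi_devinfo.h. A
 * hypothetical boot-time example:
 *
 *	scsi_mod.dev_flags="SOMEVENDOR:SOMEMODEL:0x240"
 */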
1311
1312 /**
1313 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1314 * @starget: pointer to target structure to scan
1315 * @bflags: black/white list flag for LUN 0
1316 * @scsi_level: Which version of the standard does this device adhere to
1317 * @rescan: passed to scsi_probe_and_add_lun()
1318 *
1319 * Description:
1320 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1321 * scanned) to some maximum lun until a LUN is found with no device
1322 * attached. Use the bflags to figure out any oddities.
1323 *
1324 * Modifies sdevscan->lun.
1325 **/
1326 static void scsi_sequential_lun_scan(struct scsi_target *starget,
1327 blist_flags_t bflags, int scsi_level,
1328 enum scsi_scan_mode rescan)
1329 {
1330 uint max_dev_lun;
1331 u64 sparse_lun, lun;
1332 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1333
1334 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1335 "scsi scan: Sequential scan\n"));
1336
1337 max_dev_lun = min(max_scsi_luns, shost->max_lun);
1338 /*
1339 * If this device is known to support sparse multiple units,
1340 * override the other settings, and scan all of them. Normally,
1341 * SCSI-3 devices should be scanned via the REPORT LUNS.
1342 */
1343 if (bflags & BLIST_SPARSELUN) {
1344 max_dev_lun = shost->max_lun;
1345 sparse_lun = 1;
1346 } else
1347 sparse_lun = 0;
1348
1349 /*
1350 * If less than SCSI_1_CCS, and no special lun scanning, stop
1351 * scanning; this matches 2.4 behaviour, but could just be a bug
1352 * (to continue scanning a SCSI_1_CCS device).
1353 *
1354 * This test is broken. We might not have any device on lun0 for
1355 * a sparselun device, and if that's the case then how would we
1356 * know the real scsi_level, eh? It might make sense to just not
1357 * scan any SCSI_1 device for non-0 luns, but that check would best
1358 * go into scsi_alloc_sdev() and just have it return null when asked
1359 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1360 *
1361 if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1362 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1363 == 0))
1364 return;
1365 */
1366 /*
1367 * If this device is known to support multiple units, override
1368 * the other settings, and scan all of them.
1369 */
1370 if (bflags & BLIST_FORCELUN)
1371 max_dev_lun = shost->max_lun;
1372 /*
1373 * REGAL CDC-4X: avoid hang after LUN 4
1374 */
1375 if (bflags & BLIST_MAX5LUN)
1376 max_dev_lun = min(5U, max_dev_lun);
1377 /*
1378 * Do not scan SCSI-2 or lower device past LUN 7, unless
1379 * BLIST_LARGELUN.
1380 */
1381 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1382 max_dev_lun = min(8U, max_dev_lun);
1383 else
1384 max_dev_lun = min(256U, max_dev_lun);
1385
1386 /*
1387 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1388 * until we reach the max, or no LUN is found and we are not
1389 * sparse_lun.
1390 */
1391 for (lun = 1; lun < max_dev_lun; ++lun)
1392 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1393 NULL) != SCSI_SCAN_LUN_PRESENT) &&
1394 !sparse_lun)
1395 return;
1396 }
1397
1398 /**
1399 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1400 * @starget: which target
1401 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1402 * @rescan: nonzero if we can skip code only needed on first scan
1403 *
1404 * Description:
1405 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1406 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1407 *
1408 * If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1409 * LUNs even if it's older than SCSI-3.
1410 * If BLIST_NOREPORTLUN is set, return 1 always.
1411 * If BLIST_NOLUN is set, return 0 always.
1412 * If starget->no_report_luns is set, return 1 always.
1413 *
1414 * Return:
1415 * 0: scan completed (or no memory, so further scanning is futile)
1416 * 1: could not scan with REPORT LUN
1417 **/
1418 static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1419 enum scsi_scan_mode rescan)
1420 {
1421 unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1422 unsigned int length;
1423 u64 lun;
1424 unsigned int num_luns;
1425 int result;
1426 struct scsi_lun *lunp, *lun_data;
1427 struct scsi_device *sdev;
1428 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1429 struct scsi_failure failure_defs[] = {
1430 {
1431 .sense = UNIT_ATTENTION,
1432 .asc = SCMD_FAILURE_ASC_ANY,
1433 .ascq = SCMD_FAILURE_ASCQ_ANY,
1434 .result = SAM_STAT_CHECK_CONDITION,
1435 },
1436 /* Fail all CCs except the UA above */
1437 {
1438 .sense = SCMD_FAILURE_SENSE_ANY,
1439 .result = SAM_STAT_CHECK_CONDITION,
1440 },
1441 /* Retry any other errors not listed above */
1442 {
1443 .result = SCMD_FAILURE_RESULT_ANY,
1444 },
1445 {}
1446 };
1447 struct scsi_failures failures = {
1448 .total_allowed = 3,
1449 .failure_definitions = failure_defs,
1450 };
1451 const struct scsi_exec_args exec_args = {
1452 .failures = &failures,
1453 };
1454 int ret = 0;
1455
1456 /*
1457 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1458 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
1459 * support more than 8 LUNs.
1460 * Don't attempt if the target doesn't support REPORT LUNS.
1461 */
1462 if (bflags & BLIST_NOREPORTLUN)
1463 return 1;
1464 if (starget->scsi_level < SCSI_2 &&
1465 starget->scsi_level != SCSI_UNKNOWN)
1466 return 1;
1467 if (starget->scsi_level < SCSI_3 &&
1468 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1469 return 1;
1470 if (bflags & BLIST_NOLUN)
1471 return 0;
1472 if (starget->no_report_luns)
1473 return 1;
1474
1475 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1476 sdev = scsi_alloc_sdev(starget, 0, NULL);
1477 if (!sdev)
1478 return 0;
1479 if (scsi_device_get(sdev)) {
1480 __scsi_remove_device(sdev);
1481 return 0;
1482 }
1483 }
1484
1485 /*
1486 * Allocate enough to hold the header (the same size as one scsi_lun)
1487 * plus the number of luns we are requesting. 511 was the default
1488 * value of the now removed max_report_luns parameter.
1489 */
1490 length = (511 + 1) * sizeof(struct scsi_lun);
1491 retry:
1492 lun_data = kmalloc(length, GFP_KERNEL);
1493 if (!lun_data) {
1494 printk(ALLOC_FAILURE_MSG, __func__);
1495 goto out;
1496 }
1497
1498 scsi_cmd[0] = REPORT_LUNS;
1499
1500 /*
1501 * bytes 1 - 5: reserved, set to zero.
1502 */
1503 memset(&scsi_cmd[1], 0, 5);
1504
1505 /*
1506 * bytes 6 - 9: allocation length, i.e. the size of the response buffer.
1507 */
1508 put_unaligned_be32(length, &scsi_cmd[6]);
1509
1510 scsi_cmd[10] = 0; /* reserved */
1511 scsi_cmd[11] = 0; /* control */
1512
1513 /*
1514 * We can get a UNIT ATTENTION, for example a power on/reset, so
1515 * retry a few times (like sd.c does for TEST UNIT READY).
1516 * Experience shows some combinations of adapter/devices get at
1517 * least two power on/resets.
1518 *
1519 * Illegal requests (for devices that do not support REPORT LUNS)
1520 * should come through as a check condition, and will not generate
1521 * a retry.
1522 */
1523 scsi_failures_reset_retries(&failures);
1524
1525 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1526 "scsi scan: Sending REPORT LUNS\n"));
1527
1528 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
1529 length, SCSI_REPORT_LUNS_TIMEOUT, 3,
1530 &exec_args);
1531
1532 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1533 "scsi scan: REPORT LUNS %s result 0x%x\n",
1534 result ? "failed" : "successful", result));
1535 if (result) {
1536 /*
1537 * The device probably does not support a REPORT LUN command
1538 */
1539 ret = 1;
1540 goto out_err;
1541 }
1542
1543 /*
1544 * Get the length from the first four bytes of lun_data.
1545 */
1546 if (get_unaligned_be32(lun_data->scsi_lun) +
1547 sizeof(struct scsi_lun) > length) {
1548 length = get_unaligned_be32(lun_data->scsi_lun) +
1549 sizeof(struct scsi_lun);
1550 kfree(lun_data);
1551 goto retry;
1552 }
1553 length = get_unaligned_be32(lun_data->scsi_lun);
1554
1555 num_luns = (length / sizeof(struct scsi_lun));
1556
1557 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1558 "scsi scan: REPORT LUN scan\n"));
1559
1560 /*
1561 * Scan the luns in lun_data. The entry at offset 0 is really
1562 * the header, so start at 1 and go up to and including num_luns.
1563 */
1564 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1565 lun = scsilun_to_int(lunp);
1566
1567 if (lun > sdev->host->max_lun) {
1568 sdev_printk(KERN_WARNING, sdev,
1569 "lun%llu has a LUN larger than"
1570 " allowed by the host adapter\n", lun);
1571 } else {
1572 int res;
1573
1574 res = scsi_probe_and_add_lun(starget,
1575 lun, NULL, NULL, rescan, NULL);
1576 if (res == SCSI_SCAN_NO_RESPONSE) {
1577 /*
1578 * Got some results, but now none, abort.
1579 */
1580 sdev_printk(KERN_ERR, sdev,
1581 "Unexpected response"
1582 " from lun %llu while scanning, scan"
1583 " aborted\n", (unsigned long long)lun);
1584 break;
1585 }
1586 }
1587 }
1588
1589 out_err:
1590 kfree(lun_data);
1591 out:
1592 if (scsi_device_created(sdev))
1593 /*
1594 * the sdev we used didn't appear in the report luns scan
1595 */
1596 __scsi_remove_device(sdev);
1597 scsi_device_put(sdev);
1598 return ret;
1599 }
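/*
 * Hedged illustration (not called by the scan code): the REPORT LUNS payload
 * parsed above starts with an 8-byte header whose first four bytes hold the
 * LUN list length in bytes; 8-byte LUN entries follow. The helper below is
 * hypothetical and only shows how such a buffer could be walked with
 * scsilun_to_int().
 */
static void __maybe_unused example_walk_report_luns(unsigned char *buf,
						    unsigned int buf_len)
{
	unsigned int num = get_unaligned_be32(buf) / sizeof(struct scsi_lun);
	struct scsi_lun *entry = (struct scsi_lun *)(buf + 8);
	unsigned int i;

	for (i = 0; i < num && (i + 2) * sizeof(struct scsi_lun) <= buf_len; i++)
		pr_info("found LUN %llu\n",
			(unsigned long long)scsilun_to_int(&entry[i]));
}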
1600
1601 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1602 uint id, u64 lun, void *hostdata)
1603 {
1604 struct scsi_device *sdev = ERR_PTR(-ENODEV);
1605 struct device *parent = &shost->shost_gendev;
1606 struct scsi_target *starget;
1607
1608 if (strncmp(scsi_scan_type, "none", 4) == 0)
1609 return ERR_PTR(-ENODEV);
1610
1611 starget = scsi_alloc_target(parent, channel, id);
1612 if (!starget)
1613 return ERR_PTR(-ENOMEM);
1614 scsi_autopm_get_target(starget);
1615
1616 mutex_lock(&shost->scan_mutex);
1617 if (!shost->async_scan)
1618 scsi_complete_async_scans();
1619
1620 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1621 scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
1622 SCSI_SCAN_RESCAN, hostdata);
1623 scsi_autopm_put_host(shost);
1624 }
1625 mutex_unlock(&shost->scan_mutex);
1626 scsi_autopm_put_target(starget);
1627 /*
1628 * paired with scsi_alloc_target(). Target will be destroyed unless
1629 * scsi_probe_and_add_lun made an underlying device visible
1630 */
1631 scsi_target_reap(starget);
1632 put_device(&starget->dev);
1633
1634 return sdev;
1635 }
1636 EXPORT_SYMBOL(__scsi_add_device);
1637
1638 /**
1639 * scsi_add_device - creates a new SCSI (LU) instance
1640 * @host: the &Scsi_Host instance where the device is located
1641 * @channel: target channel number (rarely other than %0)
1642 * @target: target id number
1643 * @lun: LUN of target device
1644 *
1645 * Probe for a specific LUN and add it if found.
1646 *
1647 * Notes: This call is usually performed internally during a SCSI
1648 * bus scan when an HBA is added (i.e. scsi_scan_host()). So it
1649 * should only be called if the HBA becomes aware of a new SCSI
1650 * device (LU) after scsi_scan_host() has completed. If successful
1651 * this call can lead to sdev_init() and sdev_configure() callbacks
1652 * into the LLD.
1653 *
1654 * Return: %0 on success or negative error code on failure
1655 */
1656 int scsi_add_device(struct Scsi_Host *host, uint channel,
1657 uint target, u64 lun)
1658 {
1659 struct scsi_device *sdev =
1660 __scsi_add_device(host, channel, target, lun, NULL);
1661 if (IS_ERR(sdev))
1662 return PTR_ERR(sdev);
1663
1664 scsi_device_put(sdev);
1665 return 0;
1666 }
1667 EXPORT_SYMBOL(scsi_add_device);
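/*
 * Hedged usage sketch (not part of this file's logic): an LLD that learns of
 * a hot-added logical unit after scsi_scan_host() has finished might register
 * it as below. The channel/target/LUN values are made up for illustration.
 */
static void __maybe_unused example_hotplug_lun(struct Scsi_Host *shost)
{
	int ret = scsi_add_device(shost, 0 /* channel */, 3 /* target */,
				  0 /* lun */);

	if (ret)
		shost_printk(KERN_WARNING, shost,
			     "hot-added LUN was not attached: %d\n", ret);
}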
1668
1669 int scsi_resume_device(struct scsi_device *sdev)
1670 {
1671 struct device *dev = &sdev->sdev_gendev;
1672 int ret = 0;
1673
1674 device_lock(dev);
1675
1676 /*
1677 * Bail out if the device or its queue are not running. Otherwise,
1678 * the rescan may block waiting for commands to be executed, with us
1679 * holding the device lock. This can result in a potential deadlock
1680 * in the power management core code when system resume is on-going.
1681 */
1682 if (sdev->sdev_state != SDEV_RUNNING ||
1683 blk_queue_pm_only(sdev->request_queue)) {
1684 ret = -EWOULDBLOCK;
1685 goto unlock;
1686 }
1687
1688 if (dev->driver && try_module_get(dev->driver->owner)) {
1689 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1690
1691 if (drv->resume)
1692 ret = drv->resume(dev);
1693 module_put(dev->driver->owner);
1694 }
1695
1696 unlock:
1697 device_unlock(dev);
1698
1699 return ret;
1700 }
1701 EXPORT_SYMBOL(scsi_resume_device);
1702
1703 int scsi_rescan_device(struct scsi_device *sdev)
1704 {
1705 struct device *dev = &sdev->sdev_gendev;
1706 int ret = 0;
1707
1708 device_lock(dev);
1709
1710 /*
1711 * Bail out if the device or its queue are not running. Otherwise,
1712 * the rescan may block waiting for commands to be executed, with us
1713 * holding the device lock. This can result in a potential deadlock
1714 * in the power management core code when system resume is on-going.
1715 */
1716 if (sdev->sdev_state != SDEV_RUNNING ||
1717 blk_queue_pm_only(sdev->request_queue)) {
1718 ret = -EWOULDBLOCK;
1719 goto unlock;
1720 }
1721
1722 scsi_attach_vpd(sdev);
1723 scsi_cdl_check(sdev);
1724
1725 if (sdev->handler && sdev->handler->rescan)
1726 sdev->handler->rescan(sdev);
1727
1728 if (dev->driver && try_module_get(dev->driver->owner)) {
1729 struct scsi_driver *drv = to_scsi_driver(dev->driver);
1730
1731 if (drv->rescan)
1732 drv->rescan(dev);
1733 module_put(dev->driver->owner);
1734 }
1735
1736 unlock:
1737 device_unlock(dev);
1738
1739 return ret;
1740 }
1741 EXPORT_SYMBOL(scsi_rescan_device);
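/*
 * Hedged usage sketch: transport or LLD code that detects a capacity or
 * capability change on a live device typically asks the upper-level driver to
 * re-read it via scsi_rescan_device(); the error handling here is
 * illustrative only.
 */
static void __maybe_unused example_rescan_after_event(struct scsi_device *sdev)
{
	if (scsi_rescan_device(sdev))
		sdev_printk(KERN_NOTICE, sdev,
			    "rescan skipped, device or queue not running\n");
}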
1742
1743 static void __scsi_scan_target(struct device *parent, unsigned int channel,
1744 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1745 {
1746 struct Scsi_Host *shost = dev_to_shost(parent);
1747 blist_flags_t bflags = 0;
1748 int res;
1749 struct scsi_target *starget;
1750
1751 if (shost->this_id == id)
1752 /*
1753 * Don't scan the host adapter
1754 */
1755 return;
1756
1757 starget = scsi_alloc_target(parent, channel, id);
1758 if (!starget)
1759 return;
1760 scsi_autopm_get_target(starget);
1761
1762 if (lun != SCAN_WILD_CARD) {
1763 /*
1764 * Scan for a specific host/chan/id/lun.
1765 */
1766 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1767 goto out_reap;
1768 }
1769
1770 /*
1771 * Scan LUN 0; if there is some response, scan further. Ideally, we
1772 * would not configure LUN 0 until all LUNs are scanned.
1773 */
1774 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1775 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1776 if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1777 /*
1778 * The REPORT LUN scan did not scan the target;
1779 * do a sequential scan.
1780 */
1781 scsi_sequential_lun_scan(starget, bflags,
1782 starget->scsi_level, rescan);
1783 }
1784
1785 out_reap:
1786 scsi_autopm_put_target(starget);
1787 /*
1788 * paired with scsi_alloc_target(): determine if the target has
1789 * any children at all and if not, nuke it
1790 */
1791 scsi_target_reap(starget);
1792
1793 put_device(&starget->dev);
1794 }
1795
1796 /**
1797 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1798 * @parent: host to scan
1799 * @channel: channel to scan
1800 * @id: target id to scan
1801 * @lun: Specific LUN to scan or SCAN_WILD_CARD
1802 * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1803 * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1804 * and SCSI_SCAN_MANUAL to force scanning even if
1805 * 'scan=manual' is set.
1806 *
1807 * Description:
1808 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1809 * and possibly all LUNs on the target id.
1810 *
1811 * First try a REPORT LUN scan, if that does not scan the target, do a
1812 * sequential scan of LUNs on the target id.
1813 **/
1814 void scsi_scan_target(struct device *parent, unsigned int channel,
1815 unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1816 {
1817 struct Scsi_Host *shost = dev_to_shost(parent);
1818
1819 if (strncmp(scsi_scan_type, "none", 4) == 0)
1820 return;
1821
1822 if (rescan != SCSI_SCAN_MANUAL &&
1823 strncmp(scsi_scan_type, "manual", 6) == 0)
1824 return;
1825
1826 mutex_lock(&shost->scan_mutex);
1827 if (!shost->async_scan)
1828 scsi_complete_async_scans();
1829
1830 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1831 __scsi_scan_target(parent, channel, id, lun, rescan);
1832 scsi_autopm_put_host(shost);
1833 }
1834 mutex_unlock(&shost->scan_mutex);
1835 }
1836 EXPORT_SYMBOL(scsi_scan_target);
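
/*
 * Illustrative only: a transport class that has just discovered a remote
 * port typically scans all LUNs behind it like this ("rport_dev" and
 * "rport_target_id" are placeholders for the device representing that
 * port, parented under the Scsi_Host, and its target id):
 *
 *	scsi_scan_target(rport_dev, 0, rport_target_id, SCAN_WILD_CARD,
 *			 SCSI_SCAN_RESCAN);
 */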
1837
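/*
 * Scan one channel of @shost. A wildcard @id expands to every target id
 * below shost->max_id, in reverse order when the host sets
 * reverse_ordering; a specific id is scanned directly.
 */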
1838 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1839 unsigned int id, u64 lun,
1840 enum scsi_scan_mode rescan)
1841 {
1842 uint order_id;
1843
1844 if (id == SCAN_WILD_CARD)
1845 for (id = 0; id < shost->max_id; ++id) {
1846 /*
1847 * XXX adapter drivers when possible (FCP, iSCSI)
1848 * could modify max_id to match the current max,
1849 * not the absolute max.
1850 *
1851 * XXX add a shost id iterator, so for example,
1852 * the FC ID can be the same as a target id
1853 * without a huge overhead of sparse ids.
1854 */
1855 if (shost->reverse_ordering)
1856 /*
1857 * Scan from high to low id.
1858 */
1859 order_id = shost->max_id - id - 1;
1860 else
1861 order_id = id;
1862 __scsi_scan_target(&shost->shost_gendev, channel,
1863 order_id, lun, rescan);
1864 }
1865 else
1866 __scsi_scan_target(&shost->shost_gendev, channel,
1867 id, lun, rescan);
1868 }
1869
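/**
 * scsi_scan_host_selected - scan the given channel/id/lun on a host
 * @shost: adapter to scan
 * @channel: channel to scan, or SCAN_WILD_CARD for all channels
 * @id: target id to scan, or SCAN_WILD_CARD for all target ids
 * @lun: LUN to scan, or SCAN_WILD_CARD for all LUNs
 * @rescan: whether and how existing LUNs should be rescanned
 *
 * Return: %0 on success, %-EINVAL if a non-wildcard value exceeds the
 * limits advertised by @shost.
 */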
1870 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1871 unsigned int id, u64 lun,
1872 enum scsi_scan_mode rescan)
1873 {
1874 SCSI_LOG_SCAN_BUS(3, shost_printk(KERN_INFO, shost,
1875 "%s: <%u:%u:%llu>\n",
1876 __func__, channel, id, lun));
1877
1878 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1879 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1880 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1881 return -EINVAL;
1882
1883 mutex_lock(&shost->scan_mutex);
1884 if (!shost->async_scan)
1885 scsi_complete_async_scans();
1886
1887 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1888 if (channel == SCAN_WILD_CARD)
1889 for (channel = 0; channel <= shost->max_channel;
1890 channel++)
1891 scsi_scan_channel(shost, channel, id, lun,
1892 rescan);
1893 else
1894 scsi_scan_channel(shost, channel, id, lun, rescan);
1895 scsi_autopm_put_host(shost);
1896 }
1897 mutex_unlock(&shost->scan_mutex);
1898
1899 return 0;
1900 }
1901
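/* Make the devices found during an async scan visible in sysfs. */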
1902 static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1903 {
1904 struct scsi_device *sdev;
1905 shost_for_each_device(sdev, shost) {
1906 /* target removed before the device could be added */
1907 if (sdev->sdev_state == SDEV_DEL)
1908 continue;
1909 /* If device is already visible, skip adding it to sysfs */
1910 if (sdev->is_visible)
1911 continue;
1912 if (!scsi_host_scan_allowed(shost) ||
1913 scsi_sysfs_add_sdev(sdev) != 0)
1914 __scsi_remove_device(sdev);
1915 }
1916 }
1917
1918 /**
1919 * scsi_prep_async_scan - prepare for an async scan
1920 * @shost: the host which will be scanned
1921 * Returns: a cookie to be passed to scsi_finish_async_scan()
1922 *
1923 * Tells the midlayer this host is going to do an asynchronous scan.
1924 * It reserves the host's position in the scanning list and ensures
1925 * that other asynchronous scans started after this one won't affect the
1926 * ordering of the discovered devices.
1927 */
1928 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1929 {
1930 struct async_scan_data *data = NULL;
1931 unsigned long flags;
1932
1933 if (strncmp(scsi_scan_type, "sync", 4) == 0)
1934 return NULL;
1935
1936 mutex_lock(&shost->scan_mutex);
1937 if (shost->async_scan) {
1938 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1939 goto err;
1940 }
1941
1942 data = kmalloc(sizeof(*data), GFP_KERNEL);
1943 if (!data)
1944 goto err;
1945 data->shost = scsi_host_get(shost);
1946 if (!data->shost)
1947 goto err;
1948 init_completion(&data->prev_finished);
1949
1950 spin_lock_irqsave(shost->host_lock, flags);
1951 shost->async_scan = 1;
1952 spin_unlock_irqrestore(shost->host_lock, flags);
1953 mutex_unlock(&shost->scan_mutex);
1954
1955 spin_lock(&async_scan_lock);
1956 if (list_empty(&scanning_hosts))
1957 complete(&data->prev_finished);
1958 list_add_tail(&data->list, &scanning_hosts);
1959 spin_unlock(&async_scan_lock);
1960
1961 return data;
1962
1963 err:
1964 mutex_unlock(&shost->scan_mutex);
1965 kfree(data);
1966 return NULL;
1967 }
1968
1969 /**
1970 * scsi_finish_async_scan - asynchronous scan has finished
1971 * @data: cookie returned from earlier call to scsi_prep_async_scan()
1972 *
1973 * All the devices currently attached to this host have been found.
1974 * This function announces all the devices it has found to the rest
1975 * of the system.
1976 */
1977 static void scsi_finish_async_scan(struct async_scan_data *data)
1978 {
1979 struct Scsi_Host *shost;
1980 unsigned long flags;
1981
1982 if (!data)
1983 return;
1984
1985 shost = data->shost;
1986
1987 mutex_lock(&shost->scan_mutex);
1988
1989 if (!shost->async_scan) {
1990 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1991 dump_stack();
1992 mutex_unlock(&shost->scan_mutex);
1993 return;
1994 }
1995
1996 wait_for_completion(&data->prev_finished);
1997
1998 scsi_sysfs_add_devices(shost);
1999
2000 spin_lock_irqsave(shost->host_lock, flags);
2001 shost->async_scan = 0;
2002 spin_unlock_irqrestore(shost->host_lock, flags);
2003
2004 mutex_unlock(&shost->scan_mutex);
2005
2006 spin_lock(&async_scan_lock);
2007 list_del(&data->list);
2008 if (!list_empty(&scanning_hosts)) {
2009 struct async_scan_data *next = list_entry(scanning_hosts.next,
2010 struct async_scan_data, list);
2011 complete(&next->prev_finished);
2012 }
2013 spin_unlock(&async_scan_lock);
2014
2015 scsi_autopm_put_host(shost);
2016 scsi_host_put(shost);
2017 kfree(data);
2018 }
2019
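/*
 * Run the actual scan for @shost: hosts that implement ->scan_finished()
 * drive their own discovery via ->scan_start() and are polled here until
 * they report completion; all other hosts get a full wildcard scan.
 */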
2020 static void do_scsi_scan_host(struct Scsi_Host *shost)
2021 {
2022 if (shost->hostt->scan_finished) {
2023 unsigned long start = jiffies;
2024 if (shost->hostt->scan_start)
2025 shost->hostt->scan_start(shost);
2026
2027 while (!shost->hostt->scan_finished(shost, jiffies - start))
2028 msleep(10);
2029 } else {
2030 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
2031 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2032 }
2033 }
2034
2035 static void do_scan_async(void *_data, async_cookie_t c)
2036 {
2037 struct async_scan_data *data = _data;
2038 struct Scsi_Host *shost = data->shost;
2039
2040 do_scsi_scan_host(shost);
2041 scsi_finish_async_scan(data);
2042 }
2043
2044 /**
2045 * scsi_scan_host - scan the given adapter
2046 * @shost: adapter to scan
2047 *
2048 * Notes: Should be called after scsi_add_host()
2049 **/
2050 void scsi_scan_host(struct Scsi_Host *shost)
2051 {
2052 struct async_scan_data *data;
2053
2054 if (strncmp(scsi_scan_type, "none", 4) == 0 ||
2055 strncmp(scsi_scan_type, "manual", 6) == 0)
2056 return;
2057 if (scsi_autopm_get_host(shost) < 0)
2058 return;
2059
2060 data = scsi_prep_async_scan(shost);
2061 if (!data) {
2062 do_scsi_scan_host(shost);
2063 scsi_autopm_put_host(shost);
2064 return;
2065 }
2066
2067 /* register with the async subsystem so wait_for_device_probe()
2068 * will flush this work
2069 */
2070 async_schedule(do_scan_async, data);
2071
2072 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
2073 }
2074 EXPORT_SYMBOL(scsi_scan_host);
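
/*
 * Illustrative only: the usual probe-time sequence in an LLD is to allocate
 * and register the host and then kick off discovery ("my_template",
 * "struct my_hba_priv" and "my_parent_dev" are placeholders, not names used
 * in this file):
 *
 *	shost = scsi_host_alloc(&my_template, sizeof(struct my_hba_priv));
 *	if (!shost)
 *		return -ENOMEM;
 *	err = scsi_add_host(shost, my_parent_dev);
 *	if (err) {
 *		scsi_host_put(shost);
 *		return err;
 *	}
 *	scsi_scan_host(shost);
 */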
2075
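/*
 * Remove every device on @shost. The host lock is dropped around each
 * __scsi_remove_device() call (which may sleep), so the walk restarts
 * from the beginning of the list after every removal.
 */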
2076 void scsi_forget_host(struct Scsi_Host *shost)
2077 {
2078 struct scsi_device *sdev;
2079 unsigned long flags;
2080
2081 restart:
2082 spin_lock_irqsave(shost->host_lock, flags);
2083 list_for_each_entry(sdev, &shost->__devices, siblings) {
2084 if (sdev->sdev_state == SDEV_DEL)
2085 continue;
2086 spin_unlock_irqrestore(shost->host_lock, flags);
2087 __scsi_remove_device(sdev);
2088 goto restart;
2089 }
2090 spin_unlock_irqrestore(shost->host_lock, flags);
2091 }
2092
2093