scsi_lib.c: diff between accba5f3965d6a9d1bf7c1e1a7995d17e9d521b6 (old) and f0c0a376d0fcd4c5579ecf5e95f88387cba85211 (new)
 /*
  * scsi_lib.c Copyright (C) 1999 Eric Youngdale
  *
  * SCSI queueing library.
  * Initial versions: Eric Youngdale (eric@andante.org).
  * Based upon conversations with large numbers
  * of people at Linux Expo.
  */

--- 100 unchanged lines hidden ---

  * commands.
  * Notes:   This could be called either from an interrupt context or a
  *          normal process context.
  */
 int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 {
         struct Scsi_Host *host = cmd->device->host;
         struct scsi_device *device = cmd->device;
+        struct scsi_target *starget = scsi_target(device);
         struct request_queue *q = device->request_queue;
         unsigned long flags;
 
         SCSI_LOG_MLQUEUE(1,
                  printk("Inserting command %p into mlqueue\n", cmd));
 
         /*
          * Set the appropriate busy bit for the device/host.
          *
          * If the host/device isn't busy, assume that something actually
          * completed, and that we should be able to queue a command now.
          *
          * Note that the prior mid-layer assumption that any host could
          * always queue at least one command is now broken. The mid-layer
          * will implement a user specifiable stall (see
          * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
          * if a command is requeued with no other commands outstanding
          * either for the device or for the host.
          */
-        if (reason == SCSI_MLQUEUE_HOST_BUSY)
+        switch (reason) {
+        case SCSI_MLQUEUE_HOST_BUSY:
                 host->host_blocked = host->max_host_blocked;
-        else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+                break;
+        case SCSI_MLQUEUE_DEVICE_BUSY:
                 device->device_blocked = device->max_device_blocked;
+                break;
+        case SCSI_MLQUEUE_TARGET_BUSY:
+                starget->target_blocked = starget->max_target_blocked;
+                break;
+        }
 
         /*
          * Decrement the counters, since these commands are no longer
          * active on the host/device.
          */
         scsi_device_unbusy(device);
 
         /*

--- 307 unchanged lines hidden ---
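
Note: with the hunk above, a requeue reason of SCSI_MLQUEUE_TARGET_BUSY (as
returned by a LLD's queuecommand handler) now arms a per-target stall
counter, exactly as the host and device reasons always did. Below is a
minimal standalone C sketch of that mapping; it is a userspace model, not
kernel code, and the struct is hypothetical with field names that merely
mirror the diff:

    #include <stdio.h>

    enum { SCSI_MLQUEUE_HOST_BUSY = 1, SCSI_MLQUEUE_DEVICE_BUSY,
           SCSI_MLQUEUE_TARGET_BUSY };        /* model values, not the kernel's */

    struct model {                            /* hypothetical stand-in */
            int host_blocked,   max_host_blocked;
            int device_blocked, max_device_blocked;
            int target_blocked, max_target_blocked;   /* new level */
    };

    static void queue_insert(struct model *m, int reason)
    {
            switch (reason) {
            case SCSI_MLQUEUE_HOST_BUSY:
                    m->host_blocked = m->max_host_blocked;
                    break;
            case SCSI_MLQUEUE_DEVICE_BUSY:
                    m->device_blocked = m->max_device_blocked;
                    break;
            case SCSI_MLQUEUE_TARGET_BUSY:    /* the added case */
                    m->target_blocked = m->max_target_blocked;
                    break;
            }
    }

    int main(void)
    {
            struct model m = { .max_host_blocked = 7,
                               .max_device_blocked = 3,
                               .max_target_blocked = 3 };

            queue_insert(&m, SCSI_MLQUEUE_TARGET_BUSY);
            printf("target_blocked = %d\n", m.target_blocked);   /* 3 */
            return 0;
    }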

         memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
         if (cmd->cmd_len == 0)
                 cmd->cmd_len = scsi_command_size(cmd->cmnd);
 }
 
 void scsi_device_unbusy(struct scsi_device *sdev)
 {
         struct Scsi_Host *shost = sdev->host;
+        struct scsi_target *starget = scsi_target(sdev);
         unsigned long flags;
 
         spin_lock_irqsave(shost->host_lock, flags);
         shost->host_busy--;
+        starget->target_busy--;
         if (unlikely(scsi_host_in_recovery(shost) &&
                      (shost->host_failed || shost->host_eh_scheduled)))
                 scsi_eh_wakeup(shost);
         spin_unlock(shost->host_lock);
         spin_lock(sdev->request_queue->queue_lock);
         sdev->device_busy--;
         spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
 }

--- 39 unchanged lines hidden ---
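
Note: scsi_device_unbusy() runs on the completion path, so every level that
was charged when the command was issued (host, and now target) must be
credited back here; scsi_kill_request() further down bumps the same
counters for the same reason before completing a dead request. A tiny
userspace model of that invariant follows; it is not kernel code and the
names are only illustrative:

    #include <assert.h>
    #include <stdio.h>

    struct counts { int device_busy, target_busy, host_busy; };

    static void issue(struct counts *c)       /* issue-side charges */
    {
            c->device_busy++;
            c->target_busy++;                 /* new level in this diff */
            c->host_busy++;
    }

    static void unbusy(struct counts *c)      /* completion-side credits */
    {
            c->host_busy--;
            c->target_busy--;                 /* the added decrement */
            c->device_busy--;
    }

    int main(void)
    {
            struct counts c = { 0, 0, 0 };

            issue(&c);
            unbusy(&c);
            assert(!c.device_busy && !c.target_busy && !c.host_busy);
            printf("counters balanced\n");
            return 0;
    }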

                 spin_lock_irqsave(shost->host_lock, flags);
 
                 scsi_device_put(sdev);
         }
  out:
         spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+        return ((starget->can_queue > 0 &&
+                 starget->target_busy >= starget->can_queue) ||
+                 starget->target_blocked);
+}
+
 /*
  * Function:    scsi_run_queue()
  *
  * Purpose:     Select a proper request queue to serve next
  *
  * Arguments:   q       - last request's queue
  *
  * Returns:     Nothing
  *
  * Notes:       The previous command was completely finished, start
  *              a new one if possible.
  */
 static void scsi_run_queue(struct request_queue *q)
 {
-        struct scsi_device *sdev = q->queuedata;
+        struct scsi_device *starved_head = NULL, *sdev = q->queuedata;
         struct Scsi_Host *shost = sdev->host;
         unsigned long flags;
 
         if (scsi_target(sdev)->single_lun)
                 scsi_single_lun_run(sdev);
 
         spin_lock_irqsave(shost->host_lock, flags);
         while (!list_empty(&shost->starved_list) &&

--- 10 unchanged lines hidden ---

                  * starved_list.
                  *
                  * host_lock protects the starved_list and starved_entry.
                  * scsi_request_fn must get the host_lock before checking
                  * or modifying starved_list or starved_entry.
                  */
                 sdev = list_entry(shost->starved_list.next,
                                   struct scsi_device, starved_entry);
+                /*
+                 * The *queue_ready functions can add a device back onto the
+                 * starved list's tail, so we must check for an infinite loop.
+                 */
+                if (sdev == starved_head)
+                        break;
+                if (!starved_head)
+                        starved_head = sdev;
+
+                if (scsi_target_is_busy(scsi_target(sdev))) {
+                        list_move_tail(&sdev->starved_entry,
+                                       &shost->starved_list);
+                        continue;
+                }
+
                 list_del_init(&sdev->starved_entry);
                 spin_unlock(shost->host_lock);
 
                 spin_lock(sdev->request_queue->queue_lock);
                 flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
                                 !test_bit(QUEUE_FLAG_REENTER,
                                           &sdev->request_queue->queue_flags);
                 if (flagset)
                         queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
                 __blk_run_queue(sdev->request_queue);
                 if (flagset)
                         queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                 spin_unlock(sdev->request_queue->queue_lock);
 
                 spin_lock(shost->host_lock);
-                if (unlikely(!list_empty(&sdev->starved_entry)))
-                        /*
-                         * sdev lost a race, and was put back on the
-                         * starved list. This is unlikely but without this
-                         * in theory we could loop forever.
-                         */
-                        break;
         }
         spin_unlock_irqrestore(shost->host_lock, flags);
 
         blk_run_queue(q);
 }
 
 /*
  * Function:    scsi_requeue_command()

--- 746 unchanged lines hidden ---
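
Note: the old loop broke out only when an sdev it had just removed raced
back onto the starved list; with the per-target rotation added above
(list_move_tail), a persistently busy target would otherwise be rescanned
forever. Remembering the first device examined (starved_head) bounds the
walk to one full rotation. The following is a compilable userspace model of
that termination argument; an array stands in for the kernel list, and the
device numbers are made up:

    #include <stdio.h>
    #include <string.h>

    #define N 4

    int main(void)
    {
            int busy[N] = { 1, 0, 1, 0 };    /* 1 = target still busy */
            int list[N] = { 0, 1, 2, 3 };    /* starved list, head first */
            int len = N, starved_head = -1;

            while (len > 0) {
                    int sdev = list[0];

                    if (sdev == starved_head)
                            break;           /* one full rotation done */
                    if (starved_head == -1)
                            starved_head = sdev;

                    memmove(list, list + 1, (len - 1) * sizeof(int));
                    len--;                   /* pop the head */

                    if (busy[sdev]) {        /* list_move_tail() analogue */
                            list[len++] = sdev;
                            continue;
                    }
                    printf("run queue of sdev %d\n", sdev);  /* prints 1, 3 */
            }
            return 0;
    }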

                 }
         }
         if (sdev->device_blocked)
                 return 0;
 
         return 1;
 }
 
+
+/*
+ * scsi_target_queue_ready: checks if we can send commands to the target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+                                          struct scsi_device *sdev)
+{
+        struct scsi_target *starget = scsi_target(sdev);
+
+        if (starget->single_lun) {
+                if (starget->starget_sdev_user &&
+                    starget->starget_sdev_user != sdev)
+                        return 0;
+                starget->starget_sdev_user = sdev;
+        }
+
+        if (starget->target_busy == 0 && starget->target_blocked) {
+                /*
+                 * unblock after target_blocked iterates to zero
+                 */
+                if (--starget->target_blocked == 0) {
+                        SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+                                         "unblocking target at zero depth\n"));
+                } else {
+                        blk_plug_device(sdev->request_queue);
+                        return 0;
+                }
+        }
+
+        if (scsi_target_is_busy(starget)) {
+                if (list_empty(&sdev->starved_entry)) {
+                        list_add_tail(&sdev->starved_entry,
+                                      &shost->starved_list);
+                        return 0;
+                }
+        }
+
+        /* We're OK to process the command, so we can't be starved */
+        if (!list_empty(&sdev->starved_entry))
+                list_del_init(&sdev->starved_entry);
+        return 1;
+}
+
 /*
  * scsi_host_queue_ready: if we can send requests to shost, return 1 else
  * return 0. We must end up running the queue again whenever 0 is
  * returned, else IO can hang.
  *
  * Called with host_lock held.
  */
 static inline int scsi_host_queue_ready(struct request_queue *q,
                                         struct Scsi_Host *shost,

--- 29 unchanged lines hidden ---
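
Note: the target_blocked countdown in the new scsi_target_queue_ready()
mirrors the long-standing host_blocked/device_blocked behaviour: once the
target has gone idle, each readiness check consumes one unit of the stall,
and only the check that drives the counter to zero lets commands through
again. A standalone userspace sketch of just that countdown, not kernel
code:

    #include <stdio.h>

    struct target { int target_busy, target_blocked; };

    static int target_queue_ready(struct target *t)
    {
            if (t->target_busy == 0 && t->target_blocked) {
                    /* unblock only when target_blocked reaches zero */
                    if (--t->target_blocked != 0)
                            return 0;
            }
            return 1;
    }

    int main(void)
    {
            struct target t = { .target_busy = 0, .target_blocked = 3 };

            for (int i = 1; i <= 4; i++) {
                    int ready = target_queue_ready(&t);

                    printf("check %d: ready=%d, blocked=%d\n",
                           i, ready, t.target_blocked);
            }
            /* checks 1 and 2 fail; check 3 drains the stall and succeeds;
               check 4 is ready with the counter already at zero */
            return 0;
    }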

 
 /*
  * Kill a request for a dead device
  */
 static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
         struct scsi_cmnd *cmd = req->special;
         struct scsi_device *sdev = cmd->device;
+        struct scsi_target *starget = scsi_target(sdev);
         struct Scsi_Host *shost = sdev->host;
 
         blkdev_dequeue_request(req);
 
         if (unlikely(cmd == NULL)) {
                 printk(KERN_CRIT "impossible request in %s.\n",
                        __func__);
                 BUG();

--- 7 unchanged lines hidden ---

          * SCSI request completion path will do scsi_device_unbusy(),
          * bump busy counts. To bump the counters, we need to dance
          * with the locks as normal issue path does.
          */
         sdev->device_busy++;
         spin_unlock(sdev->request_queue->queue_lock);
         spin_lock(shost->host_lock);
         shost->host_busy++;
+        starget->target_busy++;
         spin_unlock(shost->host_lock);
         spin_lock(sdev->request_queue->queue_lock);
 
         blk_complete_request(req);
 }
 
 static void scsi_softirq_done(struct request *rq)
 {

--- 121 unchanged lines hidden ---

          */
         if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
                 if (list_empty(&sdev->starved_entry))
                         list_add_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                 goto not_ready;
         }
 
+        if (!scsi_target_queue_ready(shost, sdev))
+                goto not_ready;
+
         if (!scsi_host_queue_ready(q, shost, sdev))
                 goto not_ready;
-        if (scsi_target(sdev)->single_lun) {
-                if (scsi_target(sdev)->starget_sdev_user &&
-                    scsi_target(sdev)->starget_sdev_user != sdev)
-                        goto not_ready;
-                scsi_target(sdev)->starget_sdev_user = sdev;
-        }
+
+        scsi_target(sdev)->target_busy++;
         shost->host_busy++;
 
         /*
          * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
          * take the lock again.
          */
         spin_unlock_irq(shost->host_lock);
 

--- 1024 unchanged lines hidden ---
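
Note: the issue path in scsi_request_fn() now gates on three levels in
order — device, then target, then host — and charges target_busy alongside
host_busy once a command is cleared to dispatch (the device count is
charged earlier, when the request is dequeued). A compilable userspace
model of that gating order follows; the ready predicates are trivial
stand-ins, not the kernel's, and the flow is compressed:

    #include <stdio.h>

    struct counts { int device_busy, target_busy, host_busy; };

    static int dev_ready(void)  { return 1; }   /* stand-in predicates */
    static int tgt_ready(void)  { return 1; }   /* (always ready here)  */
    static int host_ready(void) { return 1; }

    static int dispatch(struct counts *c)
    {
            if (!dev_ready())
                    return 0;
            c->device_busy++;                /* charged once dequeued */
            if (!tgt_ready() || !host_ready())
                    return 0;                /* i.e. "goto not_ready" */
            c->target_busy++;                /* new charge in this diff */
            c->host_busy++;
            return 1;
    }

    int main(void)
    {
            struct counts c = { 0, 0, 0 };

            if (dispatch(&c))
                    printf("busy: dev=%d tgt=%d host=%d\n",
                           c.device_busy, c.target_busy, c.host_busy);
            return 0;
    }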