1 /*
2  * File...........: linux/drivers/s390/block/dasd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  */
10 
11 #define KMSG_COMPONENT "dasd"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 
14 #include <linux/kmod.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/ctype.h>
18 #include <linux/major.h>
19 #include <linux/slab.h>
20 #include <linux/buffer_head.h>
21 #include <linux/hdreg.h>
22 #include <linux/async.h>
23 #include <linux/mutex.h>
24 #include <linux/smp_lock.h>
25 
26 #include <asm/ccwdev.h>
27 #include <asm/ebcdic.h>
28 #include <asm/idals.h>
29 #include <asm/itcw.h>
30 #include <asm/diag.h>
31 
32 /* This is ugly... */
33 #define PRINTK_HEADER "dasd:"
34 
35 #include "dasd_int.h"
36 /*
37  * SECTION: Constant definitions to be used within this file
38  */
39 #define DASD_CHANQ_MAX_SIZE 4
40 
41 #define DASD_SLEEPON_START_TAG	(void *) 1
42 #define DASD_SLEEPON_END_TAG	(void *) 2
43 
44 /*
45  * SECTION: exported variables of dasd.c
46  */
47 debug_info_t *dasd_debug_area;
48 struct dasd_discipline *dasd_diag_discipline_pointer;
49 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
50 
51 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
52 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
53 		   " Copyright 2000 IBM Corporation");
54 MODULE_SUPPORTED_DEVICE("dasd");
55 MODULE_LICENSE("GPL");
56 
57 /*
58  * SECTION: prototypes for static functions of dasd.c
59  */
60 static int  dasd_alloc_queue(struct dasd_block *);
61 static void dasd_setup_queue(struct dasd_block *);
62 static void dasd_free_queue(struct dasd_block *);
63 static void dasd_flush_request_queue(struct dasd_block *);
64 static int dasd_flush_block_queue(struct dasd_block *);
65 static void dasd_device_tasklet(struct dasd_device *);
66 static void dasd_block_tasklet(struct dasd_block *);
67 static void do_kick_device(struct work_struct *);
68 static void do_restore_device(struct work_struct *);
69 static void do_reload_device(struct work_struct *);
70 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
71 static void dasd_device_timeout(unsigned long);
72 static void dasd_block_timeout(unsigned long);
73 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
74 
75 /*
76  * SECTION: Operations on the device structure.
77  */
78 static wait_queue_head_t dasd_init_waitq;
79 static wait_queue_head_t dasd_flush_wq;
80 static wait_queue_head_t generic_waitq;
81 
82 /*
83  * Allocate memory for a new device structure.
84  */
85 struct dasd_device *dasd_alloc_device(void)
86 {
87 	struct dasd_device *device;
88 
89 	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
90 	if (!device)
91 		return ERR_PTR(-ENOMEM);
92 
93 	/* Get two pages for normal block device operations. */
94 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
95 	if (!device->ccw_mem) {
96 		kfree(device);
97 		return ERR_PTR(-ENOMEM);
98 	}
99 	/* Get one page for error recovery. */
100 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
101 	if (!device->erp_mem) {
102 		free_pages((unsigned long) device->ccw_mem, 1);
103 		kfree(device);
104 		return ERR_PTR(-ENOMEM);
105 	}
106 
107 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
108 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
109 	spin_lock_init(&device->mem_lock);
110 	atomic_set(&device->tasklet_scheduled, 0);
111 	tasklet_init(&device->tasklet,
112 		     (void (*)(unsigned long)) dasd_device_tasklet,
113 		     (unsigned long) device);
114 	INIT_LIST_HEAD(&device->ccw_queue);
115 	init_timer(&device->timer);
116 	device->timer.function = dasd_device_timeout;
117 	device->timer.data = (unsigned long) device;
118 	INIT_WORK(&device->kick_work, do_kick_device);
119 	INIT_WORK(&device->restore_device, do_restore_device);
120 	INIT_WORK(&device->reload_device, do_reload_device);
121 	device->state = DASD_STATE_NEW;
122 	device->target = DASD_STATE_NEW;
123 	mutex_init(&device->state_mutex);
124 
125 	return device;
126 }
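/*
 * Note: the ccw and erp pages above are allocated GFP_DMA because
 * channel programs have to be 31 bit addressable on s390.
 */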
127 
128 /*
129  * Free memory of a device structure.
130  */
131 void dasd_free_device(struct dasd_device *device)
132 {
133 	kfree(device->private);
134 	free_page((unsigned long) device->erp_mem);
135 	free_pages((unsigned long) device->ccw_mem, 1);
136 	kfree(device);
137 }
138 
139 /*
 * Allocate memory for a new block device structure.
141  */
142 struct dasd_block *dasd_alloc_block(void)
143 {
144 	struct dasd_block *block;
145 
146 	block = kzalloc(sizeof(*block), GFP_ATOMIC);
147 	if (!block)
148 		return ERR_PTR(-ENOMEM);
149 	/* open_count = 0 means device online but not in use */
150 	atomic_set(&block->open_count, -1);
151 
152 	spin_lock_init(&block->request_queue_lock);
153 	atomic_set(&block->tasklet_scheduled, 0);
154 	tasklet_init(&block->tasklet,
155 		     (void (*)(unsigned long)) dasd_block_tasklet,
156 		     (unsigned long) block);
157 	INIT_LIST_HEAD(&block->ccw_queue);
158 	spin_lock_init(&block->queue_lock);
159 	init_timer(&block->timer);
160 	block->timer.function = dasd_block_timeout;
161 	block->timer.data = (unsigned long) block;
162 
163 	return block;
164 }
165 
166 /*
 * Free memory of a block device structure.
168  */
169 void dasd_free_block(struct dasd_block *block)
170 {
171 	kfree(block);
172 }
173 
174 /*
175  * Make a new device known to the system.
176  */
177 static int dasd_state_new_to_known(struct dasd_device *device)
178 {
179 	int rc;
180 
181 	/*
182 	 * As long as the device is not in state DASD_STATE_NEW we want to
183 	 * keep the reference count > 0.
184 	 */
185 	dasd_get_device(device);
186 
187 	if (device->block) {
188 		rc = dasd_alloc_queue(device->block);
189 		if (rc) {
190 			dasd_put_device(device);
191 			return rc;
192 		}
193 	}
194 	device->state = DASD_STATE_KNOWN;
195 	return 0;
196 }
197 
198 /*
199  * Let the system forget about a device.
200  */
201 static int dasd_state_known_to_new(struct dasd_device *device)
202 {
203 	/* Disable extended error reporting for this device. */
204 	dasd_eer_disable(device);
205 	/* Forget the discipline information. */
206 	if (device->discipline) {
207 		if (device->discipline->uncheck_device)
208 			device->discipline->uncheck_device(device);
209 		module_put(device->discipline->owner);
210 	}
211 	device->discipline = NULL;
212 	if (device->base_discipline)
213 		module_put(device->base_discipline->owner);
214 	device->base_discipline = NULL;
215 	device->state = DASD_STATE_NEW;
216 
217 	if (device->block)
218 		dasd_free_queue(device->block);
219 
220 	/* Give up reference we took in dasd_state_new_to_known. */
221 	dasd_put_device(device);
222 	return 0;
223 }
224 
225 /*
 * Allocate the gendisk and register the debug area for the device.
227  */
228 static int dasd_state_known_to_basic(struct dasd_device *device)
229 {
230 	int rc;
231 
232 	/* Allocate and register gendisk structure. */
233 	if (device->block) {
234 		rc = dasd_gendisk_alloc(device->block);
235 		if (rc)
236 			return rc;
237 	}
238 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
239 	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
240 					    8 * sizeof(long));
241 	debug_register_view(device->debug_area, &debug_sprintf_view);
242 	debug_set_level(device->debug_area, DBF_WARNING);
243 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
244 
245 	device->state = DASD_STATE_BASIC;
246 	return 0;
247 }
248 
249 /*
 * Free the gendisk and the debug area. Terminate any running i/o.
251  */
252 static int dasd_state_basic_to_known(struct dasd_device *device)
253 {
254 	int rc;
255 	if (device->block) {
256 		dasd_gendisk_free(device->block);
257 		dasd_block_clear_timer(device->block);
258 	}
259 	rc = dasd_flush_device_queue(device);
260 	if (rc)
261 		return rc;
262 	dasd_device_clear_timer(device);
263 
264 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
265 	if (device->debug_area != NULL) {
266 		debug_unregister(device->debug_area);
267 		device->debug_area = NULL;
268 	}
269 	device->state = DASD_STATE_KNOWN;
270 	return 0;
271 }
272 
273 /*
274  * Do the initial analysis. The do_analysis function may return
275  * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
276  * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
279  * interrupt for this detection ccw uses the kernel event daemon to
280  * trigger the call to dasd_change_state. All this is done in the
281  * discipline code, see dasd_eckd.c.
282  * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
284  * In case the analysis returns an error, the device setup is stopped
285  * (a fake disk was already added to allow formatting).
286  */
287 static int dasd_state_basic_to_ready(struct dasd_device *device)
288 {
289 	int rc;
290 	struct dasd_block *block;
291 
292 	rc = 0;
293 	block = device->block;
294 	/* make disk known with correct capacity */
295 	if (block) {
296 		if (block->base->discipline->do_analysis != NULL)
297 			rc = block->base->discipline->do_analysis(block);
298 		if (rc) {
299 			if (rc != -EAGAIN)
300 				device->state = DASD_STATE_UNFMT;
301 			return rc;
302 		}
303 		dasd_setup_queue(block);
304 		set_capacity(block->gdp,
305 			     block->blocks << block->s2b_shift);
306 		device->state = DASD_STATE_READY;
307 		rc = dasd_scan_partitions(block);
308 		if (rc)
309 			device->state = DASD_STATE_BASIC;
310 	} else {
311 		device->state = DASD_STATE_READY;
312 	}
313 	return rc;
314 }
315 
316 /*
317  * Remove device from block device layer. Destroy dirty buffers.
318  * Forget format information. Check if the target level is basic
 * and if it is, create a fake disk for formatting.
320  */
321 static int dasd_state_ready_to_basic(struct dasd_device *device)
322 {
323 	int rc;
324 
325 	device->state = DASD_STATE_BASIC;
326 	if (device->block) {
327 		struct dasd_block *block = device->block;
328 		rc = dasd_flush_block_queue(block);
329 		if (rc) {
330 			device->state = DASD_STATE_READY;
331 			return rc;
332 		}
333 		dasd_flush_request_queue(block);
334 		dasd_destroy_partitions(block);
335 		block->blocks = 0;
336 		block->bp_block = 0;
337 		block->s2b_shift = 0;
338 	}
339 	return 0;
340 }
341 
342 /*
343  * Back to basic.
344  */
345 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
346 {
347 	device->state = DASD_STATE_BASIC;
348 	return 0;
349 }
350 
351 /*
352  * Make the device online and schedule the bottom half to start
353  * the requeueing of requests from the linux request queue to the
354  * ccw queue.
355  */
356 static int
357 dasd_state_ready_to_online(struct dasd_device * device)
358 {
359 	int rc;
360 	struct gendisk *disk;
361 	struct disk_part_iter piter;
362 	struct hd_struct *part;
363 
364 	if (device->discipline->ready_to_online) {
365 		rc = device->discipline->ready_to_online(device);
366 		if (rc)
367 			return rc;
368 	}
369 	device->state = DASD_STATE_ONLINE;
370 	if (device->block) {
371 		dasd_schedule_block_bh(device->block);
372 		disk = device->block->bdev->bd_disk;
373 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
374 		while ((part = disk_part_iter_next(&piter)))
375 			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
376 		disk_part_iter_exit(&piter);
377 	}
378 	return 0;
379 }
380 
381 /*
382  * Stop the requeueing of requests again.
383  */
384 static int dasd_state_online_to_ready(struct dasd_device *device)
385 {
386 	int rc;
387 	struct gendisk *disk;
388 	struct disk_part_iter piter;
389 	struct hd_struct *part;
390 
391 	if (device->discipline->online_to_ready) {
392 		rc = device->discipline->online_to_ready(device);
393 		if (rc)
394 			return rc;
395 	}
396 	device->state = DASD_STATE_READY;
397 	if (device->block) {
398 		disk = device->block->bdev->bd_disk;
399 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
400 		while ((part = disk_part_iter_next(&piter)))
401 			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
402 		disk_part_iter_exit(&piter);
403 	}
404 	return 0;
405 }
406 
407 /*
408  * Device startup state changes.
409  */
410 static int dasd_increase_state(struct dasd_device *device)
411 {
412 	int rc;
413 
414 	rc = 0;
415 	if (device->state == DASD_STATE_NEW &&
416 	    device->target >= DASD_STATE_KNOWN)
417 		rc = dasd_state_new_to_known(device);
418 
419 	if (!rc &&
420 	    device->state == DASD_STATE_KNOWN &&
421 	    device->target >= DASD_STATE_BASIC)
422 		rc = dasd_state_known_to_basic(device);
423 
424 	if (!rc &&
425 	    device->state == DASD_STATE_BASIC &&
426 	    device->target >= DASD_STATE_READY)
427 		rc = dasd_state_basic_to_ready(device);
428 
429 	if (!rc &&
430 	    device->state == DASD_STATE_UNFMT &&
431 	    device->target > DASD_STATE_UNFMT)
432 		rc = -EPERM;
433 
434 	if (!rc &&
435 	    device->state == DASD_STATE_READY &&
436 	    device->target >= DASD_STATE_ONLINE)
437 		rc = dasd_state_ready_to_online(device);
438 
439 	return rc;
440 }
441 
442 /*
443  * Device shutdown state changes.
444  */
445 static int dasd_decrease_state(struct dasd_device *device)
446 {
447 	int rc;
448 
449 	rc = 0;
450 	if (device->state == DASD_STATE_ONLINE &&
451 	    device->target <= DASD_STATE_READY)
452 		rc = dasd_state_online_to_ready(device);
453 
454 	if (!rc &&
455 	    device->state == DASD_STATE_READY &&
456 	    device->target <= DASD_STATE_BASIC)
457 		rc = dasd_state_ready_to_basic(device);
458 
459 	if (!rc &&
460 	    device->state == DASD_STATE_UNFMT &&
461 	    device->target <= DASD_STATE_BASIC)
462 		rc = dasd_state_unfmt_to_basic(device);
463 
464 	if (!rc &&
465 	    device->state == DASD_STATE_BASIC &&
466 	    device->target <= DASD_STATE_KNOWN)
467 		rc = dasd_state_basic_to_known(device);
468 
469 	if (!rc &&
470 	    device->state == DASD_STATE_KNOWN &&
471 	    device->target <= DASD_STATE_NEW)
472 		rc = dasd_state_known_to_new(device);
473 
474 	return rc;
475 }
476 
477 /*
478  * This is the main startup/shutdown routine.
479  */
480 static void dasd_change_state(struct dasd_device *device)
481 {
482 	int rc;
483 
484 	if (device->state == device->target)
485 		/* Already where we want to go today... */
486 		return;
487 	if (device->state < device->target)
488 		rc = dasd_increase_state(device);
489 	else
490 		rc = dasd_decrease_state(device);
491 	if (rc == -EAGAIN)
492 		return;
493 	if (rc)
494 		device->target = device->state;
495 
496 	if (device->state == device->target)
497 		wake_up(&dasd_init_waitq);
498 
499 	/* let user-space know that the device status changed */
500 	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
501 }
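/*
 * For orientation, a sketch of the state ladder implemented by
 * dasd_increase_state and dasd_decrease_state above:
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                    \
 *	                     UNFMT (analysis failed, disk needs formatting)
 *
 * dasd_change_state moves one step at a time towards device->target.
 * A step that returns -EAGAIN leaves the target untouched so the
 * transition is retried later; any other error pulls the target back
 * to the current state.
 */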
502 
503 /*
504  * Kick starter for devices that did not complete the startup/shutdown
505  * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
507  * event daemon.
508  */
509 static void do_kick_device(struct work_struct *work)
510 {
511 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
512 	mutex_lock(&device->state_mutex);
513 	dasd_change_state(device);
514 	mutex_unlock(&device->state_mutex);
515 	dasd_schedule_device_bh(device);
516 	dasd_put_device(device);
517 }
518 
519 void dasd_kick_device(struct dasd_device *device)
520 {
521 	dasd_get_device(device);
522 	/* queue call to dasd_kick_device to the kernel event daemon. */
523 	schedule_work(&device->kick_work);
524 }
525 
526 /*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
528  * event daemon.
529  */
530 static void do_reload_device(struct work_struct *work)
531 {
532 	struct dasd_device *device = container_of(work, struct dasd_device,
533 						  reload_device);
534 	device->discipline->reload(device);
535 	dasd_put_device(device);
536 }
537 
538 void dasd_reload_device(struct dasd_device *device)
539 {
540 	dasd_get_device(device);
541 	/* queue call to dasd_reload_device to the kernel event daemon. */
542 	schedule_work(&device->reload_device);
543 }
544 EXPORT_SYMBOL(dasd_reload_device);
545 
546 /*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
548  * event daemon.
549  */
550 static void do_restore_device(struct work_struct *work)
551 {
552 	struct dasd_device *device = container_of(work, struct dasd_device,
553 						  restore_device);
554 	device->cdev->drv->restore(device->cdev);
555 	dasd_put_device(device);
556 }
557 
558 void dasd_restore_device(struct dasd_device *device)
559 {
560 	dasd_get_device(device);
561 	/* queue call to dasd_restore_device to the kernel event daemon. */
562 	schedule_work(&device->restore_device);
563 }
564 
565 /*
 * Set the target state for a device and start the state change.
567  */
568 void dasd_set_target_state(struct dasd_device *device, int target)
569 {
570 	dasd_get_device(device);
571 	mutex_lock(&device->state_mutex);
572 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
573 	if (dasd_probeonly && target > DASD_STATE_READY)
574 		target = DASD_STATE_READY;
575 	if (device->target != target) {
576 		if (device->state == target)
577 			wake_up(&dasd_init_waitq);
578 		device->target = target;
579 	}
580 	if (device->state != device->target)
581 		dasd_change_state(device);
582 	mutex_unlock(&device->state_mutex);
583 	dasd_put_device(device);
584 }
585 
586 /*
 * Enable a device and wait until it has reached its target state.
588  */
589 static inline int _wait_for_device(struct dasd_device *device)
590 {
591 	return (device->state == device->target);
592 }
593 
594 void dasd_enable_device(struct dasd_device *device)
595 {
596 	dasd_set_target_state(device, DASD_STATE_ONLINE);
597 	if (device->state <= DASD_STATE_KNOWN)
598 		/* No discipline for device found. */
599 		dasd_set_target_state(device, DASD_STATE_NEW);
600 	/* Now wait for the devices to come up. */
601 	wait_event(dasd_init_waitq, _wait_for_device(device));
602 }
603 
604 /*
605  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
606  */
607 #ifdef CONFIG_DASD_PROFILE
608 
609 struct dasd_profile_info_t dasd_global_profile;
610 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
611 
612 /*
 * Increment a counter in the global and the local profiling structure.
614  */
615 #define dasd_profile_counter(value, counter, block) \
616 { \
617 	int index; \
618 	for (index = 0; index < 31 && value >> (2+index); index++); \
619 	dasd_global_profile.counter[index]++; \
620 	block->profile.counter[index]++; \
621 }
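/*
 * Example for the bucket arithmetic above: the loop stops at the first
 * index for which value >> (2 + index) is zero, so values 0-3 land in
 * bucket 0, 4-7 in bucket 1, 8-15 in bucket 2, and each following
 * bucket covers twice the range of its predecessor, capped at 31.
 */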
622 
623 /*
624  * Add profiling information for cqr before execution.
625  */
626 static void dasd_profile_start(struct dasd_block *block,
627 			       struct dasd_ccw_req *cqr,
628 			       struct request *req)
629 {
630 	struct list_head *l;
631 	unsigned int counter;
632 
633 	if (dasd_profile_level != DASD_PROFILE_ON)
634 		return;
635 
636 	/* count the length of the chanq for statistics */
637 	counter = 0;
638 	list_for_each(l, &block->ccw_queue)
639 		if (++counter >= 31)
640 			break;
641 	dasd_global_profile.dasd_io_nr_req[counter]++;
642 	block->profile.dasd_io_nr_req[counter]++;
643 }
644 
645 /*
646  * Add profiling information for cqr after execution.
647  */
648 static void dasd_profile_end(struct dasd_block *block,
649 			     struct dasd_ccw_req *cqr,
650 			     struct request *req)
651 {
652 	long strtime, irqtime, endtime, tottime;	/* in microseconds */
653 	long tottimeps, sectors;
654 
655 	if (dasd_profile_level != DASD_PROFILE_ON)
656 		return;
657 
658 	sectors = blk_rq_sectors(req);
659 	if (!cqr->buildclk || !cqr->startclk ||
660 	    !cqr->stopclk || !cqr->endclk ||
661 	    !sectors)
662 		return;
663 
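	/*
	 * The s390 TOD clock advances in bit 51 once per microsecond,
	 * so shifting a clock delta right by 12 converts it to
	 * microseconds.
	 */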
664 	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
665 	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
666 	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
667 	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
668 	tottimeps = tottime / sectors;
669 
670 	if (!dasd_global_profile.dasd_io_reqs)
671 		memset(&dasd_global_profile, 0,
672 		       sizeof(struct dasd_profile_info_t));
673 	dasd_global_profile.dasd_io_reqs++;
674 	dasd_global_profile.dasd_io_sects += sectors;
675 
676 	if (!block->profile.dasd_io_reqs)
677 		memset(&block->profile, 0,
678 		       sizeof(struct dasd_profile_info_t));
679 	block->profile.dasd_io_reqs++;
680 	block->profile.dasd_io_sects += sectors;
681 
682 	dasd_profile_counter(sectors, dasd_io_secs, block);
683 	dasd_profile_counter(tottime, dasd_io_times, block);
684 	dasd_profile_counter(tottimeps, dasd_io_timps, block);
685 	dasd_profile_counter(strtime, dasd_io_time1, block);
686 	dasd_profile_counter(irqtime, dasd_io_time2, block);
687 	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
688 	dasd_profile_counter(endtime, dasd_io_time3, block);
689 }
690 #else
691 #define dasd_profile_start(block, cqr, req) do {} while (0)
692 #define dasd_profile_end(block, cqr, req) do {} while (0)
693 #endif				/* CONFIG_DASD_PROFILE */
694 
695 /*
696  * Allocate memory for a channel program with 'cplength' channel
697  * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
699  * memory and 2) dasd_smalloc_request uses the static ccw memory
700  * that gets allocated for each device.
701  */
702 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
703 					  int datasize,
704 					  struct dasd_device *device)
705 {
706 	struct dasd_ccw_req *cqr;
707 
708 	/* Sanity checks */
709 	BUG_ON(datasize > PAGE_SIZE ||
710 	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
711 
712 	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
713 	if (cqr == NULL)
714 		return ERR_PTR(-ENOMEM);
715 	cqr->cpaddr = NULL;
716 	if (cplength > 0) {
717 		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
718 				      GFP_ATOMIC | GFP_DMA);
719 		if (cqr->cpaddr == NULL) {
720 			kfree(cqr);
721 			return ERR_PTR(-ENOMEM);
722 		}
723 	}
724 	cqr->data = NULL;
725 	if (datasize > 0) {
726 		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
727 		if (cqr->data == NULL) {
728 			kfree(cqr->cpaddr);
729 			kfree(cqr);
730 			return ERR_PTR(-ENOMEM);
731 		}
732 	}
733 	cqr->magic =  magic;
734 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
735 	dasd_get_device(device);
736 	return cqr;
737 }
738 
739 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
740 					  int datasize,
741 					  struct dasd_device *device)
742 {
743 	unsigned long flags;
744 	struct dasd_ccw_req *cqr;
745 	char *data;
746 	int size;
747 
748 	/* Sanity checks */
749 	BUG_ON(datasize > PAGE_SIZE ||
750 	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
751 
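	/*
	 * (x + 7L) & -8L rounds x up to the next multiple of 8, e.g.
	 * 44 becomes 48, so the channel program and data area behind
	 * the struct stay doubleword aligned.
	 */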
752 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
753 	if (cplength > 0)
754 		size += cplength * sizeof(struct ccw1);
755 	if (datasize > 0)
756 		size += datasize;
757 	spin_lock_irqsave(&device->mem_lock, flags);
758 	cqr = (struct dasd_ccw_req *)
759 		dasd_alloc_chunk(&device->ccw_chunks, size);
760 	spin_unlock_irqrestore(&device->mem_lock, flags);
761 	if (cqr == NULL)
762 		return ERR_PTR(-ENOMEM);
763 	memset(cqr, 0, sizeof(struct dasd_ccw_req));
764 	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
765 	cqr->cpaddr = NULL;
766 	if (cplength > 0) {
767 		cqr->cpaddr = (struct ccw1 *) data;
768 		data += cplength*sizeof(struct ccw1);
769 		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
770 	}
771 	cqr->data = NULL;
772 	if (datasize > 0) {
773 		cqr->data = data;
		memset(cqr->data, 0, datasize);
775 	}
776 	cqr->magic = magic;
777 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
778 	dasd_get_device(device);
779 	return cqr;
780 }
781 
782 /*
783  * Free memory of a channel program. This function needs to free all the
784  * idal lists that might have been created by dasd_set_cda and the
785  * struct dasd_ccw_req itself.
786  */
787 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
788 {
789 #ifdef CONFIG_64BIT
790 	struct ccw1 *ccw;
791 
792 	/* Clear any idals used for the request. */
793 	ccw = cqr->cpaddr;
794 	do {
795 		clear_normalized_cda(ccw);
796 	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
797 #endif
798 	kfree(cqr->cpaddr);
799 	kfree(cqr->data);
800 	kfree(cqr);
801 	dasd_put_device(device);
802 }
803 
804 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
805 {
806 	unsigned long flags;
807 
808 	spin_lock_irqsave(&device->mem_lock, flags);
809 	dasd_free_chunk(&device->ccw_chunks, cqr);
810 	spin_unlock_irqrestore(&device->mem_lock, flags);
811 	dasd_put_device(device);
812 }
813 
814 /*
815  * Check discipline magic in cqr.
816  */
817 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
818 {
819 	struct dasd_device *device;
820 
821 	if (cqr == NULL)
822 		return -EINVAL;
823 	device = cqr->startdev;
824 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
825 		DBF_DEV_EVENT(DBF_WARNING, device,
826 			    " dasd_ccw_req 0x%08x magic doesn't match"
827 			    " discipline 0x%08x",
828 			    cqr->magic,
829 			    *(unsigned int *) device->discipline->name);
830 		return -EINVAL;
831 	}
832 	return 0;
833 }
834 
835 /*
836  * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps the device running.
838  * ccw_device_clear can fail if the i/o subsystem
839  * is in a bad mood.
840  */
841 int dasd_term_IO(struct dasd_ccw_req *cqr)
842 {
843 	struct dasd_device *device;
844 	int retries, rc;
845 	char errorstring[ERRORLENGTH];
846 
847 	/* Check the cqr */
848 	rc = dasd_check_cqr(cqr);
849 	if (rc)
850 		return rc;
851 	retries = 0;
852 	device = (struct dasd_device *) cqr->startdev;
853 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
854 		rc = ccw_device_clear(device->cdev, (long) cqr);
855 		switch (rc) {
856 		case 0:	/* termination successful */
857 			cqr->retries--;
858 			cqr->status = DASD_CQR_CLEAR_PENDING;
859 			cqr->stopclk = get_clock();
860 			cqr->starttime = 0;
861 			DBF_DEV_EVENT(DBF_DEBUG, device,
862 				      "terminate cqr %p successful",
863 				      cqr);
864 			break;
865 		case -ENODEV:
866 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
867 				      "device gone, retry");
868 			break;
869 		case -EIO:
870 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
871 				      "I/O error, retry");
872 			break;
873 		case -EINVAL:
874 		case -EBUSY:
875 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
876 				      "device busy, retry later");
877 			break;
878 		default:
879 			/* internal error 10 - unknown rc*/
880 			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
881 			dev_err(&device->cdev->dev, "An error occurred in the "
882 				"DASD device driver, reason=%s\n", errorstring);
883 			BUG();
884 			break;
885 		}
886 		retries++;
887 	}
888 	dasd_schedule_device_bh(device);
889 	return rc;
890 }
891 
892 /*
893  * Start the i/o. This start_IO can fail if the channel is really busy.
894  * In that case set up a timer to start the request later.
895  */
896 int dasd_start_IO(struct dasd_ccw_req *cqr)
897 {
898 	struct dasd_device *device;
899 	int rc;
900 	char errorstring[ERRORLENGTH];
901 
902 	/* Check the cqr */
903 	rc = dasd_check_cqr(cqr);
904 	if (rc) {
905 		cqr->intrc = rc;
906 		return rc;
907 	}
908 	device = (struct dasd_device *) cqr->startdev;
909 	if (cqr->retries < 0) {
910 		/* internal error 14 - start_IO run out of retries */
911 		sprintf(errorstring, "14 %p", cqr);
912 		dev_err(&device->cdev->dev, "An error occurred in the DASD "
913 			"device driver, reason=%s\n", errorstring);
914 		cqr->status = DASD_CQR_ERROR;
915 		return -EIO;
916 	}
917 	cqr->startclk = get_clock();
918 	cqr->starttime = jiffies;
919 	cqr->retries--;
920 	if (cqr->cpmode == 1) {
921 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
922 					 (long) cqr, cqr->lpm);
923 	} else {
924 		rc = ccw_device_start(device->cdev, cqr->cpaddr,
925 				      (long) cqr, cqr->lpm, 0);
926 	}
927 	switch (rc) {
928 	case 0:
929 		cqr->status = DASD_CQR_IN_IO;
930 		break;
931 	case -EBUSY:
932 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
933 			      "start_IO: device busy, retry later");
934 		break;
935 	case -ETIMEDOUT:
936 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
937 			      "start_IO: request timeout, retry later");
938 		break;
939 	case -EACCES:
940 		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
944 		 */
945 		cqr->lpm = LPM_ANYPATH;
946 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
949 		break;
950 	case -ENODEV:
951 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
952 			      "start_IO: -ENODEV device gone, retry");
953 		break;
954 	case -EIO:
955 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
956 			      "start_IO: -EIO device gone, retry");
957 		break;
958 	case -EINVAL:
959 		/* most likely caused in power management context */
960 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
961 			      "start_IO: -EINVAL device currently "
962 			      "not accessible");
963 		break;
964 	default:
965 		/* internal error 11 - unknown rc */
966 		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
967 		dev_err(&device->cdev->dev,
968 			"An error occurred in the DASD device driver, "
969 			"reason=%s\n", errorstring);
970 		BUG();
971 		break;
972 	}
973 	cqr->intrc = rc;
974 	return rc;
975 }
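/*
 * Note on dasd_start_IO above: cqr->cpmode == 1 marks a transport mode
 * (tcw based) channel program that has to be started with
 * ccw_device_tm_start; everything else is a traditional command mode
 * ccw chain started with ccw_device_start.
 */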
976 
977 /*
978  * Timeout function for dasd devices. This is used for different purposes
979  *  1) missing interrupt handler for normal operation
980  *  2) delayed start of request where start_IO failed with -EBUSY
981  *  3) timeout for missing state change interrupts
982  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
983  * DASD_CQR_QUEUED for 2) and 3).
984  */
985 static void dasd_device_timeout(unsigned long ptr)
986 {
987 	unsigned long flags;
988 	struct dasd_device *device;
989 
990 	device = (struct dasd_device *) ptr;
991 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
992 	/* re-activate request queue */
993 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
994 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
995 	dasd_schedule_device_bh(device);
996 }
997 
998 /*
999  * Setup timeout for a device in jiffies.
1000  */
1001 void dasd_device_set_timer(struct dasd_device *device, int expires)
1002 {
1003 	if (expires == 0)
1004 		del_timer(&device->timer);
1005 	else
1006 		mod_timer(&device->timer, jiffies + expires);
1007 }
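/*
 * Example: dasd_device_set_timer(device, 5 * HZ) arms a five second
 * retry, as used by __dasd_device_check_expire below; an expires value
 * of 0 simply deletes the timer.
 */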
1008 
1009 /*
1010  * Clear timeout for a device.
1011  */
1012 void dasd_device_clear_timer(struct dasd_device *device)
1013 {
1014 	del_timer(&device->timer);
1015 }
1016 
1017 static void dasd_handle_killed_request(struct ccw_device *cdev,
1018 				       unsigned long intparm)
1019 {
1020 	struct dasd_ccw_req *cqr;
1021 	struct dasd_device *device;
1022 
1023 	if (!intparm)
1024 		return;
1025 	cqr = (struct dasd_ccw_req *) intparm;
1026 	if (cqr->status != DASD_CQR_IN_IO) {
1027 		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1028 				"invalid status in handle_killed_request: "
1029 				"%02x", cqr->status);
1030 		return;
1031 	}
1032 
1033 	device = dasd_device_from_cdev_locked(cdev);
1034 	if (IS_ERR(device)) {
1035 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1036 				"unable to get device from cdev");
1037 		return;
1038 	}
1039 
1040 	if (!cqr->startdev ||
1041 	    device != cqr->startdev ||
1042 	    strncmp(cqr->startdev->discipline->ebcname,
1043 		    (char *) &cqr->magic, 4)) {
1044 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1045 				"invalid device in request");
1046 		dasd_put_device(device);
1047 		return;
1048 	}
1049 
1050 	/* Schedule request to be retried. */
1051 	cqr->status = DASD_CQR_QUEUED;
1052 
1053 	dasd_device_clear_timer(device);
1054 	dasd_schedule_device_bh(device);
1055 	dasd_put_device(device);
1056 }
1057 
1058 void dasd_generic_handle_state_change(struct dasd_device *device)
1059 {
1060 	/* First of all start sense subsystem status request. */
1061 	dasd_eer_snss(device);
1062 
1063 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1064 	dasd_schedule_device_bh(device);
1065 	if (device->block)
1066 		dasd_schedule_block_bh(device->block);
1067 }
1068 
1069 /*
1070  * Interrupt handler for "normal" ssch-io based dasd devices.
1071  */
1072 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1073 		      struct irb *irb)
1074 {
1075 	struct dasd_ccw_req *cqr, *next;
1076 	struct dasd_device *device;
1077 	unsigned long long now;
1078 	int expires;
1079 
1080 	if (IS_ERR(irb)) {
1081 		switch (PTR_ERR(irb)) {
1082 		case -EIO:
1083 			break;
1084 		case -ETIMEDOUT:
1085 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1086 					"request timed out\n", __func__);
1087 			break;
1088 		default:
1089 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1090 					"unknown error %ld\n", __func__,
1091 					PTR_ERR(irb));
1092 		}
1093 		dasd_handle_killed_request(cdev, intparm);
1094 		return;
1095 	}
1096 
1097 	now = get_clock();
1098 
1099 	/* check for unsolicited interrupts */
1100 	cqr = (struct dasd_ccw_req *) intparm;
1101 	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
1102 		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1103 		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
1104 		if (cqr && cqr->status == DASD_CQR_IN_IO)
1105 			cqr->status = DASD_CQR_QUEUED;
1106 		device = dasd_device_from_cdev_locked(cdev);
1107 		if (!IS_ERR(device)) {
1108 			dasd_device_clear_timer(device);
1109 			device->discipline->handle_unsolicited_interrupt(device,
1110 									 irb);
1111 			dasd_put_device(device);
1112 		}
1113 		return;
1114 	}
1115 
1116 	device = (struct dasd_device *) cqr->startdev;
1117 	if (!device ||
1118 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1119 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1120 				"invalid device in request");
1121 		return;
1122 	}
1123 
1124 	/* Check for clear pending */
1125 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1126 	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1127 		cqr->status = DASD_CQR_CLEARED;
1128 		dasd_device_clear_timer(device);
1129 		wake_up(&dasd_flush_wq);
1130 		dasd_schedule_device_bh(device);
1131 		return;
1132 	}
1133 
1134 	/* check status - the request might have been killed by dyn detach */
1135 	if (cqr->status != DASD_CQR_IN_IO) {
1136 		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1137 			      "status %02x", dev_name(&cdev->dev), cqr->status);
1138 		return;
1139 	}
1140 
1141 	next = NULL;
1142 	expires = 0;
1143 	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1144 	    scsw_cstat(&irb->scsw) == 0) {
1145 		/* request was completed successfully */
1146 		cqr->status = DASD_CQR_SUCCESS;
1147 		cqr->stopclk = now;
1148 		/* Start first request on queue if possible -> fast_io. */
1149 		if (cqr->devlist.next != &device->ccw_queue) {
1150 			next = list_entry(cqr->devlist.next,
1151 					  struct dasd_ccw_req, devlist);
1152 		}
1153 	} else {  /* error */
1154 		memcpy(&cqr->irb, irb, sizeof(struct irb));
1155 		/* log sense for every failed I/O to s390 debugfeature */
1156 		dasd_log_sense_dbf(cqr, irb);
1157 		if (device->features & DASD_FEATURE_ERPLOG) {
1158 			dasd_log_sense(cqr, irb);
1159 		}
1160 
1161 		/*
1162 		 * If we don't want complex ERP for this request, then just
1163 		 * reset this and retry it in the fastpath
1164 		 */
1165 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1166 		    cqr->retries > 0) {
1167 			if (cqr->lpm == LPM_ANYPATH)
1168 				DBF_DEV_EVENT(DBF_DEBUG, device,
1169 					      "default ERP in fastpath "
1170 					      "(%i retries left)",
1171 					      cqr->retries);
1172 			cqr->lpm    = LPM_ANYPATH;
1173 			cqr->status = DASD_CQR_QUEUED;
1174 			next = cqr;
1175 		} else
1176 			cqr->status = DASD_CQR_ERROR;
1177 	}
1178 	if (next && (next->status == DASD_CQR_QUEUED) &&
1179 	    (!device->stopped)) {
1180 		if (device->discipline->start_IO(next) == 0)
1181 			expires = next->expires;
1182 	}
1183 	if (expires != 0)
1184 		dasd_device_set_timer(device, expires);
1185 	else
1186 		dasd_device_clear_timer(device);
1187 	dasd_schedule_device_bh(device);
1188 }
1189 
1190 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1191 {
1192 	struct dasd_device *device;
1193 
1194 	device = dasd_device_from_cdev_locked(cdev);
1195 
1196 	if (IS_ERR(device))
1197 		goto out;
1198 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1199 	   device->state != device->target ||
1200 	   !device->discipline->handle_unsolicited_interrupt){
1201 		dasd_put_device(device);
1202 		goto out;
1203 	}
1204 
1205 	dasd_device_clear_timer(device);
1206 	device->discipline->handle_unsolicited_interrupt(device, irb);
1207 	dasd_put_device(device);
1208 out:
1209 	return UC_TODO_RETRY;
1210 }
1211 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1212 
1213 /*
1214  * If we have an error on a dasd_block layer request then we cancel
1215  * and return all further requests from the same dasd_block as well.
1216  */
1217 static void __dasd_device_recovery(struct dasd_device *device,
1218 				   struct dasd_ccw_req *ref_cqr)
1219 {
1220 	struct list_head *l, *n;
1221 	struct dasd_ccw_req *cqr;
1222 
1223 	/*
	 * only requeue requests that came from the dasd_block layer
1225 	 */
1226 	if (!ref_cqr->block)
1227 		return;
1228 
1229 	list_for_each_safe(l, n, &device->ccw_queue) {
1230 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1231 		if (cqr->status == DASD_CQR_QUEUED &&
1232 		    ref_cqr->block == cqr->block) {
1233 			cqr->status = DASD_CQR_CLEARED;
1234 		}
1235 	}
}
1237 
1238 /*
1239  * Remove those ccw requests from the queue that need to be returned
1240  * to the upper layer.
1241  */
1242 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1243 					    struct list_head *final_queue)
1244 {
1245 	struct list_head *l, *n;
1246 	struct dasd_ccw_req *cqr;
1247 
	/* Process requests with final status. */
1249 	list_for_each_safe(l, n, &device->ccw_queue) {
1250 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1251 
1252 		/* Stop list processing at the first non-final request. */
1253 		if (cqr->status == DASD_CQR_QUEUED ||
1254 		    cqr->status == DASD_CQR_IN_IO ||
1255 		    cqr->status == DASD_CQR_CLEAR_PENDING)
1256 			break;
1257 		if (cqr->status == DASD_CQR_ERROR) {
1258 			__dasd_device_recovery(device, cqr);
1259 		}
1260 		/* Rechain finished requests to final queue */
1261 		list_move_tail(&cqr->devlist, final_queue);
1262 	}
1263 }
1264 
1265 /*
1266  * the cqrs from the final queue are returned to the upper layer
1267  * by setting a dasd_block state and calling the callback function
1268  */
1269 static void __dasd_device_process_final_queue(struct dasd_device *device,
1270 					      struct list_head *final_queue)
1271 {
1272 	struct list_head *l, *n;
1273 	struct dasd_ccw_req *cqr;
1274 	struct dasd_block *block;
1275 	void (*callback)(struct dasd_ccw_req *, void *data);
1276 	void *callback_data;
1277 	char errorstring[ERRORLENGTH];
1278 
1279 	list_for_each_safe(l, n, final_queue) {
1280 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1281 		list_del_init(&cqr->devlist);
1282 		block = cqr->block;
1283 		callback = cqr->callback;
1284 		callback_data = cqr->callback_data;
1285 		if (block)
1286 			spin_lock_bh(&block->queue_lock);
1287 		switch (cqr->status) {
1288 		case DASD_CQR_SUCCESS:
1289 			cqr->status = DASD_CQR_DONE;
1290 			break;
1291 		case DASD_CQR_ERROR:
1292 			cqr->status = DASD_CQR_NEED_ERP;
1293 			break;
1294 		case DASD_CQR_CLEARED:
1295 			cqr->status = DASD_CQR_TERMINATED;
1296 			break;
1297 		default:
1298 			/* internal error 12 - wrong cqr status*/
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1300 			dev_err(&device->cdev->dev,
1301 				"An error occurred in the DASD device driver, "
1302 				"reason=%s\n", errorstring);
1303 			BUG();
1304 		}
1305 		if (cqr->callback != NULL)
1306 			(callback)(cqr, callback_data);
1307 		if (block)
1308 			spin_unlock_bh(&block->queue_lock);
1309 	}
1310 }
1311 
1312 /*
1313  * Take a look at the first request on the ccw queue and check
1314  * if it reached its expire time. If so, terminate the IO.
1315  */
1316 static void __dasd_device_check_expire(struct dasd_device *device)
1317 {
1318 	struct dasd_ccw_req *cqr;
1319 
1320 	if (list_empty(&device->ccw_queue))
1321 		return;
1322 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1323 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1324 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1325 		if (device->discipline->term_IO(cqr) != 0) {
1326 			/* Hmpf, try again in 5 sec */
1327 			dev_err(&device->cdev->dev,
1328 				"cqr %p timed out (%lus) but cannot be "
1329 				"ended, retrying in 5 s\n",
1330 				cqr, (cqr->expires/HZ));
1331 			cqr->expires += 5*HZ;
1332 			dasd_device_set_timer(device, 5*HZ);
1333 		} else {
1334 			dev_err(&device->cdev->dev,
1335 				"cqr %p timed out (%lus), %i retries "
1336 				"remaining\n", cqr, (cqr->expires/HZ),
1337 				cqr->retries);
1338 		}
1339 	}
1340 }
1341 
1342 /*
1343  * Take a look at the first request on the ccw queue and check
1344  * if it needs to be started.
1345  */
1346 static void __dasd_device_start_head(struct dasd_device *device)
1347 {
1348 	struct dasd_ccw_req *cqr;
1349 	int rc;
1350 
1351 	if (list_empty(&device->ccw_queue))
1352 		return;
1353 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1354 	if (cqr->status != DASD_CQR_QUEUED)
1355 		return;
1356 	/* when device is stopped, return request to previous layer */
1357 	if (device->stopped) {
1358 		cqr->status = DASD_CQR_CLEARED;
1359 		dasd_schedule_device_bh(device);
1360 		return;
1361 	}
1362 
1363 	rc = device->discipline->start_IO(cqr);
1364 	if (rc == 0)
1365 		dasd_device_set_timer(device, cqr->expires);
1366 	else if (rc == -EACCES) {
1367 		dasd_schedule_device_bh(device);
1368 	} else
1369 		/* Hmpf, try again in 1/2 sec */
1370 		dasd_device_set_timer(device, 50);
1371 }
1372 
1373 /*
 * Go through all requests on the dasd_device request queue,
1375  * terminate them on the cdev if necessary, and return them to the
1376  * submitting layer via callback.
1377  * Note:
1378  * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed
 * beforehand via dasd_flush_block_queue.
1382  */
1383 int dasd_flush_device_queue(struct dasd_device *device)
1384 {
1385 	struct dasd_ccw_req *cqr, *n;
1386 	int rc;
1387 	struct list_head flush_queue;
1388 
1389 	INIT_LIST_HEAD(&flush_queue);
1390 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1391 	rc = 0;
1392 	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1393 		/* Check status and move request to flush_queue */
1394 		switch (cqr->status) {
1395 		case DASD_CQR_IN_IO:
1396 			rc = device->discipline->term_IO(cqr);
1397 			if (rc) {
				/* unable to terminate request */
1399 				dev_err(&device->cdev->dev,
1400 					"Flushing the DASD request queue "
1401 					"failed for request %p\n", cqr);
1402 				/* stop flush processing */
1403 				goto finished;
1404 			}
1405 			break;
1406 		case DASD_CQR_QUEUED:
1407 			cqr->stopclk = get_clock();
1408 			cqr->status = DASD_CQR_CLEARED;
1409 			break;
1410 		default: /* no need to modify the others */
1411 			break;
1412 		}
1413 		list_move_tail(&cqr->devlist, &flush_queue);
1414 	}
1415 finished:
1416 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1417 	/*
1418 	 * After this point all requests must be in state CLEAR_PENDING,
1419 	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1420 	 * one of the others.
1421 	 */
1422 	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1423 		wait_event(dasd_flush_wq,
1424 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
1425 	/*
1426 	 * Now set each request back to TERMINATED, DONE or NEED_ERP
1427 	 * and call the callback function of flushed requests
1428 	 */
1429 	__dasd_device_process_final_queue(device, &flush_queue);
1430 	return rc;
1431 }
1432 
1433 /*
1434  * Acquire the device lock and process queues for the device.
1435  */
1436 static void dasd_device_tasklet(struct dasd_device *device)
1437 {
1438 	struct list_head final_queue;
1439 
1440 	atomic_set (&device->tasklet_scheduled, 0);
1441 	INIT_LIST_HEAD(&final_queue);
1442 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1443 	/* Check expire time of first request on the ccw queue. */
1444 	__dasd_device_check_expire(device);
1445 	/* find final requests on ccw queue */
1446 	__dasd_device_process_ccw_queue(device, &final_queue);
1447 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1448 	/* Now call the callback function of requests with final status */
1449 	__dasd_device_process_final_queue(device, &final_queue);
1450 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1451 	/* Now check if the head of the ccw queue needs to be started. */
1452 	__dasd_device_start_head(device);
1453 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1454 	dasd_put_device(device);
1455 }
1456 
1457 /*
 * Schedule a call to dasd_device_tasklet over the device tasklet.
1459  */
1460 void dasd_schedule_device_bh(struct dasd_device *device)
1461 {
1462 	/* Protect against rescheduling. */
1463 	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1464 		return;
1465 	dasd_get_device(device);
1466 	tasklet_hi_schedule(&device->tasklet);
1467 }
1468 
1469 void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
1470 {
1471 	device->stopped |= bits;
1472 }
1473 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
1474 
1475 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
1476 {
1477 	device->stopped &= ~bits;
1478 	if (!device->stopped)
1479 		wake_up(&generic_waitq);
1480 }
1481 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
1482 
1483 /*
1484  * Queue a request to the head of the device ccw_queue.
1485  * Start the I/O if possible.
1486  */
1487 void dasd_add_request_head(struct dasd_ccw_req *cqr)
1488 {
1489 	struct dasd_device *device;
1490 	unsigned long flags;
1491 
1492 	device = cqr->startdev;
1493 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1494 	cqr->status = DASD_CQR_QUEUED;
1495 	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the requests to keep them in order */
1497 	dasd_schedule_device_bh(device);
1498 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1499 }
1500 
1501 /*
1502  * Queue a request to the tail of the device ccw_queue.
1503  * Start the I/O if possible.
1504  */
1505 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1506 {
1507 	struct dasd_device *device;
1508 	unsigned long flags;
1509 
1510 	device = cqr->startdev;
1511 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1512 	cqr->status = DASD_CQR_QUEUED;
1513 	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the requests to keep them in order */
1515 	dasd_schedule_device_bh(device);
1516 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1517 }
1518 
1519 /*
1520  * Wakeup helper for the 'sleep_on' functions.
1521  */
1522 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1523 {
1524 	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1525 	cqr->callback_data = DASD_SLEEPON_END_TAG;
1526 	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1527 	wake_up(&generic_waitq);
1528 }
1529 
1530 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1531 {
1532 	struct dasd_device *device;
1533 	int rc;
1534 
1535 	device = cqr->startdev;
1536 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1537 	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
1538 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1539 	return rc;
1540 }
1541 
1542 /*
 * Check if error recovery is necessary; return 1 if yes, 0 otherwise.
1544  */
1545 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
1546 {
1547 	struct dasd_device *device;
1548 	dasd_erp_fn_t erp_fn;
1549 
1550 	if (cqr->status == DASD_CQR_FILLED)
1551 		return 0;
1552 	device = cqr->startdev;
1553 	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1554 		if (cqr->status == DASD_CQR_TERMINATED) {
1555 			device->discipline->handle_terminated_request(cqr);
1556 			return 1;
1557 		}
1558 		if (cqr->status == DASD_CQR_NEED_ERP) {
1559 			erp_fn = device->discipline->erp_action(cqr);
1560 			erp_fn(cqr);
1561 			return 1;
1562 		}
1563 		if (cqr->status == DASD_CQR_FAILED)
1564 			dasd_log_sense(cqr, &cqr->irb);
1565 		if (cqr->refers) {
1566 			__dasd_process_erp(device, cqr);
1567 			return 1;
1568 		}
1569 	}
1570 	return 0;
1571 }
1572 
1573 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
1574 {
1575 	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1576 		if (cqr->refers) /* erp is not done yet */
1577 			return 1;
1578 		return ((cqr->status != DASD_CQR_DONE) &&
1579 			(cqr->status != DASD_CQR_FAILED));
1580 	} else
1581 		return (cqr->status == DASD_CQR_FILLED);
1582 }
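/*
 * For reference, a sketch of the cqr status lifecycle that the
 * sleep_on loop below walks through, derived from the code in this
 * file:
 *
 *	FILLED -> QUEUED -> IN_IO -+-> SUCCESS       -> DONE
 *	                           +-> ERROR         -> NEED_ERP
 *	                           +-> CLEAR_PENDING -> CLEARED -> TERMINATED
 *
 * ERP either builds a new request that refers to the failed one or
 * finally sets the status to DONE or FAILED.
 */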
1583 
1584 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
1585 {
1586 	struct dasd_device *device;
1587 	int rc;
1588 	struct list_head ccw_queue;
1589 	struct dasd_ccw_req *cqr;
1590 
1591 	INIT_LIST_HEAD(&ccw_queue);
1592 	maincqr->status = DASD_CQR_FILLED;
1593 	device = maincqr->startdev;
1594 	list_add(&maincqr->blocklist, &ccw_queue);
1595 	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
1596 	     cqr = list_first_entry(&ccw_queue,
1597 				    struct dasd_ccw_req, blocklist)) {
1598 
1599 		if (__dasd_sleep_on_erp(cqr))
1600 			continue;
1601 		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
1602 			continue;
1603 
1604 		/* Non-temporary stop condition will trigger fail fast */
1605 		if (device->stopped & ~DASD_STOPPED_PENDING &&
1606 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1607 		    (!dasd_eer_enabled(device))) {
1608 			cqr->status = DASD_CQR_FAILED;
1609 			continue;
1610 		}
1611 
1612 		/* Don't try to start requests if device is stopped */
1613 		if (interruptible) {
1614 			rc = wait_event_interruptible(
1615 				generic_waitq, !(device->stopped));
1616 			if (rc == -ERESTARTSYS) {
1617 				cqr->status = DASD_CQR_FAILED;
1618 				maincqr->intrc = rc;
1619 				continue;
1620 			}
1621 		} else
1622 			wait_event(generic_waitq, !(device->stopped));
1623 
1624 		cqr->callback = dasd_wakeup_cb;
1625 		cqr->callback_data = DASD_SLEEPON_START_TAG;
1626 		dasd_add_request_tail(cqr);
1627 		if (interruptible) {
1628 			rc = wait_event_interruptible(
1629 				generic_waitq, _wait_for_wakeup(cqr));
1630 			if (rc == -ERESTARTSYS) {
1631 				dasd_cancel_req(cqr);
1632 				/* wait (non-interruptible) for final status */
1633 				wait_event(generic_waitq,
1634 					   _wait_for_wakeup(cqr));
1635 				cqr->status = DASD_CQR_FAILED;
1636 				maincqr->intrc = rc;
1637 				continue;
1638 			}
1639 		} else
1640 			wait_event(generic_waitq, _wait_for_wakeup(cqr));
1641 	}
1642 
1643 	maincqr->endclk = get_clock();
1644 	if ((maincqr->status != DASD_CQR_DONE) &&
1645 	    (maincqr->intrc != -ERESTARTSYS))
1646 		dasd_log_sense(maincqr, &maincqr->irb);
1647 	if (maincqr->status == DASD_CQR_DONE)
1648 		rc = 0;
1649 	else if (maincqr->intrc)
1650 		rc = maincqr->intrc;
1651 	else
1652 		rc = -EIO;
1653 	return rc;
1654 }
1655 
1656 /*
1657  * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
1659  */
1660 int dasd_sleep_on(struct dasd_ccw_req *cqr)
1661 {
1662 	return _dasd_sleep_on(cqr, 0);
1663 }
1664 
1665 /*
1666  * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
1668  */
1669 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1670 {
1671 	return _dasd_sleep_on(cqr, 1);
1672 }
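/*
 * A minimal synchronous usage sketch (error handling abbreviated; the
 * channel program itself would be set up by the discipline):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->startdev = device;
 *	cqr->retries = 2;
 *	cqr->expires = 10 * HZ;
 *	... fill cqr->cpaddr and cqr->data ...
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, device);
 */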
1673 
1674 /*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and put back to status queued, before the special request is added
1678  * to the head of the queue. Then the special request is waited on normally.
1679  */
1680 static inline int _dasd_term_running_cqr(struct dasd_device *device)
1681 {
1682 	struct dasd_ccw_req *cqr;
1683 
1684 	if (list_empty(&device->ccw_queue))
1685 		return 0;
1686 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1687 	return device->discipline->term_IO(cqr);
1688 }
1689 
1690 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1691 {
1692 	struct dasd_device *device;
1693 	int rc;
1694 
1695 	device = cqr->startdev;
1696 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1697 	rc = _dasd_term_running_cqr(device);
1698 	if (rc) {
1699 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
1700 		return rc;
1701 	}
1702 
1703 	cqr->callback = dasd_wakeup_cb;
1704 	cqr->callback_data = DASD_SLEEPON_START_TAG;
1705 	cqr->status = DASD_CQR_QUEUED;
1706 	list_add(&cqr->devlist, &device->ccw_queue);
1707 
	/* let the bh start the requests to keep them in order */
1709 	dasd_schedule_device_bh(device);
1710 
1711 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1712 
1713 	wait_event(generic_waitq, _wait_for_wakeup(cqr));
1714 
1715 	if (cqr->status == DASD_CQR_DONE)
1716 		rc = 0;
1717 	else if (cqr->intrc)
1718 		rc = cqr->intrc;
1719 	else
1720 		rc = -EIO;
1721 	return rc;
1722 }
1723 
1724 /*
 * Cancel a request that was started with dasd_sleep_on.
 * This is useful for timing out requests. The request will be
1727  * terminated if it is currently in i/o.
1728  * Returns 1 if the request has been terminated.
1729  *	   0 if there was no need to terminate the request (not started yet)
1730  *	   negative error code if termination failed
1731  * Cancellation of a request is an asynchronous operation! The calling
1732  * function has to wait until the request is properly returned via callback.
1733  */
1734 int dasd_cancel_req(struct dasd_ccw_req *cqr)
1735 {
1736 	struct dasd_device *device = cqr->startdev;
1737 	unsigned long flags;
1738 	int rc;
1739 
1740 	rc = 0;
1741 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1742 	switch (cqr->status) {
1743 	case DASD_CQR_QUEUED:
1744 		/* request was not started - just set to cleared */
1745 		cqr->status = DASD_CQR_CLEARED;
1746 		break;
1747 	case DASD_CQR_IN_IO:
1748 		/* request in IO - terminate IO and release again */
1749 		rc = device->discipline->term_IO(cqr);
1750 		if (rc) {
1751 			dev_err(&device->cdev->dev,
1752 				"Cancelling request %p failed with rc=%d\n",
1753 				cqr, rc);
1754 		} else {
1755 			cqr->stopclk = get_clock();
1756 		}
1757 		break;
1758 	default: /* already finished or clear pending - do nothing */
1759 		break;
1760 	}
1761 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1762 	dasd_schedule_device_bh(device);
1763 	return rc;
1764 }
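/*
 * Because cancellation is asynchronous, a caller that wants to time out
 * a request typically proceeds like _dasd_sleep_on above:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *
 * and only then inspects the final cqr->status.
 */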
1765 
1766 
1767 /*
1768  * SECTION: Operations of the dasd_block layer.
1769  */
1770 
1771 /*
1772  * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
1775  */
1776 static void dasd_block_timeout(unsigned long ptr)
1777 {
1778 	unsigned long flags;
1779 	struct dasd_block *block;
1780 
1781 	block = (struct dasd_block *) ptr;
1782 	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1783 	/* re-activate request queue */
1784 	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
1785 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1786 	dasd_schedule_block_bh(block);
1787 }
1788 
1789 /*
1790  * Setup timeout for a dasd_block in jiffies.
1791  */
1792 void dasd_block_set_timer(struct dasd_block *block, int expires)
1793 {
1794 	if (expires == 0)
1795 		del_timer(&block->timer);
1796 	else
1797 		mod_timer(&block->timer, jiffies + expires);
1798 }
1799 
1800 /*
1801  * Clear timeout for a dasd_block.
1802  */
1803 void dasd_block_clear_timer(struct dasd_block *block)
1804 {
1805 	del_timer(&block->timer);
1806 }
1807 
1808 /*
1809  * Process finished error recovery ccw.
1810  */
1811 static void __dasd_process_erp(struct dasd_device *device,
1812 			       struct dasd_ccw_req *cqr)
1813 {
1814 	dasd_erp_fn_t erp_fn;
1815 
1816 	if (cqr->status == DASD_CQR_DONE)
1817 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1818 	else
1819 		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1820 	erp_fn = device->discipline->erp_postaction(cqr);
1821 	erp_fn(cqr);
1822 }
1823 
1824 /*
1825  * Fetch requests from the block device queue.
1826  */
1827 static void __dasd_process_request_queue(struct dasd_block *block)
1828 {
1829 	struct request_queue *queue;
1830 	struct request *req;
1831 	struct dasd_ccw_req *cqr;
1832 	struct dasd_device *basedev;
1833 	unsigned long flags;

1834 	queue = block->request_queue;
1835 	basedev = block->base;
1836 	/* No queue? Then there is nothing to do. */
1837 	if (queue == NULL)
1838 		return;
1839 
1840 	/*
1841 	 * We requeue requests from the block device queue to the ccw
1842 	 * queue only in two states. In state DASD_STATE_READY the
1843 	 * partition detection is done and we need to requeue requests
1844 	 * for that. State DASD_STATE_ONLINE is normal block device
1845 	 * operation.
1846 	 */
1847 	if (basedev->state < DASD_STATE_READY) {
1848 		while ((req = blk_fetch_request(block->request_queue)))
1849 			__blk_end_request_all(req, -EIO);
1850 		return;
1851 	}
1852 	/* Now we try to fetch requests from the request queue */
1853 	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1854 		if (basedev->features & DASD_FEATURE_READONLY &&
1855 		    rq_data_dir(req) == WRITE) {
1856 			DBF_DEV_EVENT(DBF_ERR, basedev,
1857 				      "Rejecting write request %p",
1858 				      req);
1859 			blk_start_request(req);
1860 			__blk_end_request_all(req, -EIO);
1861 			continue;
1862 		}
1863 		cqr = basedev->discipline->build_cp(basedev, block, req);
1864 		if (IS_ERR(cqr)) {
1865 			if (PTR_ERR(cqr) == -EBUSY)
1866 				break;	/* normal end condition */
1867 			if (PTR_ERR(cqr) == -ENOMEM)
1868 				break;	/* terminate request queue loop */
1869 			if (PTR_ERR(cqr) == -EAGAIN) {
1870 				/*
1871 				 * The current request cannot be built right
1872 				 * now, we have to try later. If this request
1873 				 * is the head-of-queue we stop the device
1874 				 * for 1/2 second.
1875 				 */
1876 				if (!list_empty(&block->ccw_queue))
1877 					break;
1878 				spin_lock_irqsave(
1879 					get_ccwdev_lock(basedev->cdev), flags);
1880 				dasd_device_set_stop_bits(basedev,
1881 							  DASD_STOPPED_PENDING);
1882 				spin_unlock_irqrestore(
1883 					get_ccwdev_lock(basedev->cdev), flags);
1884 				dasd_block_set_timer(block, HZ/2);
1885 				break;
1886 			}
1887 			DBF_DEV_EVENT(DBF_ERR, basedev,
1888 				      "CCW creation failed (rc=%ld) "
1889 				      "on request %p",
1890 				      PTR_ERR(cqr), req);
1891 			blk_start_request(req);
1892 			__blk_end_request_all(req, -EIO);
1893 			continue;
1894 		}
1895 		/*
1896 		 * Note: callback is set to dasd_return_cqr_cb in
1897 		 * __dasd_block_start_head to cover erp requests as well
1898 		 */
1899 		cqr->callback_data = (void *) req;
1900 		cqr->status = DASD_CQR_FILLED;
1901 		blk_start_request(req);
1902 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
1903 		dasd_profile_start(block, cqr, req);
1904 	}
1905 }
1906 
1907 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1908 {
1909 	struct request *req;
1910 	int status;
1911 	int error = 0;
1912 
1913 	req = (struct request *) cqr->callback_data;
1914 	dasd_profile_end(cqr->block, cqr, req);
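	/* free_cp returns > 0 on success; 0 or a negative errno is
	 * mapped to an I/O error below */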
1915 	status = cqr->block->base->discipline->free_cp(cqr, req);
1916 	if (status <= 0)
1917 		error = status ? status : -EIO;
1918 	__blk_end_request_all(req, error);
1919 }
1920 
1921 /*
1922  * Process ccw request queue.
1923  */
1924 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1925 					   struct list_head *final_queue)
1926 {
1927 	struct list_head *l, *n;
1928 	struct dasd_ccw_req *cqr;
1929 	dasd_erp_fn_t erp_fn;
1930 	unsigned long flags;
1931 	struct dasd_device *base = block->base;
1932 
1933 restart:
1934 	/* Process requests with final status. */
1935 	list_for_each_safe(l, n, &block->ccw_queue) {
1936 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1937 		if (cqr->status != DASD_CQR_DONE &&
1938 		    cqr->status != DASD_CQR_FAILED &&
1939 		    cqr->status != DASD_CQR_NEED_ERP &&
1940 		    cqr->status != DASD_CQR_TERMINATED)
1941 			continue;
1942 
1943 		if (cqr->status == DASD_CQR_TERMINATED) {
1944 			base->discipline->handle_terminated_request(cqr);
1945 			goto restart;
1946 		}
1947 
1948 		/* Process requests that may be recovered */
1949 		if (cqr->status == DASD_CQR_NEED_ERP) {
1950 			erp_fn = base->discipline->erp_action(cqr);
1951 			if (IS_ERR(erp_fn(cqr)))
1952 				continue;
1953 			goto restart;
1954 		}
1955 
1956 		/* log sense for fatal error */
1957 		if (cqr->status == DASD_CQR_FAILED) {
1958 			dasd_log_sense(cqr, &cqr->irb);
1959 		}
1960 
1961 		/* First of all call extended error reporting. */
1962 		if (dasd_eer_enabled(base) &&
1963 		    cqr->status == DASD_CQR_FAILED) {
1964 			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1965 
1966 			/* restart request */
1967 			cqr->status = DASD_CQR_FILLED;
1968 			cqr->retries = 255;
1969 			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1970 			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
1971 			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1972 					       flags);
1973 			goto restart;
1974 		}
1975 
1976 		/* Process finished ERP request. */
1977 		if (cqr->refers) {
1978 			__dasd_process_erp(base, cqr);
1979 			goto restart;
1980 		}
1981 
1982 		/* Rechain finished requests to final queue */
1983 		cqr->endclk = get_clock();
1984 		list_move_tail(&cqr->blocklist, final_queue);
1985 	}
1986 }
1987 
1988 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1989 {
1990 	dasd_schedule_block_bh(cqr->block);
1991 }
1992 
1993 static void __dasd_block_start_head(struct dasd_block *block)
1994 {
1995 	struct dasd_ccw_req *cqr;
1996 
1997 	if (list_empty(&block->ccw_queue))
1998 		return;
1999 	/* We always begin with the first requests on the queue, as some
2000 	 * of the previously started requests have to be enqueued on a
2001 	 * dasd_device again for error recovery.
2002 	 */
2003 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2004 		if (cqr->status != DASD_CQR_FILLED)
2005 			continue;
2006 		/* Non-temporary stop condition will trigger fail fast */
2007 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2008 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2009 		    (!dasd_eer_enabled(block->base))) {
2010 			cqr->status = DASD_CQR_FAILED;
2011 			dasd_schedule_block_bh(block);
2012 			continue;
2013 		}
2014 		/* Don't try to start requests if device is stopped */
2015 		if (block->base->stopped)
2016 			return;
2017 
2018 		/* just a fail safe check, should not happen */
2019 		if (!cqr->startdev)
2020 			cqr->startdev = block->base;
2021 
2022 		/* make sure that the requests we submit find their way back */
2023 		cqr->callback = dasd_return_cqr_cb;
2024 
2025 		dasd_add_request_tail(cqr);
2026 	}
2027 }
2028 
2029 /*
2030  * Central dasd_block layer routine. Takes requests from the generic
2031  * block layer request queue, creates ccw requests, enqueues them on
2032  * a dasd_device and processes ccw requests that have been returned.
2033  */
2034 static void dasd_block_tasklet(struct dasd_block *block)
2035 {
2036 	struct list_head final_queue;
2037 	struct list_head *l, *n;
2038 	struct dasd_ccw_req *cqr;
2039 
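	/* reset the flag first so the tasklet can be rescheduled while it runs */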
2040 	atomic_set(&block->tasklet_scheduled, 0);
2041 	INIT_LIST_HEAD(&final_queue);
2042 	spin_lock(&block->queue_lock);
2043 	/* Finish off requests on ccw queue */
2044 	__dasd_process_block_ccw_queue(block, &final_queue);
2045 	spin_unlock(&block->queue_lock);
2046 	/* Now call the callback function of requests with final status */
2047 	spin_lock_irq(&block->request_queue_lock);
2048 	list_for_each_safe(l, n, &final_queue) {
2049 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2050 		list_del_init(&cqr->blocklist);
2051 		__dasd_cleanup_cqr(cqr);
2052 	}
2053 	spin_lock(&block->queue_lock);
2054 	/* Get new requests from the block device request queue */
2055 	__dasd_process_request_queue(block);
2056 	/* Now check if the head of the ccw queue needs to be started. */
2057 	__dasd_block_start_head(block);
2058 	spin_unlock(&block->queue_lock);
2059 	spin_unlock_irq(&block->request_queue_lock);
2060 	dasd_put_device(block->base);
2061 }
2062 
2063 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2064 {
2065 	wake_up(&dasd_flush_wq);
2066 }
2067 
2068 /*
2069  * Go through all requests on the dasd_block request queue, cancel them
2070  * on the respective dasd_device, and return them to the generic
2071  * block layer.
2072  */
2073 static int dasd_flush_block_queue(struct dasd_block *block)
2074 {
2075 	struct dasd_ccw_req *cqr, *n;
2076 	int rc, i;
2077 	struct list_head flush_queue;
2078 
2079 	INIT_LIST_HEAD(&flush_queue);
2080 	spin_lock_bh(&block->queue_lock);
2081 	rc = 0;
2082 restart:
2083 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2084 		/* if this request is currently owned by a dasd_device, cancel it */
2085 		if (cqr->status >= DASD_CQR_QUEUED)
2086 			rc = dasd_cancel_req(cqr);
2087 		if (rc < 0)
2088 			break;
2089 		/* Rechain request (including erp chain) so it won't be
2090 		 * touched by the dasd_block_tasklet anymore.
2091 		 * Replace the callback so we notice when the request
2092 		 * is returned from the dasd_device layer.
2093 		 */
2094 		cqr->callback = _dasd_wake_block_flush_cb;
2095 		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
2096 			list_move_tail(&cqr->blocklist, &flush_queue);
2097 		if (i > 1)
2098 			/* moved more than one request - need to restart */
2099 			goto restart;
2100 	}
2101 	spin_unlock_bh(&block->queue_lock);
2102 	/* Now call the callback function of flushed requests */
2103 restart_cb:
2104 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
2105 		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
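		/* a status below DASD_CQR_QUEUED is final, i.e. the
		 * dasd_device layer has returned the request */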
2106 		/* Process finished ERP request. */
2107 		if (cqr->refers) {
2108 			spin_lock_bh(&block->queue_lock);
2109 			__dasd_process_erp(block->base, cqr);
2110 			spin_unlock_bh(&block->queue_lock);
2111 			/* restart list_for_xx loop since dasd_process_erp
2112 			 * might remove multiple elements */
2113 			goto restart_cb;
2114 		}
2115 		/* call the callback function */
2116 		spin_lock_irq(&block->request_queue_lock);
2117 		cqr->endclk = get_clock();
2118 		list_del_init(&cqr->blocklist);
2119 		__dasd_cleanup_cqr(cqr);
2120 		spin_unlock_irq(&block->request_queue_lock);
2121 	}
2122 	return rc;
2123 }
2124 
2125 /*
2126  * Schedules a call to dasd_block_tasklet over the block's tasklet.
2127  */
2128 void dasd_schedule_block_bh(struct dasd_block *block)
2129 {
2130 	/* Protect against rescheduling. */
2131 	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
2132 		return;
2133 	/* life cycle of block is bound to its base device */
2134 	dasd_get_device(block->base);
2135 	tasklet_hi_schedule(&block->tasklet);
2136 }
2137 
2138 
2139 /*
2140  * SECTION: external block device operations
2141  * (request queue handling, open, release, etc.)
2142  */
2143 
2144 /*
2145  * DASD request queue function. Called from the generic block layer.
2146  */
2147 static void do_dasd_request(struct request_queue *queue)
2148 {
2149 	struct dasd_block *block;
2150 
2151 	block = queue->queuedata;
2152 	spin_lock(&block->queue_lock);
2153 	/* Get new request from the block device request queue */
2154 	__dasd_process_request_queue(block);
2155 	/* Now check if the head of the ccw queue needs to be started. */
2156 	__dasd_block_start_head(block);
2157 	spin_unlock(&block->queue_lock);
2158 }
2159 
2160 /*
2161  * Allocate and initialize the request queue, using the deadline I/O scheduler.
2162  */
2163 static int dasd_alloc_queue(struct dasd_block *block)
2164 {
2165 	int rc;
2166 
2167 	block->request_queue = blk_init_queue(do_dasd_request,
2168 					       &block->request_queue_lock);
2169 	if (block->request_queue == NULL)
2170 		return -ENOMEM;
2171 
2172 	block->request_queue->queuedata = block;
2173 
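	/* blk_init_queue installed the default elevator; replace it with deadline */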
2174 	elevator_exit(block->request_queue->elevator);
2175 	block->request_queue->elevator = NULL;
2176 	rc = elevator_init(block->request_queue, "deadline");
2177 	if (rc) {
2178 		blk_cleanup_queue(block->request_queue);
2179 		return rc;
2180 	}
2181 	return 0;
2182 }
2183 
2184 /*
2185  * Set up the request queue: block size and queue limits.
2186  */
2187 static void dasd_setup_queue(struct dasd_block *block)
2188 {
2189 	int max;
2190 
2191 	blk_queue_logical_block_size(block->request_queue, block->bp_block);
2192 	max = block->base->discipline->max_blocks << block->s2b_shift;
2193 	blk_queue_max_hw_sectors(block->request_queue, max);
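	/* -1L wraps to the maximum segment count (the parameter is an
	 * unsigned short), i.e. effectively no limit */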
2194 	blk_queue_max_segments(block->request_queue, -1L);
2195 	/* with page sized segments we can translate each segment into
2196 	 * one idaw/tidaw
2197 	 */
2198 	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2199 	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
2200 	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN);
2201 }
2202 
2203 /*
2204  * Deactivate and free request queue.
2205  */
2206 static void dasd_free_queue(struct dasd_block *block)
2207 {
2208 	if (block->request_queue) {
2209 		blk_cleanup_queue(block->request_queue);
2210 		block->request_queue = NULL;
2211 	}
2212 }
2213 
2214 /*
2215  * Flush requests on the request queue.
2216  */
2217 static void dasd_flush_request_queue(struct dasd_block *block)
2218 {
2219 	struct request *req;
2220 
2221 	if (!block->request_queue)
2222 		return;
2223 
2224 	spin_lock_irq(&block->request_queue_lock);
2225 	while ((req = blk_fetch_request(block->request_queue)))
2226 		__blk_end_request_all(req, -EIO);
2227 	spin_unlock_irq(&block->request_queue_lock);
2228 }
2229 
2230 static int dasd_open(struct block_device *bdev, fmode_t mode)
2231 {
2232 	struct dasd_block *block = bdev->bd_disk->private_data;
2233 	struct dasd_device *base;
2234 	int rc;
2235 
2236 	if (!block)
2237 		return -ENODEV;
2238 
2239 	lock_kernel();
2240 	base = block->base;
2241 	atomic_inc(&block->open_count);
2242 	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2243 		rc = -ENODEV;
2244 		goto unlock;
2245 	}
2246 
2247 	if (!try_module_get(base->discipline->owner)) {
2248 		rc = -EINVAL;
2249 		goto unlock;
2250 	}
2251 
2252 	if (dasd_probeonly) {
2253 		dev_info(&base->cdev->dev,
2254 			 "Accessing the DASD failed because it is in "
2255 			 "probeonly mode\n");
2256 		rc = -EPERM;
2257 		goto out;
2258 	}
2259 
2260 	if (base->state <= DASD_STATE_BASIC) {
2261 		DBF_DEV_EVENT(DBF_ERR, base, " %s",
2262 			      " Cannot open unrecognized device");
2263 		rc = -ENODEV;
2264 		goto out;
2265 	}
2266 
2267 	if ((mode & FMODE_WRITE) &&
2268 	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
2269 	     (base->features & DASD_FEATURE_READONLY))) {
2270 		rc = -EROFS;
2271 		goto out;
2272 	}
2273 
2274 	unlock_kernel();
2275 	return 0;
2276 
2277 out:
2278 	module_put(base->discipline->owner);
2279 unlock:
2280 	atomic_dec(&block->open_count);
2281 	unlock_kernel();
2282 	return rc;
2283 }
2284 
2285 static int dasd_release(struct gendisk *disk, fmode_t mode)
2286 {
2287 	struct dasd_block *block = disk->private_data;
2288 
2289 	lock_kernel();
2290 	atomic_dec(&block->open_count);
2291 	module_put(block->base->discipline->owner);
2292 	unlock_kernel();
2293 	return 0;
2294 }
2295 
2296 /*
2297  * Return disk geometry.
2298  */
2299 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2300 {
2301 	struct dasd_block *block;
2302 	struct dasd_device *base;
2303 
2304 	block = bdev->bd_disk->private_data;
2305 	if (!block)
2306 		return -ENODEV;
2307 	base = block->base;
2308 
2309 	if (!base->discipline ||
2310 	    !base->discipline->fill_geometry)
2311 		return -EINVAL;
2312 
2313 	base->discipline->fill_geometry(block, geo);
2314 	geo->start = get_start_sect(bdev) >> block->s2b_shift;
2315 	return 0;
2316 }
2317 
2318 const struct block_device_operations
2319 dasd_device_operations = {
2320 	.owner		= THIS_MODULE,
2321 	.open		= dasd_open,
2322 	.release	= dasd_release,
2323 	.ioctl		= dasd_ioctl,
2324 	.compat_ioctl	= dasd_ioctl,
2325 	.getgeo		= dasd_getgeo,
2326 };
2327 
2328 /*******************************************************************************
2329  * end of block device operations
2330  */
2331 
2332 static void
2333 dasd_exit(void)
2334 {
2335 #ifdef CONFIG_PROC_FS
2336 	dasd_proc_exit();
2337 #endif
2338 	dasd_eer_exit();
2339 	if (dasd_page_cache != NULL) {
2340 		kmem_cache_destroy(dasd_page_cache);
2341 		dasd_page_cache = NULL;
2342 	}
2343 	dasd_gendisk_exit();
2344 	dasd_devmap_exit();
2345 	if (dasd_debug_area != NULL) {
2346 		debug_unregister(dasd_debug_area);
2347 		dasd_debug_area = NULL;
2348 	}
2349 }
2350 
2351 /*
2352  * SECTION: common functions for ccw_driver use
2353  */
2354 
2355 /*
2356  * Is the device read-only?
2357  * Note that this function does not report the setting of the
2358  * readonly device attribute, but how it is configured in z/VM.
2359  */
2360 int dasd_device_is_ro(struct dasd_device *device)
2361 {
2362 	struct ccw_dev_id dev_id;
2363 	struct diag210 diag_data;
2364 	int rc;
2365 
2366 	if (!MACHINE_IS_VM)
2367 		return 0;
2368 	ccw_device_get_id(device->cdev, &dev_id);
2369 	memset(&diag_data, 0, sizeof(diag_data));
2370 	diag_data.vrdcdvno = dev_id.devno;
2371 	diag_data.vrdclen = sizeof(diag_data);
2372 	rc = diag210(&diag_data);
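	/* vrdcvfla bit 0x80 is set if z/VM presents the device read-only */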
2373 	if (rc == 0 || rc == 2) {
2374 		return diag_data.vrdcvfla & 0x80;
2375 	} else {
2376 		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
2377 			  dev_id.devno, rc);
2378 		return 0;
2379 	}
2380 }
2381 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
2382 
2383 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2384 {
2385 	struct ccw_device *cdev = data;
2386 	int ret;
2387 
2388 	ret = ccw_device_set_online(cdev);
2389 	if (ret)
2390 		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2391 			   dev_name(&cdev->dev), ret);
2392 }
2393 
2394 /*
2395  * Initial attempt at a probe function. This can be simplified once
2396  * the other detection code is gone.
2397  */
2398 int dasd_generic_probe(struct ccw_device *cdev,
2399 		       struct dasd_discipline *discipline)
2400 {
2401 	int ret;
2402 
2403 	ret = dasd_add_sysfs_files(cdev);
2404 	if (ret) {
2405 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2406 				"dasd_generic_probe: could not add "
2407 				"sysfs entries");
2408 		return ret;
2409 	}
2410 	cdev->handler = &dasd_int_handler;
2411 
2412 	/*
2413 	 * Automatically online either all dasd devices (dasd_autodetect)
2414 	 * or all devices specified with dasd= parameters during
2415 	 * initial probe.
2416 	 */
2417 	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
2418 	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2419 		async_schedule(dasd_generic_auto_online, cdev);
2420 	return 0;
2421 }
2422 
2423 /*
2424  * This will one day be called from a global not_oper handler.
2425  * It is also used by driver_unregister during module unload.
2426  */
2427 void dasd_generic_remove(struct ccw_device *cdev)
2428 {
2429 	struct dasd_device *device;
2430 	struct dasd_block *block;
2431 
2432 	cdev->handler = NULL;
2433 
2434 	dasd_remove_sysfs_files(cdev);
2435 	device = dasd_device_from_cdev(cdev);
2436 	if (IS_ERR(device))
2437 		return;
2438 	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2439 		/* Already doing offline processing */
2440 		dasd_put_device(device);
2441 		return;
2442 	}
2443 	/*
2444 	 * This device is removed unconditionally. Set offline
2445 	 * flag to prevent dasd_open from opening it while it is
2446 	 * not quite down yet.
2447 	 */
2448 	dasd_set_target_state(device, DASD_STATE_NEW);
2449 	/* dasd_delete_device destroys the device reference. */
2450 	block = device->block;
2451 	device->block = NULL;
2452 	dasd_delete_device(device);
2453 	/*
2454 	 * life cycle of block is bound to device, so delete it after
2455 	 * device was safely removed
2456 	 */
2457 	if (block)
2458 		dasd_free_block(block);
2459 }
2460 
2461 /*
2462  * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2463  * the device is detected for the first time and is supposed to be used
2464  * or the user has started activation through sysfs.
2465  */
2466 int dasd_generic_set_online(struct ccw_device *cdev,
2467 			    struct dasd_discipline *base_discipline)
2468 {
2469 	struct dasd_discipline *discipline;
2470 	struct dasd_device *device;
2471 	int rc;
2472 
2473 	/* first online clears initial online feature flag */
2474 	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2475 	device = dasd_create_device(cdev);
2476 	if (IS_ERR(device))
2477 		return PTR_ERR(device);
2478 
2479 	discipline = base_discipline;
2480 	if (device->features & DASD_FEATURE_USEDIAG) {
2481 		if (!dasd_diag_discipline_pointer) {
2482 			pr_warning("%s: Setting the DASD online failed because "
2483 				   "of missing DIAG discipline\n",
2484 				   dev_name(&cdev->dev));
2485 			dasd_delete_device(device);
2486 			return -ENODEV;
2487 		}
2488 		discipline = dasd_diag_discipline_pointer;
2489 	}
2490 	if (!try_module_get(base_discipline->owner)) {
2491 		dasd_delete_device(device);
2492 		return -EINVAL;
2493 	}
2494 	if (!try_module_get(discipline->owner)) {
2495 		module_put(base_discipline->owner);
2496 		dasd_delete_device(device);
2497 		return -EINVAL;
2498 	}
2499 	device->base_discipline = base_discipline;
2500 	device->discipline = discipline;
2501 
2502 	/* check_device will allocate block device if necessary */
2503 	rc = discipline->check_device(device);
2504 	if (rc) {
2505 		pr_warning("%s Setting the DASD online with discipline %s "
2506 			   "failed with rc=%i\n",
2507 			   dev_name(&cdev->dev), discipline->name, rc);
2508 		module_put(discipline->owner);
2509 		module_put(base_discipline->owner);
2510 		dasd_delete_device(device);
2511 		return rc;
2512 	}
2513 
2514 	dasd_set_target_state(device, DASD_STATE_ONLINE);
2515 	if (device->state <= DASD_STATE_KNOWN) {
2516 		pr_warning("%s Setting the DASD online failed because of a "
2517 			   "missing discipline\n", dev_name(&cdev->dev));
2518 		rc = -ENODEV;
2519 		dasd_set_target_state(device, DASD_STATE_NEW);
2520 		if (device->block)
2521 			dasd_free_block(device->block);
2522 		dasd_delete_device(device);
2523 	} else
2524 		pr_debug("dasd_generic device %s found\n",
2525 				dev_name(&cdev->dev));
2526 
2527 	wait_event(dasd_init_waitq, _wait_for_device(device));
2528 
2529 	dasd_put_device(device);
2530 	return rc;
2531 }
2532 
2533 int dasd_generic_set_offline(struct ccw_device *cdev)
2534 {
2535 	struct dasd_device *device;
2536 	struct dasd_block *block;
2537 	int max_count, open_count;
2538 
2539 	device = dasd_device_from_cdev(cdev);
2540 	if (IS_ERR(device))
2541 		return PTR_ERR(device);
2542 	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2543 		/* Already doing offline processing */
2544 		dasd_put_device(device);
2545 		return 0;
2546 	}
2547 	/*
2548 	 * We must make sure that this device is currently not in use.
2549 	 * The open_count is increased for every opener; that includes
2550 	 * the blkdev_get in dasd_scan_partitions. We are only interested
2551 	 * in the other openers.
2552 	 */
2553 	if (device->block) {
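		/* open_count starts at -1 (see dasd_alloc_block), so the
		 * partition detection reference raises the idle count to 0 */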
2554 		max_count = device->block->bdev ? 0 : -1;
2555 		open_count = atomic_read(&device->block->open_count);
2556 		if (open_count > max_count) {
2557 			if (open_count > 0)
2558 				pr_warning("%s: The DASD cannot be set offline "
2559 					   "with open count %i\n",
2560 					   dev_name(&cdev->dev), open_count);
2561 			else
2562 				pr_warning("%s: The DASD cannot be set offline "
2563 					   "while it is in use\n",
2564 					   dev_name(&cdev->dev));
2565 			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2566 			dasd_put_device(device);
2567 			return -EBUSY;
2568 		}
2569 	}
2570 	dasd_set_target_state(device, DASD_STATE_NEW);
2571 	/* dasd_delete_device destroys the device reference. */
2572 	block = device->block;
2573 	device->block = NULL;
2574 	dasd_delete_device(device);
2575 	/*
2576 	 * life cycle of block is bound to device, so delete it after
2577 	 * device was safely removed
2578 	 */
2579 	if (block)
2580 		dasd_free_block(block);
2581 	return 0;
2582 }
2583 
2584 int dasd_generic_notify(struct ccw_device *cdev, int event)
2585 {
2586 	struct dasd_device *device;
2587 	struct dasd_ccw_req *cqr;
2588 	int ret;
2589 
2590 	device = dasd_device_from_cdev_locked(cdev);
2591 	if (IS_ERR(device))
2592 		return 0;
2593 	ret = 0;
2594 	switch (event) {
2595 	case CIO_GONE:
2596 	case CIO_BOXED:
2597 	case CIO_NO_PATH:
2598 		/* First of all call extended error reporting. */
2599 		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2600 
2601 		if (device->state < DASD_STATE_BASIC)
2602 			break;
2603 		/* Device is active. We want to keep it. */
2604 		list_for_each_entry(cqr, &device->ccw_queue, devlist)
2605 			if (cqr->status == DASD_CQR_IN_IO) {
2606 				cqr->status = DASD_CQR_QUEUED;
2607 				cqr->retries++;
2608 			}
2609 		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2610 		dasd_device_clear_timer(device);
2611 		dasd_schedule_device_bh(device);
2612 		ret = 1;
2613 		break;
2614 	case CIO_OPER:
2615 		/* FIXME: add a sanity check. */
2616 		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2617 		if (device->stopped & DASD_UNRESUMED_PM) {
2618 			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2619 			dasd_restore_device(device);
2620 			ret = 1;
2621 			break;
2622 		}
2623 		dasd_schedule_device_bh(device);
2624 		if (device->block)
2625 			dasd_schedule_block_bh(device->block);
2626 		ret = 1;
2627 		break;
2628 	}
2629 	dasd_put_device(device);
2630 	return ret;
2631 }
2632 
2633 int dasd_generic_pm_freeze(struct ccw_device *cdev)
2634 {
2635 	struct dasd_ccw_req *cqr, *n;
2636 	int rc;
2637 	struct list_head freeze_queue;
2638 	struct dasd_device *device = dasd_device_from_cdev(cdev);
2639 
2640 	if (IS_ERR(device))
2641 		return PTR_ERR(device);
2642 	/* disallow new I/O */
2643 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
2644 	/* clear active requests */
2645 	INIT_LIST_HEAD(&freeze_queue);
2646 	spin_lock_irq(get_ccwdev_lock(cdev));
2647 	rc = 0;
2648 	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2649 		/* Check status and move request to freeze_queue */
2650 		if (cqr->status == DASD_CQR_IN_IO) {
2651 			rc = device->discipline->term_IO(cqr);
2652 			if (rc) {
2653 				/* unable to terminate request */
2654 				dev_err(&device->cdev->dev,
2655 					"Unable to terminate request %p "
2656 					"on suspend\n", cqr);
2657 				spin_unlock_irq(get_ccwdev_lock(cdev));
2658 				dasd_put_device(device);
2659 				return rc;
2660 			}
2661 		}
2662 		list_move_tail(&cqr->devlist, &freeze_queue);
2663 	}
2664 
2665 	spin_unlock_irq(get_ccwdev_lock(cdev));
2666 
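	/* wait for terminate I/O to finish; cleared requests are set back
	 * to queued so they are restarted after resume */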
2667 	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
2668 		wait_event(dasd_flush_wq,
2669 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
2670 		if (cqr->status == DASD_CQR_CLEARED)
2671 			cqr->status = DASD_CQR_QUEUED;
2672 	}
2673 	/* re-append the frozen requests to the end of the ccw_queue */
2674 	spin_lock_irq(get_ccwdev_lock(cdev));
2675 	list_splice_tail(&freeze_queue, &device->ccw_queue);
2676 	spin_unlock_irq(get_ccwdev_lock(cdev));
2677 
2678 	if (device->discipline->freeze)
2679 		rc = device->discipline->freeze(device);
2680 
2681 	dasd_put_device(device);
2682 	return rc;
2683 }
2684 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2685 
2686 int dasd_generic_restore_device(struct ccw_device *cdev)
2687 {
2688 	struct dasd_device *device = dasd_device_from_cdev(cdev);
2689 	int rc = 0;
2690 
2691 	if (IS_ERR(device))
2692 		return PTR_ERR(device);
2693 
2694 	/* allow new IO again */
2695 	dasd_device_remove_stop_bits(device,
2696 				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
2697 
2698 	dasd_schedule_device_bh(device);
2699 
2700 	/*
2701 	 * Call the discipline restore function. If the device is still
2702 	 * stopped (e.g. disconnected), do nothing.
2703 	 */
2704 	if (device->discipline->restore && !(device->stopped))
2705 		rc = device->discipline->restore(device);
2706 	if (rc || device->stopped)
2707 		/*
2708 		 * if the resume failed for the DASD we put it in
2709 		 * an UNRESUMED stop state
2710 		 */
2711 		device->stopped |= DASD_UNRESUMED_PM;
2712 
2713 	if (device->block)
2714 		dasd_schedule_block_bh(device->block);
2715 
2716 	dasd_put_device(device);
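	/* a failed restore is reported via the DASD_UNRESUMED_PM stop
	 * bit rather than via the return code */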
2717 	return 0;
2718 }
2719 EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2720 
2721 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2722 						   void *rdc_buffer,
2723 						   int rdc_buffer_size,
2724 						   int magic)
2725 {
2726 	struct dasd_ccw_req *cqr;
2727 	struct ccw1 *ccw;
2728 	unsigned long *idaw;
2729 
2730 	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2731 
2732 	if (IS_ERR(cqr)) {
2733 		/* internal error 13 - Allocating the RDC request failed */
2734 		dev_err(&device->cdev->dev,
2735 			 "An error occurred in the DASD device driver, "
2736 			 "reason=%s\n", "13");
2737 		return cqr;
2738 	}
2739 
2740 	ccw = cqr->cpaddr;
2741 	ccw->cmd_code = CCW_CMD_RDC;
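	/* the CCW data address is 31 bits wide; buffers above 2 GB need
	 * an indirect data address list (IDAL) */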
2742 	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2743 		idaw = (unsigned long *) (cqr->data);
2744 		ccw->cda = (__u32)(addr_t) idaw;
2745 		ccw->flags = CCW_FLAG_IDA;
2746 		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2747 	} else {
2748 		ccw->cda = (__u32)(addr_t) rdc_buffer;
2749 		ccw->flags = 0;
2750 	}
2751 
2752 	ccw->count = rdc_buffer_size;
2753 	cqr->startdev = device;
2754 	cqr->memdev = device;
2755 	cqr->expires = 10*HZ;
2756 	cqr->retries = 256;
2757 	cqr->buildclk = get_clock();
2758 	cqr->status = DASD_CQR_FILLED;
2759 	return cqr;
2760 }
2761 
2762 
2763 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2764 				void *rdc_buffer, int rdc_buffer_size)
2765 {
2766 	int ret;
2767 	struct dasd_ccw_req *cqr;
2768 
2769 	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2770 				     magic);
2771 	if (IS_ERR(cqr))
2772 		return PTR_ERR(cqr);
2773 
2774 	ret = dasd_sleep_on(cqr);
2775 	dasd_sfree_request(cqr, cqr->memdev);
2776 	return ret;
2777 }
2778 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
2779 
2780 /*
2781  *   In command mode and transport mode we need to look for sense
2782  *   data in different places. The sense data itself is always
2783  *   an array of 32 bytes, so we can unify the sense data access
2784  *   for both modes.
2785  */
2786 char *dasd_get_sense(struct irb *irb)
2787 {
2788 	struct tsb *tsb = NULL;
2789 	char *sense = NULL;
2790 
2791 	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2792 		if (irb->scsw.tm.tcw)
2793 			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2794 					  irb->scsw.tm.tcw);
2795 		if (tsb && tsb->length == 64 && tsb->flags)
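			/* the three low-order flag bits select the tsa format */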
2796 			switch (tsb->flags & 0x07) {
2797 			case 1:	/* tsa_iostat */
2798 				sense = tsb->tsa.iostat.sense;
2799 				break;
2800 			case 2: /* tsa_ddpc */
2801 				sense = tsb->tsa.ddpc.sense;
2802 				break;
2803 			default:
2804 				/* currently we don't use interrogate data */
2805 				break;
2806 			}
2807 	} else if (irb->esw.esw0.erw.cons) {
2808 		sense = irb->ecw;
2809 	}
2810 	return sense;
2811 }
2812 EXPORT_SYMBOL_GPL(dasd_get_sense);
2813 
2814 static int __init dasd_init(void)
2815 {
2816 	int rc;
2817 
2818 	init_waitqueue_head(&dasd_init_waitq);
2819 	init_waitqueue_head(&dasd_flush_wq);
2820 	init_waitqueue_head(&generic_waitq);
2821 
2822 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
2823 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2824 	if (dasd_debug_area == NULL) {
2825 		rc = -ENOMEM;
2826 		goto failed;
2827 	}
2828 	debug_register_view(dasd_debug_area, &debug_sprintf_view);
2829 	debug_set_level(dasd_debug_area, DBF_WARNING);
2830 
2831 	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2832 
2833 	dasd_diag_discipline_pointer = NULL;
2834 
2835 	rc = dasd_devmap_init();
2836 	if (rc)
2837 		goto failed;
2838 	rc = dasd_gendisk_init();
2839 	if (rc)
2840 		goto failed;
2841 	rc = dasd_parse();
2842 	if (rc)
2843 		goto failed;
2844 	rc = dasd_eer_init();
2845 	if (rc)
2846 		goto failed;
2847 #ifdef CONFIG_PROC_FS
2848 	rc = dasd_proc_init();
2849 	if (rc)
2850 		goto failed;
2851 #endif
2852 
2853 	return 0;
2854 failed:
2855 	pr_info("The DASD device driver could not be initialized\n");
2856 	dasd_exit();
2857 	return rc;
2858 }
2859 
2860 module_init(dasd_init);
2861 module_exit(dasd_exit);
2862 
2863 EXPORT_SYMBOL(dasd_debug_area);
2864 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2865 
2866 EXPORT_SYMBOL(dasd_add_request_head);
2867 EXPORT_SYMBOL(dasd_add_request_tail);
2868 EXPORT_SYMBOL(dasd_cancel_req);
2869 EXPORT_SYMBOL(dasd_device_clear_timer);
2870 EXPORT_SYMBOL(dasd_block_clear_timer);
2871 EXPORT_SYMBOL(dasd_enable_device);
2872 EXPORT_SYMBOL(dasd_int_handler);
2873 EXPORT_SYMBOL(dasd_kfree_request);
2874 EXPORT_SYMBOL(dasd_kick_device);
2875 EXPORT_SYMBOL(dasd_kmalloc_request);
2876 EXPORT_SYMBOL(dasd_schedule_device_bh);
2877 EXPORT_SYMBOL(dasd_schedule_block_bh);
2878 EXPORT_SYMBOL(dasd_set_target_state);
2879 EXPORT_SYMBOL(dasd_device_set_timer);
2880 EXPORT_SYMBOL(dasd_block_set_timer);
2881 EXPORT_SYMBOL(dasd_sfree_request);
2882 EXPORT_SYMBOL(dasd_sleep_on);
2883 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2884 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2885 EXPORT_SYMBOL(dasd_smalloc_request);
2886 EXPORT_SYMBOL(dasd_start_IO);
2887 EXPORT_SYMBOL(dasd_term_IO);
2888 
2889 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2890 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2891 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2892 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2893 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2894 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2895 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2896 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2897 EXPORT_SYMBOL_GPL(dasd_free_block);
2898