xref: /linux/drivers/s390/block/dasd.c (revision cc25df3e2e22a956d3a0d427369367b4a901d203)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  */
10 
11 #include <linux/export.h>
12 #include <linux/kmod.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/ctype.h>
16 #include <linux/major.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>
19 #include <linux/async.h>
20 #include <linux/mutex.h>
21 #include <linux/debugfs.h>
22 #include <linux/seq_file.h>
23 #include <linux/vmalloc.h>
24 
25 #include <asm/machine.h>
26 #include <asm/ccwdev.h>
27 #include <asm/ebcdic.h>
28 #include <asm/idals.h>
29 #include <asm/itcw.h>
30 #include <asm/diag.h>
31 
32 #include "dasd_int.h"
33 /*
34  * SECTION: Constant definitions to be used within this file
35  */
36 #define DASD_CHANQ_MAX_SIZE 4
37 
38 #define DASD_DIAG_MOD		"dasd_diag_mod"
39 
40 /*
41  * SECTION: exported variables of dasd.c
42  */
43 debug_info_t *dasd_debug_area;
44 EXPORT_SYMBOL(dasd_debug_area);
45 static struct dentry *dasd_debugfs_root_entry;
46 struct dasd_discipline *dasd_diag_discipline_pointer;
47 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
48 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
49 
50 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
51 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
52 		   " Copyright IBM Corp. 2000");
53 MODULE_LICENSE("GPL");
54 
55 /*
56  * SECTION: prototypes for static functions of dasd.c
57  */
58 static int dasd_flush_block_queue(struct dasd_block *);
59 static void dasd_device_tasklet(unsigned long);
60 static void dasd_block_tasklet(unsigned long);
61 static void do_kick_device(struct work_struct *);
62 static void do_reload_device(struct work_struct *);
63 static void do_requeue_requests(struct work_struct *);
64 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
65 static void dasd_device_timeout(struct timer_list *);
66 static void dasd_block_timeout(struct timer_list *);
67 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
68 static void dasd_profile_init(struct dasd_profile *, struct dentry *);
69 static void dasd_profile_exit(struct dasd_profile *);
70 static void dasd_hosts_init(struct dentry *, struct dasd_device *);
71 static void dasd_hosts_exit(struct dasd_device *);
72 static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
73 				   unsigned int);
74 /*
75  * SECTION: Operations on the device structure.
76  */
77 static wait_queue_head_t dasd_init_waitq;
78 static wait_queue_head_t dasd_flush_wq;
79 static wait_queue_head_t generic_waitq;
80 static wait_queue_head_t shutdown_waitq;
81 
82 /*
83  * Allocate memory for a new device structure.
84  */
85 struct dasd_device *dasd_alloc_device(void)
86 {
87 	struct dasd_device *device;
88 
89 	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
90 	if (!device)
91 		return ERR_PTR(-ENOMEM);
92 
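	/*
	 * Note: the three memory pools below are allocated with GFP_DMA
	 * because channel programs must be addressable by the channel
	 * subsystem, i.e. they have to reside in 31-bit (below 2 GB)
	 * storage on s390.
	 */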
93 	/* Get two pages for normal block device operations. */
94 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
95 	if (!device->ccw_mem) {
96 		kfree(device);
97 		return ERR_PTR(-ENOMEM);
98 	}
99 	/* Get one page for error recovery. */
100 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
101 	if (!device->erp_mem) {
102 		free_pages((unsigned long) device->ccw_mem, 1);
103 		kfree(device);
104 		return ERR_PTR(-ENOMEM);
105 	}
106 	/* Get two pages for ese format. */
107 	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
108 	if (!device->ese_mem) {
109 		free_page((unsigned long) device->erp_mem);
110 		free_pages((unsigned long) device->ccw_mem, 1);
111 		kfree(device);
112 		return ERR_PTR(-ENOMEM);
113 	}
114 
115 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
116 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
117 	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
118 	spin_lock_init(&device->mem_lock);
119 	atomic_set(&device->tasklet_scheduled, 0);
120 	tasklet_init(&device->tasklet, dasd_device_tasklet,
121 		     (unsigned long) device);
122 	INIT_LIST_HEAD(&device->ccw_queue);
123 	timer_setup(&device->timer, dasd_device_timeout, 0);
124 	INIT_WORK(&device->kick_work, do_kick_device);
125 	INIT_WORK(&device->reload_device, do_reload_device);
126 	INIT_WORK(&device->requeue_requests, do_requeue_requests);
127 	device->state = DASD_STATE_NEW;
128 	device->target = DASD_STATE_NEW;
129 	mutex_init(&device->state_mutex);
130 	spin_lock_init(&device->profile.lock);
131 	return device;
132 }
133 
134 /*
135  * Free memory of a device structure.
136  */
137 void dasd_free_device(struct dasd_device *device)
138 {
139 	kfree(device->private);
140 	free_pages((unsigned long) device->ese_mem, 1);
141 	free_page((unsigned long) device->erp_mem);
142 	free_pages((unsigned long) device->ccw_mem, 1);
143 	kfree(device);
144 }
145 
146 /*
147  * Allocate memory for a new block structure.
148  */
149 struct dasd_block *dasd_alloc_block(void)
150 {
151 	struct dasd_block *block;
152 
153 	block = kzalloc(sizeof(*block), GFP_ATOMIC);
154 	if (!block)
155 		return ERR_PTR(-ENOMEM);
156 	/* open_count = 0 means device online but not in use */
157 	atomic_set(&block->open_count, -1);
158 
159 	atomic_set(&block->tasklet_scheduled, 0);
160 	tasklet_init(&block->tasklet, dasd_block_tasklet,
161 		     (unsigned long) block);
162 	INIT_LIST_HEAD(&block->ccw_queue);
163 	spin_lock_init(&block->queue_lock);
164 	INIT_LIST_HEAD(&block->format_list);
165 	spin_lock_init(&block->format_lock);
166 	timer_setup(&block->timer, dasd_block_timeout, 0);
167 	spin_lock_init(&block->profile.lock);
168 
169 	return block;
170 }
171 EXPORT_SYMBOL_GPL(dasd_alloc_block);
172 
173 /*
174  * Free memory of a block structure.
175  */
176 void dasd_free_block(struct dasd_block *block)
177 {
178 	kfree(block);
179 }
180 EXPORT_SYMBOL_GPL(dasd_free_block);
181 
182 /*
183  * Make a new device known to the system.
184  */
185 static int dasd_state_new_to_known(struct dasd_device *device)
186 {
187 	/*
188 	 * As long as the device is not in state DASD_STATE_NEW we want to
189 	 * keep the reference count > 0.
190 	 */
191 	dasd_get_device(device);
192 	device->state = DASD_STATE_KNOWN;
193 	return 0;
194 }
195 
196 /*
197  * Let the system forget about a device.
198  */
199 static int dasd_state_known_to_new(struct dasd_device *device)
200 {
201 	/* Disable extended error reporting for this device. */
202 	dasd_eer_disable(device);
203 	device->state = DASD_STATE_NEW;
204 
205 	/* Give up reference we took in dasd_state_new_to_known. */
206 	dasd_put_device(device);
207 	return 0;
208 }
209 
210 /*
211  * Allocate the gendisk, debugfs entries and debug area for the device.
212  */
213 static int dasd_state_known_to_basic(struct dasd_device *device)
214 {
215 	struct dasd_block *block = device->block;
216 	int rc = 0;
217 
218 	/* Allocate and register gendisk structure. */
219 	if (block) {
220 		rc = dasd_gendisk_alloc(block);
221 		if (rc)
222 			return rc;
223 		block->debugfs_dentry =
224 			debugfs_create_dir(block->gdp->disk_name,
225 					   dasd_debugfs_root_entry);
226 		dasd_profile_init(&block->profile, block->debugfs_dentry);
227 		if (dasd_global_profile_level == DASD_PROFILE_ON)
228 			dasd_profile_on(&device->block->profile);
229 	}
230 	device->debugfs_dentry =
231 		debugfs_create_dir(dev_name(&device->cdev->dev),
232 				   dasd_debugfs_root_entry);
233 	dasd_profile_init(&device->profile, device->debugfs_dentry);
234 	dasd_hosts_init(device->debugfs_dentry, device);
235 
236 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
237 	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
238 					    8 * sizeof(long));
239 	debug_register_view(device->debug_area, &debug_sprintf_view);
240 	debug_set_level(device->debug_area, DBF_WARNING);
241 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
242 
243 	device->state = DASD_STATE_BASIC;
244 
245 	return rc;
246 }
247 
248 /*
249  * Free the gendisk, debugfs entries and debug area. Terminate any running i/o.
250  */
251 static int dasd_state_basic_to_known(struct dasd_device *device)
252 {
253 	int rc;
254 
255 	if (device->discipline->basic_to_known) {
256 		rc = device->discipline->basic_to_known(device);
257 		if (rc)
258 			return rc;
259 	}
260 
261 	if (device->block) {
262 		dasd_profile_exit(&device->block->profile);
263 		debugfs_remove(device->block->debugfs_dentry);
264 		dasd_gendisk_free(device->block);
265 		dasd_block_clear_timer(device->block);
266 	}
267 	rc = dasd_flush_device_queue(device);
268 	if (rc)
269 		return rc;
270 	dasd_device_clear_timer(device);
271 	dasd_profile_exit(&device->profile);
272 	dasd_hosts_exit(device);
273 	debugfs_remove(device->debugfs_dentry);
274 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
275 	if (device->debug_area != NULL) {
276 		debug_unregister(device->debug_area);
277 		device->debug_area = NULL;
278 	}
279 	device->state = DASD_STATE_KNOWN;
280 	return 0;
281 }
282 
283 /*
284  * Do the initial analysis. The do_analysis function may return
285  * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
286  * until the discipline decides to continue the startup sequence
287  * by calling the function dasd_change_state. The eckd discipline
288  * uses this to start a ccw that detects the format. The completion
289  * interrupt for this detection ccw uses the kernel event daemon to
290  * trigger the call to dasd_change_state. All this is done in the
291  * discipline code, see dasd_eckd.c.
292  * After the analysis ccw is done (do_analysis returned 0) the block
293  * device is set up.
294  * In case the analysis returns an error, the device setup is stopped
295  * (a fake disk was already added to allow formatting).
296  */
297 static int dasd_state_basic_to_ready(struct dasd_device *device)
298 {
299 	struct dasd_block *block = device->block;
300 	struct queue_limits lim;
301 	int rc = 0;
302 
303 	/* make disk known with correct capacity */
304 	if (!block) {
305 		device->state = DASD_STATE_READY;
306 		goto out;
307 	}
308 
309 	if (block->base->discipline->do_analysis != NULL)
310 		rc = block->base->discipline->do_analysis(block);
311 	if (rc) {
312 		if (rc == -EAGAIN)
313 			return rc;
314 		device->state = DASD_STATE_UNFMT;
315 		kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
316 			       KOBJ_CHANGE);
317 		goto out;
318 	}
319 
320 	lim = queue_limits_start_update(block->gdp->queue);
321 	lim.max_dev_sectors = device->discipline->max_sectors(block);
322 	lim.max_hw_sectors = lim.max_dev_sectors;
323 	lim.logical_block_size = block->bp_block;
324 	/*
325 	 * Adjust dma_alignment to match block_size - 1
326 	 * to ensure proper buffer alignment checks in the block layer.
327 	 */
328 	lim.dma_alignment = lim.logical_block_size - 1;
329 
330 	if (device->discipline->has_discard) {
331 		unsigned int max_bytes;
332 
333 		lim.discard_granularity = block->bp_block;
334 
335 		/* Calculate max_discard_sectors and make it PAGE aligned */
336 		max_bytes = USHRT_MAX * block->bp_block;
337 		max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
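		/*
		 * Example: with a 4 KiB bp_block this caps a single discard
		 * at 65535 * 4096 bytes (just under 256 MiB); ALIGN_DOWN only
		 * changes the value when USHRT_MAX * bp_block is not already
		 * a multiple of PAGE_SIZE (e.g. for 512 byte blocks).
		 */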
338 
339 		lim.max_hw_discard_sectors = max_bytes / block->bp_block;
340 		lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
341 	}
342 	rc = queue_limits_commit_update(block->gdp->queue, &lim);
343 	if (rc)
344 		return rc;
345 
346 	set_capacity(block->gdp, block->blocks << block->s2b_shift);
347 	device->state = DASD_STATE_READY;
348 
349 	rc = dasd_scan_partitions(block);
350 	if (rc) {
351 		device->state = DASD_STATE_BASIC;
352 		return rc;
353 	}
354 
355 out:
356 	if (device->discipline->basic_to_ready)
357 		rc = device->discipline->basic_to_ready(device);
358 	return rc;
359 }
360 
361 static inline
362 int _wait_for_empty_queues(struct dasd_device *device)
363 {
364 	if (device->block)
365 		return list_empty(&device->ccw_queue) &&
366 			list_empty(&device->block->ccw_queue);
367 	else
368 		return list_empty(&device->ccw_queue);
369 }
370 
371 /*
372  * Remove device from block device layer. Destroy dirty buffers.
373  * Forget format information. Check if the target level is basic
374  * and if it is, create a fake disk for formatting.
375  */
376 static int dasd_state_ready_to_basic(struct dasd_device *device)
377 {
378 	int rc;
379 
380 	device->state = DASD_STATE_BASIC;
381 	if (device->block) {
382 		struct dasd_block *block = device->block;
383 		rc = dasd_flush_block_queue(block);
384 		if (rc) {
385 			device->state = DASD_STATE_READY;
386 			return rc;
387 		}
388 		dasd_destroy_partitions(block);
389 		block->blocks = 0;
390 		block->bp_block = 0;
391 		block->s2b_shift = 0;
392 	}
393 	return 0;
394 }
395 
396 /*
397  * Back to basic.
398  */
399 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
400 {
401 	device->state = DASD_STATE_BASIC;
402 	return 0;
403 }
404 
405 /*
406  * Make the device online and schedule the bottom half to start
407  * the requeueing of requests from the linux request queue to the
408  * ccw queue.
409  */
410 static int
411 dasd_state_ready_to_online(struct dasd_device * device)
412 {
413 	device->state = DASD_STATE_ONLINE;
414 	if (device->block) {
415 		dasd_schedule_block_bh(device->block);
416 		if ((device->features & DASD_FEATURE_USERAW)) {
417 			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
418 					KOBJ_CHANGE);
419 			return 0;
420 		}
421 		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
422 			    KOBJ_CHANGE);
423 	}
424 	return 0;
425 }
426 
427 /*
428  * Stop the requeueing of requests (reverse of the ready to online transition).
429  */
430 static int dasd_state_online_to_ready(struct dasd_device *device)
431 {
432 	int rc;
433 
434 	if (device->discipline->online_to_ready) {
435 		rc = device->discipline->online_to_ready(device);
436 		if (rc)
437 			return rc;
438 	}
439 
440 	device->state = DASD_STATE_READY;
441 	if (device->block && !(device->features & DASD_FEATURE_USERAW))
442 		disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
443 			    KOBJ_CHANGE);
444 	return 0;
445 }
446 
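/*
 * State machine overview (as implemented by the transition helpers above):
 *
 *   NEW -> KNOWN -> BASIC -> READY -> ONLINE
 *                      \
 *                       `-> UNFMT (format analysis failed)
 *
 * dasd_increase_state() walks this ladder upwards towards device->target,
 * dasd_decrease_state() walks it back down.
 */
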
447 /*
448  * Device startup state changes.
449  */
450 static int dasd_increase_state(struct dasd_device *device)
451 {
452 	int rc;
453 
454 	rc = 0;
455 	if (device->state == DASD_STATE_NEW &&
456 	    device->target >= DASD_STATE_KNOWN)
457 		rc = dasd_state_new_to_known(device);
458 
459 	if (!rc &&
460 	    device->state == DASD_STATE_KNOWN &&
461 	    device->target >= DASD_STATE_BASIC)
462 		rc = dasd_state_known_to_basic(device);
463 
464 	if (!rc &&
465 	    device->state == DASD_STATE_BASIC &&
466 	    device->target >= DASD_STATE_READY)
467 		rc = dasd_state_basic_to_ready(device);
468 
469 	if (!rc &&
470 	    device->state == DASD_STATE_UNFMT &&
471 	    device->target > DASD_STATE_UNFMT)
472 		rc = -EPERM;
473 
474 	if (!rc &&
475 	    device->state == DASD_STATE_READY &&
476 	    device->target >= DASD_STATE_ONLINE)
477 		rc = dasd_state_ready_to_online(device);
478 
479 	return rc;
480 }
481 
482 /*
483  * Device shutdown state changes.
484  */
485 static int dasd_decrease_state(struct dasd_device *device)
486 {
487 	int rc;
488 
489 	rc = 0;
490 	if (device->state == DASD_STATE_ONLINE &&
491 	    device->target <= DASD_STATE_READY)
492 		rc = dasd_state_online_to_ready(device);
493 
494 	if (!rc &&
495 	    device->state == DASD_STATE_READY &&
496 	    device->target <= DASD_STATE_BASIC)
497 		rc = dasd_state_ready_to_basic(device);
498 
499 	if (!rc &&
500 	    device->state == DASD_STATE_UNFMT &&
501 	    device->target <= DASD_STATE_BASIC)
502 		rc = dasd_state_unfmt_to_basic(device);
503 
504 	if (!rc &&
505 	    device->state == DASD_STATE_BASIC &&
506 	    device->target <= DASD_STATE_KNOWN)
507 		rc = dasd_state_basic_to_known(device);
508 
509 	if (!rc &&
510 	    device->state == DASD_STATE_KNOWN &&
511 	    device->target <= DASD_STATE_NEW)
512 		rc = dasd_state_known_to_new(device);
513 
514 	return rc;
515 }
516 
517 /*
518  * This is the main startup/shutdown routine.
519  */
520 static void dasd_change_state(struct dasd_device *device)
521 {
522 	int rc;
523 
524 	if (device->state == device->target)
525 		/* Already where we want to go today... */
526 		return;
527 	if (device->state < device->target)
528 		rc = dasd_increase_state(device);
529 	else
530 		rc = dasd_decrease_state(device);
531 	if (rc == -EAGAIN)
532 		return;
533 	if (rc)
534 		device->target = device->state;
535 
536 	/* let user-space know that the device status changed */
537 	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
538 
539 	if (device->state == device->target)
540 		wake_up(&dasd_init_waitq);
541 }
542 
543 /*
544  * Kick starter for devices that did not complete the startup/shutdown
545  * procedure or were sleeping because of a pending state.
546  * dasd_kick_device will queue a call to do_kick_device on the kernel
547  * event daemon.
548  */
549 static void do_kick_device(struct work_struct *work)
550 {
551 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
552 	mutex_lock(&device->state_mutex);
553 	dasd_change_state(device);
554 	mutex_unlock(&device->state_mutex);
555 	dasd_schedule_device_bh(device);
556 	dasd_put_device(device);
557 }
558 
559 void dasd_kick_device(struct dasd_device *device)
560 {
561 	dasd_get_device(device);
562 	/* queue call to dasd_kick_device to the kernel event daemon. */
563 	if (!schedule_work(&device->kick_work))
564 		dasd_put_device(device);
565 }
566 EXPORT_SYMBOL(dasd_kick_device);
567 
568 /*
569  * dasd_reload_device will queue a call to do_reload_device on the kernel
570  * event daemon.
571  */
572 static void do_reload_device(struct work_struct *work)
573 {
574 	struct dasd_device *device = container_of(work, struct dasd_device,
575 						  reload_device);
576 	device->discipline->reload(device);
577 	dasd_put_device(device);
578 }
579 
580 void dasd_reload_device(struct dasd_device *device)
581 {
582 	dasd_get_device(device);
583 	/* queue call to dasd_reload_device to the kernel event daemon. */
584 	if (!schedule_work(&device->reload_device))
585 		dasd_put_device(device);
586 }
587 EXPORT_SYMBOL(dasd_reload_device);
588 
589 /*
590  * Set the target state for a device and start the state change.
591  */
592 void dasd_set_target_state(struct dasd_device *device, int target)
593 {
594 	dasd_get_device(device);
595 	mutex_lock(&device->state_mutex);
596 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
597 	if (dasd_probeonly && target > DASD_STATE_READY)
598 		target = DASD_STATE_READY;
599 	if (device->target != target) {
600 		if (device->state == target)
601 			wake_up(&dasd_init_waitq);
602 		device->target = target;
603 	}
604 	if (device->state != device->target)
605 		dasd_change_state(device);
606 	mutex_unlock(&device->state_mutex);
607 	dasd_put_device(device);
608 }
609 
610 /*
611  * Enable a device and wait until it has reached its target state.
612  */
613 static inline int _wait_for_device(struct dasd_device *device)
614 {
615 	return (device->state == device->target);
616 }
617 
618 void dasd_enable_device(struct dasd_device *device)
619 {
620 	dasd_set_target_state(device, DASD_STATE_ONLINE);
621 	if (device->state <= DASD_STATE_KNOWN)
622 		/* No discipline for device found. */
623 		dasd_set_target_state(device, DASD_STATE_NEW);
624 	/* Now wait for the devices to come up. */
625 	wait_event(dasd_init_waitq, _wait_for_device(device));
626 
627 	dasd_reload_device(device);
628 	if (device->discipline->kick_validate)
629 		device->discipline->kick_validate(device);
630 }
631 EXPORT_SYMBOL(dasd_enable_device);
632 
633 /*
634  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
635  */
636 
637 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
638 
639 #ifdef CONFIG_DASD_PROFILE
640 struct dasd_profile dasd_global_profile = {
641 	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
642 };
643 static struct dentry *dasd_debugfs_global_entry;
644 
645 /*
646  * Add profiling information for cqr before execution.
647  */
648 static void dasd_profile_start(struct dasd_block *block,
649 			       struct dasd_ccw_req *cqr,
650 			       struct request *req)
651 {
652 	struct list_head *l;
653 	unsigned int counter;
654 	struct dasd_device *device;
655 
656 	/* count the length of the chanq for statistics */
657 	counter = 0;
658 	if (dasd_global_profile_level || block->profile.data)
659 		list_for_each(l, &block->ccw_queue)
660 			if (++counter >= 31)
661 				break;
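	/*
	 * counter is clamped to 31 because the dasd_io_nr_req[] and
	 * dasd_read_nr_req[] histograms indexed below have 32 buckets.
	 */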
662 
663 	spin_lock(&dasd_global_profile.lock);
664 	if (dasd_global_profile.data) {
665 		dasd_global_profile.data->dasd_io_nr_req[counter]++;
666 		if (rq_data_dir(req) == READ)
667 			dasd_global_profile.data->dasd_read_nr_req[counter]++;
668 	}
669 	spin_unlock(&dasd_global_profile.lock);
670 
671 	spin_lock(&block->profile.lock);
672 	if (block->profile.data) {
673 		block->profile.data->dasd_io_nr_req[counter]++;
674 		if (rq_data_dir(req) == READ)
675 			block->profile.data->dasd_read_nr_req[counter]++;
676 	}
677 	spin_unlock(&block->profile.lock);
678 
679 	/*
680 	 * We count the request for the start device, even though it may run on
681 	 * some other device due to error recovery. This way we make sure that
682 	 * we count each request only once.
683 	 */
684 	device = cqr->startdev;
685 	if (!device->profile.data)
686 		return;
687 
688 	spin_lock(get_ccwdev_lock(device->cdev));
689 	counter = 1; /* request is not yet queued on the start device */
690 	list_for_each(l, &device->ccw_queue)
691 		if (++counter >= 31)
692 			break;
693 	spin_unlock(get_ccwdev_lock(device->cdev));
694 
695 	spin_lock(&device->profile.lock);
696 	device->profile.data->dasd_io_nr_req[counter]++;
697 	if (rq_data_dir(req) == READ)
698 		device->profile.data->dasd_read_nr_req[counter]++;
699 	spin_unlock(&device->profile.lock);
700 }
701 
702 /*
703  * Add profiling information for cqr after execution.
704  */
705 
706 #define dasd_profile_counter(value, index)			   \
707 {								   \
708 	for (index = 0; index < 31 && value >> (2+index); index++) \
709 		;						   \
710 }
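/*
 * dasd_profile_counter() selects a logarithmic histogram bucket: the loop
 * stops at the first index for which value >> (2 + index) is zero, i.e.
 * value < 2^(index + 2).  For example value 3 lands in bucket 0, value 4 in
 * bucket 1, value 100 in bucket 5; anything >= 2^32 ends up in bucket 31.
 */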
711 
712 static void dasd_profile_end_add_data(struct dasd_profile_info *data,
713 				      int is_alias,
714 				      int is_tpm,
715 				      int is_read,
716 				      long sectors,
717 				      int sectors_ind,
718 				      int tottime_ind,
719 				      int tottimeps_ind,
720 				      int strtime_ind,
721 				      int irqtime_ind,
722 				      int irqtimeps_ind,
723 				      int endtime_ind)
724 {
725 	/* in case of an overflow, reset the whole profile */
726 	if (data->dasd_io_reqs == UINT_MAX) {
727 			memset(data, 0, sizeof(*data));
728 			ktime_get_real_ts64(&data->starttod);
729 	}
730 	data->dasd_io_reqs++;
731 	data->dasd_io_sects += sectors;
732 	if (is_alias)
733 		data->dasd_io_alias++;
734 	if (is_tpm)
735 		data->dasd_io_tpm++;
736 
737 	data->dasd_io_secs[sectors_ind]++;
738 	data->dasd_io_times[tottime_ind]++;
739 	data->dasd_io_timps[tottimeps_ind]++;
740 	data->dasd_io_time1[strtime_ind]++;
741 	data->dasd_io_time2[irqtime_ind]++;
742 	data->dasd_io_time2ps[irqtimeps_ind]++;
743 	data->dasd_io_time3[endtime_ind]++;
744 
745 	if (is_read) {
746 		data->dasd_read_reqs++;
747 		data->dasd_read_sects += sectors;
748 		if (is_alias)
749 			data->dasd_read_alias++;
750 		if (is_tpm)
751 			data->dasd_read_tpm++;
752 		data->dasd_read_secs[sectors_ind]++;
753 		data->dasd_read_times[tottime_ind]++;
754 		data->dasd_read_time1[strtime_ind]++;
755 		data->dasd_read_time2[irqtime_ind]++;
756 		data->dasd_read_time3[endtime_ind]++;
757 	}
758 }
759 
760 static void dasd_profile_end(struct dasd_block *block,
761 			     struct dasd_ccw_req *cqr,
762 			     struct request *req)
763 {
764 	unsigned long strtime, irqtime, endtime, tottime;
765 	unsigned long tottimeps, sectors;
766 	struct dasd_device *device;
767 	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
768 	int irqtime_ind, irqtimeps_ind, endtime_ind;
769 	struct dasd_profile_info *data;
770 
771 	device = cqr->startdev;
772 	if (!(dasd_global_profile_level ||
773 	      block->profile.data ||
774 	      device->profile.data))
775 		return;
776 
777 	sectors = blk_rq_sectors(req);
778 	if (!cqr->buildclk || !cqr->startclk ||
779 	    !cqr->stopclk || !cqr->endclk ||
780 	    !sectors)
781 		return;
782 
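	/*
	 * The cqr clock values are s390 TOD clock readings; shifting the
	 * difference right by 12 converts it to microseconds, since TOD
	 * bit 51 ticks once per microsecond.
	 */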
783 	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
784 	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
785 	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
786 	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
787 	tottimeps = tottime / sectors;
788 
789 	dasd_profile_counter(sectors, sectors_ind);
790 	dasd_profile_counter(tottime, tottime_ind);
791 	dasd_profile_counter(tottimeps, tottimeps_ind);
792 	dasd_profile_counter(strtime, strtime_ind);
793 	dasd_profile_counter(irqtime, irqtime_ind);
794 	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
795 	dasd_profile_counter(endtime, endtime_ind);
796 
797 	spin_lock(&dasd_global_profile.lock);
798 	if (dasd_global_profile.data) {
799 		data = dasd_global_profile.data;
800 		data->dasd_sum_times += tottime;
801 		data->dasd_sum_time_str += strtime;
802 		data->dasd_sum_time_irq += irqtime;
803 		data->dasd_sum_time_end += endtime;
804 		dasd_profile_end_add_data(dasd_global_profile.data,
805 					  cqr->startdev != block->base,
806 					  cqr->cpmode == 1,
807 					  rq_data_dir(req) == READ,
808 					  sectors, sectors_ind, tottime_ind,
809 					  tottimeps_ind, strtime_ind,
810 					  irqtime_ind, irqtimeps_ind,
811 					  endtime_ind);
812 	}
813 	spin_unlock(&dasd_global_profile.lock);
814 
815 	spin_lock(&block->profile.lock);
816 	if (block->profile.data) {
817 		data = block->profile.data;
818 		data->dasd_sum_times += tottime;
819 		data->dasd_sum_time_str += strtime;
820 		data->dasd_sum_time_irq += irqtime;
821 		data->dasd_sum_time_end += endtime;
822 		dasd_profile_end_add_data(block->profile.data,
823 					  cqr->startdev != block->base,
824 					  cqr->cpmode == 1,
825 					  rq_data_dir(req) == READ,
826 					  sectors, sectors_ind, tottime_ind,
827 					  tottimeps_ind, strtime_ind,
828 					  irqtime_ind, irqtimeps_ind,
829 					  endtime_ind);
830 	}
831 	spin_unlock(&block->profile.lock);
832 
833 	spin_lock(&device->profile.lock);
834 	if (device->profile.data) {
835 		data = device->profile.data;
836 		data->dasd_sum_times += tottime;
837 		data->dasd_sum_time_str += strtime;
838 		data->dasd_sum_time_irq += irqtime;
839 		data->dasd_sum_time_end += endtime;
840 		dasd_profile_end_add_data(device->profile.data,
841 					  cqr->startdev != block->base,
842 					  cqr->cpmode == 1,
843 					  rq_data_dir(req) == READ,
844 					  sectors, sectors_ind, tottime_ind,
845 					  tottimeps_ind, strtime_ind,
846 					  irqtime_ind, irqtimeps_ind,
847 					  endtime_ind);
848 	}
849 	spin_unlock(&device->profile.lock);
850 }
851 
852 void dasd_profile_reset(struct dasd_profile *profile)
853 {
854 	struct dasd_profile_info *data;
855 
856 	spin_lock_bh(&profile->lock);
857 	data = profile->data;
858 	if (!data) {
859 		spin_unlock_bh(&profile->lock);
860 		return;
861 	}
862 	memset(data, 0, sizeof(*data));
863 	ktime_get_real_ts64(&data->starttod);
864 	spin_unlock_bh(&profile->lock);
865 }
866 
867 int dasd_profile_on(struct dasd_profile *profile)
868 {
869 	struct dasd_profile_info *data;
870 
871 	data = kzalloc(sizeof(*data), GFP_KERNEL);
872 	if (!data)
873 		return -ENOMEM;
874 	spin_lock_bh(&profile->lock);
875 	if (profile->data) {
876 		spin_unlock_bh(&profile->lock);
877 		kfree(data);
878 		return 0;
879 	}
880 	ktime_get_real_ts64(&data->starttod);
881 	profile->data = data;
882 	spin_unlock_bh(&profile->lock);
883 	return 0;
884 }
885 
886 void dasd_profile_off(struct dasd_profile *profile)
887 {
888 	spin_lock_bh(&profile->lock);
889 	kfree(profile->data);
890 	profile->data = NULL;
891 	spin_unlock_bh(&profile->lock);
892 }
893 
894 char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
895 {
896 	char *buffer;
897 
898 	buffer = vmalloc(user_len + 1);
899 	if (buffer == NULL)
900 		return ERR_PTR(-ENOMEM);
901 	if (copy_from_user(buffer, user_buf, user_len) != 0) {
902 		vfree(buffer);
903 		return ERR_PTR(-EFAULT);
904 	}
905 	/* got the string, now strip linefeed. */
906 	if (buffer[user_len - 1] == '\n')
907 		buffer[user_len - 1] = 0;
908 	else
909 		buffer[user_len] = 0;
910 	return buffer;
911 }
912 
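/*
 * Writing to a "statistics" debugfs file controls profiling for the
 * corresponding scope.  Accepted commands are "reset", "on" and "off",
 * e.g. (sketch, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo on > /sys/kernel/debug/dasd/global/statistics
 */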
913 static ssize_t dasd_stats_write(struct file *file,
914 				const char __user *user_buf,
915 				size_t user_len, loff_t *pos)
916 {
917 	char *buffer, *str;
918 	int rc;
919 	struct seq_file *m = (struct seq_file *)file->private_data;
920 	struct dasd_profile *prof = m->private;
921 
922 	if (user_len > 65536)
923 		user_len = 65536;
924 	buffer = dasd_get_user_string(user_buf, user_len);
925 	if (IS_ERR(buffer))
926 		return PTR_ERR(buffer);
927 
928 	str = skip_spaces(buffer);
929 	rc = user_len;
930 	if (strncmp(str, "reset", 5) == 0) {
931 		dasd_profile_reset(prof);
932 	} else if (strncmp(str, "on", 2) == 0) {
933 		rc = dasd_profile_on(prof);
934 		if (rc)
935 			goto out;
936 		rc = user_len;
937 		if (prof == &dasd_global_profile) {
938 			dasd_profile_reset(prof);
939 			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
940 		}
941 	} else if (strncmp(str, "off", 3) == 0) {
942 		if (prof == &dasd_global_profile)
943 			dasd_global_profile_level = DASD_PROFILE_OFF;
944 		dasd_profile_off(prof);
945 	} else
946 		rc = -EINVAL;
947 out:
948 	vfree(buffer);
949 	return rc;
950 }
951 
952 static void dasd_stats_array(struct seq_file *m, unsigned int *array)
953 {
954 	int i;
955 
956 	for (i = 0; i < 32; i++)
957 		seq_printf(m, "%u ", array[i]);
958 	seq_putc(m, '\n');
959 }
960 
961 static void dasd_stats_seq_print(struct seq_file *m,
962 				 struct dasd_profile_info *data)
963 {
964 	seq_printf(m, "start_time %ptSp\n", &data->starttod);
965 	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
966 	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
967 	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
968 	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
969 	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
970 		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
971 	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
972 		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
973 	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
974 		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
975 	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
976 		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
977 	seq_puts(m, "histogram_sectors ");
978 	dasd_stats_array(m, data->dasd_io_secs);
979 	seq_puts(m, "histogram_io_times ");
980 	dasd_stats_array(m, data->dasd_io_times);
981 	seq_puts(m, "histogram_io_times_weighted ");
982 	dasd_stats_array(m, data->dasd_io_timps);
983 	seq_puts(m, "histogram_time_build_to_ssch ");
984 	dasd_stats_array(m, data->dasd_io_time1);
985 	seq_puts(m, "histogram_time_ssch_to_irq ");
986 	dasd_stats_array(m, data->dasd_io_time2);
987 	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
988 	dasd_stats_array(m, data->dasd_io_time2ps);
989 	seq_puts(m, "histogram_time_irq_to_end ");
990 	dasd_stats_array(m, data->dasd_io_time3);
991 	seq_puts(m, "histogram_ccw_queue_length ");
992 	dasd_stats_array(m, data->dasd_io_nr_req);
993 	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
994 	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
995 	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
996 	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
997 	seq_puts(m, "histogram_read_sectors ");
998 	dasd_stats_array(m, data->dasd_read_secs);
999 	seq_puts(m, "histogram_read_times ");
1000 	dasd_stats_array(m, data->dasd_read_times);
1001 	seq_puts(m, "histogram_read_time_build_to_ssch ");
1002 	dasd_stats_array(m, data->dasd_read_time1);
1003 	seq_puts(m, "histogram_read_time_ssch_to_irq ");
1004 	dasd_stats_array(m, data->dasd_read_time2);
1005 	seq_puts(m, "histogram_read_time_irq_to_end ");
1006 	dasd_stats_array(m, data->dasd_read_time3);
1007 	seq_puts(m, "histogram_read_ccw_queue_length ");
1008 	dasd_stats_array(m, data->dasd_read_nr_req);
1009 }
1010 
1011 static int dasd_stats_show(struct seq_file *m, void *v)
1012 {
1013 	struct dasd_profile *profile;
1014 	struct dasd_profile_info *data;
1015 
1016 	profile = m->private;
1017 	spin_lock_bh(&profile->lock);
1018 	data = profile->data;
1019 	if (!data) {
1020 		spin_unlock_bh(&profile->lock);
1021 		seq_puts(m, "disabled\n");
1022 		return 0;
1023 	}
1024 	dasd_stats_seq_print(m, data);
1025 	spin_unlock_bh(&profile->lock);
1026 	return 0;
1027 }
1028 
1029 static int dasd_stats_open(struct inode *inode, struct file *file)
1030 {
1031 	struct dasd_profile *profile = inode->i_private;
1032 	return single_open(file, dasd_stats_show, profile);
1033 }
1034 
1035 static const struct file_operations dasd_stats_raw_fops = {
1036 	.owner		= THIS_MODULE,
1037 	.open		= dasd_stats_open,
1038 	.read		= seq_read,
1039 	.llseek		= seq_lseek,
1040 	.release	= single_release,
1041 	.write		= dasd_stats_write,
1042 };
1043 
1044 static void dasd_profile_init(struct dasd_profile *profile,
1045 			      struct dentry *base_dentry)
1046 {
1047 	profile->data = NULL;
1048 	profile->dentry = debugfs_create_file("statistics", 0600, base_dentry,
1049 					      profile, &dasd_stats_raw_fops);
1050 }
1051 
1052 static void dasd_profile_exit(struct dasd_profile *profile)
1053 {
1054 	dasd_profile_off(profile);
1055 	debugfs_remove(profile->dentry);
1056 	profile->dentry = NULL;
1057 }
1058 
1059 static void dasd_statistics_removeroot(void)
1060 {
1061 	dasd_global_profile_level = DASD_PROFILE_OFF;
1062 	dasd_profile_exit(&dasd_global_profile);
1063 	debugfs_remove(dasd_debugfs_global_entry);
1064 	debugfs_remove(dasd_debugfs_root_entry);
1065 }
1066 
1067 static void dasd_statistics_createroot(void)
1068 {
1069 	dasd_debugfs_root_entry = debugfs_create_dir("dasd", NULL);
1070 	dasd_debugfs_global_entry = debugfs_create_dir("global", dasd_debugfs_root_entry);
1071 	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1072 }
1073 
1074 #else
1075 #define dasd_profile_start(block, cqr, req) do {} while (0)
1076 #define dasd_profile_end(block, cqr, req) do {} while (0)
1077 
1078 static void dasd_statistics_createroot(void)
1079 {
1080 	return;
1081 }
1082 
1083 static void dasd_statistics_removeroot(void)
1084 {
1085 	return;
1086 }
1087 
1088 static void dasd_profile_init(struct dasd_profile *profile,
1089 			      struct dentry *base_dentry)
1090 {
1091 	return;
1092 }
1093 
1094 static void dasd_profile_exit(struct dasd_profile *profile)
1095 {
1096 	return;
1097 }
1098 
1099 int dasd_profile_on(struct dasd_profile *profile)
1100 {
1101 	return 0;
1102 }
1103 
1104 #endif				/* CONFIG_DASD_PROFILE */
1105 
1106 static int dasd_hosts_show(struct seq_file *m, void *v)
1107 {
1108 	struct dasd_device *device;
1109 	int rc = -EOPNOTSUPP;
1110 
1111 	device = m->private;
1112 	dasd_get_device(device);
1113 
1114 	if (device->discipline->hosts_print)
1115 		rc = device->discipline->hosts_print(device, m);
1116 
1117 	dasd_put_device(device);
1118 	return rc;
1119 }
1120 
1121 DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
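/*
 * DEFINE_SHOW_ATTRIBUTE(dasd_hosts) generates dasd_hosts_open() and the
 * dasd_hosts_fops used by dasd_hosts_init() below, exposing
 * dasd_hosts_show() through a read-only seq_file interface.
 */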
1122 
1123 static void dasd_hosts_exit(struct dasd_device *device)
1124 {
1125 	debugfs_remove(device->hosts_dentry);
1126 	device->hosts_dentry = NULL;
1127 }
1128 
1129 static void dasd_hosts_init(struct dentry *base_dentry,
1130 			    struct dasd_device *device)
1131 {
1132 	device->hosts_dentry = debugfs_create_file("host_access_list", 0400, base_dentry,
1133 						   device, &dasd_hosts_fops);
1134 }
1135 
1136 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
1137 					  struct dasd_device *device,
1138 					  struct dasd_ccw_req *cqr)
1139 {
1140 	unsigned long flags;
1141 	char *data, *chunk;
1142 	int size = 0;
1143 
1144 	if (cplength > 0)
1145 		size += cplength * sizeof(struct ccw1);
1146 	if (datasize > 0)
1147 		size += datasize;
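	/*
	 * If no cqr was passed in, reserve room for one in front of the
	 * payload; (x + 7L) & -8L rounds x up to the next multiple of 8
	 * (e.g. 52 -> 56), keeping the CCW area that follows doubleword
	 * aligned.
	 */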
1148 	if (!cqr)
1149 		size += (sizeof(*cqr) + 7L) & -8L;
1150 
1151 	spin_lock_irqsave(&device->mem_lock, flags);
1152 	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
1153 	spin_unlock_irqrestore(&device->mem_lock, flags);
1154 	if (!chunk)
1155 		return ERR_PTR(-ENOMEM);
1156 	if (!cqr) {
1157 		cqr = (void *) data;
1158 		data += (sizeof(*cqr) + 7L) & -8L;
1159 	}
1160 	memset(cqr, 0, sizeof(*cqr));
1161 	cqr->mem_chunk = chunk;
1162 	if (cplength > 0) {
1163 		cqr->cpaddr = data;
1164 		data += cplength * sizeof(struct ccw1);
1165 		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1166 	}
1167 	if (datasize > 0) {
1168 		cqr->data = data;
1169 		memset(cqr->data, 0, datasize);
1170 	}
1171 	cqr->magic = magic;
1172 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1173 	dasd_get_device(device);
1174 	return cqr;
1175 }
1176 EXPORT_SYMBOL(dasd_smalloc_request);
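/*
 * Minimal usage sketch (illustrative values only; a discipline passes its
 * own magic, CCW count and payload size):
 *
 *	cqr = dasd_smalloc_request(magic, 2, 64, device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... build the channel program in cqr->cpaddr / cqr->data ...
 *	dasd_sfree_request(cqr, device);
 */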
1177 
1178 struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
1179 					  int datasize,
1180 					  struct dasd_device *device)
1181 {
1182 	struct dasd_ccw_req *cqr;
1183 	unsigned long flags;
1184 	int size, cqr_size;
1185 	char *data;
1186 
1187 	cqr_size = (sizeof(*cqr) + 7L) & -8L;
1188 	size = cqr_size;
1189 	if (cplength > 0)
1190 		size += cplength * sizeof(struct ccw1);
1191 	if (datasize > 0)
1192 		size += datasize;
1193 
1194 	spin_lock_irqsave(&device->mem_lock, flags);
1195 	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
1196 	spin_unlock_irqrestore(&device->mem_lock, flags);
1197 	if (!cqr)
1198 		return ERR_PTR(-ENOMEM);
1199 	memset(cqr, 0, sizeof(*cqr));
1200 	data = (char *)cqr + cqr_size;
1201 	cqr->cpaddr = NULL;
1202 	if (cplength > 0) {
1203 		cqr->cpaddr = data;
1204 		data += cplength * sizeof(struct ccw1);
1205 		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1206 	}
1207 	cqr->data = NULL;
1208 	if (datasize > 0) {
1209 		cqr->data = data;
1210 		memset(cqr->data, 0, datasize);
1211 	}
1212 
1213 	cqr->magic = magic;
1214 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1215 	dasd_get_device(device);
1216 
1217 	return cqr;
1218 }
1219 EXPORT_SYMBOL(dasd_fmalloc_request);
1220 
1221 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1222 {
1223 	unsigned long flags;
1224 
1225 	spin_lock_irqsave(&device->mem_lock, flags);
1226 	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1227 	spin_unlock_irqrestore(&device->mem_lock, flags);
1228 	dasd_put_device(device);
1229 }
1230 EXPORT_SYMBOL(dasd_sfree_request);
1231 
1232 void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1233 {
1234 	unsigned long flags;
1235 
1236 	spin_lock_irqsave(&device->mem_lock, flags);
1237 	dasd_free_chunk(&device->ese_chunks, cqr);
1238 	spin_unlock_irqrestore(&device->mem_lock, flags);
1239 	dasd_put_device(device);
1240 }
1241 EXPORT_SYMBOL(dasd_ffree_request);
1242 
1243 /*
1244  * Check discipline magic in cqr.
1245  */
1246 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1247 {
1248 	struct dasd_device *device;
1249 
1250 	if (cqr == NULL)
1251 		return -EINVAL;
1252 	device = cqr->startdev;
1253 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1254 		DBF_DEV_EVENT(DBF_WARNING, device,
1255 			    " dasd_ccw_req 0x%08x magic doesn't match"
1256 			    " discipline 0x%08x",
1257 			    cqr->magic,
1258 			    *(unsigned int *) device->discipline->name);
1259 		return -EINVAL;
1260 	}
1261 	return 0;
1262 }
1263 
1264 /*
1265  * Terminate the current i/o and set the request to clear_pending.
1266  * Timer keeps the device running.
1267  * ccw_device_clear can fail if the i/o subsystem
1268  * is in a bad mood.
1269  */
1270 int dasd_term_IO(struct dasd_ccw_req *cqr)
1271 {
1272 	struct dasd_device *device;
1273 	int retries, rc;
1274 
1275 	/* Check the cqr */
1276 	rc = dasd_check_cqr(cqr);
1277 	if (rc)
1278 		return rc;
1279 	retries = 0;
1280 	device = (struct dasd_device *) cqr->startdev;
1281 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1282 		rc = ccw_device_clear(device->cdev, (long) cqr);
1283 		switch (rc) {
1284 		case 0:	/* termination successful */
1285 			cqr->status = DASD_CQR_CLEAR_PENDING;
1286 			cqr->stopclk = get_tod_clock();
1287 			cqr->starttime = 0;
1288 			DBF_DEV_EVENT(DBF_DEBUG, device,
1289 				      "terminate cqr %p successful",
1290 				      cqr);
1291 			break;
1292 		case -ENODEV:
1293 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1294 				      "device gone, retry");
1295 			break;
1296 		case -EINVAL:
1297 			/*
1298 			 * device not valid so no I/O could be running
1299 			 * handle CQR as termination successful
1300 			 */
1301 			cqr->status = DASD_CQR_CLEARED;
1302 			cqr->stopclk = get_tod_clock();
1303 			cqr->starttime = 0;
1304 			/* no retries for invalid devices */
1305 			cqr->retries = -1;
1306 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
1307 				      "EINVAL, handle as terminated");
1308 			/* fake rc to success */
1309 			rc = 0;
1310 			break;
1311 		default:
1312 			dev_err(&device->cdev->dev,
1313 				"Unexpected error during request termination %d\n", rc);
1314 			BUG();
1315 			break;
1316 		}
1317 		retries++;
1318 	}
1319 	dasd_schedule_device_bh(device);
1320 	return rc;
1321 }
1322 EXPORT_SYMBOL(dasd_term_IO);
1323 
1324 /*
1325  * Start the i/o. This start_IO can fail if the channel is really busy.
1326  * In that case set up a timer to start the request later.
1327  */
1328 int dasd_start_IO(struct dasd_ccw_req *cqr)
1329 {
1330 	struct dasd_device *device;
1331 	int rc;
1332 
1333 	/* Check the cqr */
1334 	rc = dasd_check_cqr(cqr);
1335 	if (rc) {
1336 		cqr->intrc = rc;
1337 		return rc;
1338 	}
1339 	device = (struct dasd_device *) cqr->startdev;
1340 	if (((cqr->block &&
1341 	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1342 	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1343 	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1344 		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1345 			      "because of stolen lock", cqr);
1346 		cqr->status = DASD_CQR_ERROR;
1347 		cqr->intrc = -EPERM;
1348 		return -EPERM;
1349 	}
1350 	if (cqr->retries < 0) {
1351 		dev_err(&device->cdev->dev,
1352 			"Start I/O ran out of retries\n");
1353 		cqr->status = DASD_CQR_ERROR;
1354 		return -EIO;
1355 	}
1356 	cqr->startclk = get_tod_clock();
1357 	cqr->starttime = jiffies;
1358 	cqr->retries--;
1359 	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1360 		cqr->lpm &= dasd_path_get_opm(device);
1361 		if (!cqr->lpm)
1362 			cqr->lpm = dasd_path_get_opm(device);
1363 	}
1364 	/*
1365 	 * remember the number of formatted tracks to prevent double format on
1366 	 * ESE devices
1367 	 */
1368 	if (cqr->block)
1369 		cqr->trkcount = atomic_read(&cqr->block->trkcount);
1370 
1371 	if (cqr->cpmode == 1) {
1372 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1373 					 (long) cqr, cqr->lpm);
1374 	} else {
1375 		rc = ccw_device_start(device->cdev, cqr->cpaddr,
1376 				      (long) cqr, cqr->lpm, 0);
1377 	}
1378 	switch (rc) {
1379 	case 0:
1380 		cqr->status = DASD_CQR_IN_IO;
1381 		break;
1382 	case -EBUSY:
1383 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1384 			      "start_IO: device busy, retry later");
1385 		break;
1386 	case -EACCES:
1387 		/* -EACCES indicates that the request used only a subset of the
1388 		 * available paths and all these paths are gone. If the lpm of
1389 		 * this request was only a subset of the opm (e.g. the ppm) then
1390 		 * we just do a retry with all available paths.
1391 		 * If we already use the full opm, something is amiss, and we
1392 		 * need a full path verification.
1393 		 */
1394 		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1395 			DBF_DEV_EVENT(DBF_WARNING, device,
1396 				      "start_IO: selected paths gone (%x)",
1397 				      cqr->lpm);
1398 		} else if (cqr->lpm != dasd_path_get_opm(device)) {
1399 			cqr->lpm = dasd_path_get_opm(device);
1400 			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1401 				      "start_IO: selected paths gone,"
1402 				      " retry on all paths");
1403 		} else {
1404 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1405 				      "start_IO: all paths in opm gone,"
1406 				      " do path verification");
1407 			dasd_generic_last_path_gone(device);
1408 			dasd_path_no_path(device);
1409 			dasd_path_set_tbvpm(device,
1410 					  ccw_device_get_path_mask(
1411 						  device->cdev));
1412 		}
1413 		break;
1414 	case -ENODEV:
1415 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1416 			      "start_IO: -ENODEV device gone, retry");
1417 		/* this is equivalent to CC=3 for SSCH; report this to EER */
1418 		dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
1419 		break;
1420 	case -EIO:
1421 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1422 			      "start_IO: -EIO device gone, retry");
1423 		break;
1424 	case -EINVAL:
1425 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1426 			      "start_IO: -EINVAL device currently "
1427 			      "not accessible");
1428 		break;
1429 	default:
1430 		dev_err(&device->cdev->dev,
1431 			"Unexpected error during request start %d", rc);
1432 		BUG();
1433 		break;
1434 	}
1435 	cqr->intrc = rc;
1436 	return rc;
1437 }
1438 EXPORT_SYMBOL(dasd_start_IO);
1439 
1440 /*
1441  * Timeout function for dasd devices. This is used for different purposes
1442  *  1) missing interrupt handler for normal operation
1443  *  2) delayed start of request where start_IO failed with -EBUSY
1444  *  3) timeout for missing state change interrupts
1445  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1446  * DASD_CQR_QUEUED for 2) and 3).
1447  */
1448 static void dasd_device_timeout(struct timer_list *t)
1449 {
1450 	unsigned long flags;
1451 	struct dasd_device *device;
1452 
1453 	device = timer_container_of(device, t, timer);
1454 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1455 	/* re-activate request queue */
1456 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1457 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1458 	dasd_schedule_device_bh(device);
1459 }
1460 
1461 /*
1462  * Setup timeout for a device in jiffies.
1463  */
1464 void dasd_device_set_timer(struct dasd_device *device, int expires)
1465 {
1466 	if (expires == 0)
1467 		timer_delete(&device->timer);
1468 	else
1469 		mod_timer(&device->timer, jiffies + expires);
1470 }
1471 EXPORT_SYMBOL(dasd_device_set_timer);
1472 
1473 /*
1474  * Clear timeout for a device.
1475  */
1476 void dasd_device_clear_timer(struct dasd_device *device)
1477 {
1478 	timer_delete(&device->timer);
1479 }
1480 EXPORT_SYMBOL(dasd_device_clear_timer);
1481 
1482 static void dasd_handle_killed_request(struct ccw_device *cdev,
1483 				       unsigned long intparm)
1484 {
1485 	struct dasd_ccw_req *cqr;
1486 	struct dasd_device *device;
1487 
1488 	if (!intparm)
1489 		return;
1490 	cqr = (struct dasd_ccw_req *) intparm;
1491 	if (cqr->status != DASD_CQR_IN_IO) {
1492 		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1493 				"invalid status in handle_killed_request: "
1494 				"%02x", cqr->status);
1495 		return;
1496 	}
1497 
1498 	device = dasd_device_from_cdev_locked(cdev);
1499 	if (IS_ERR(device)) {
1500 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1501 				"unable to get device from cdev");
1502 		return;
1503 	}
1504 
1505 	if (!cqr->startdev ||
1506 	    device != cqr->startdev ||
1507 	    strncmp(cqr->startdev->discipline->ebcname,
1508 		    (char *) &cqr->magic, 4)) {
1509 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1510 				"invalid device in request");
1511 		dasd_put_device(device);
1512 		return;
1513 	}
1514 
1515 	/* Schedule request to be retried. */
1516 	cqr->status = DASD_CQR_QUEUED;
1517 
1518 	dasd_device_clear_timer(device);
1519 	dasd_schedule_device_bh(device);
1520 	dasd_put_device(device);
1521 }
1522 
1523 void dasd_generic_handle_state_change(struct dasd_device *device)
1524 {
1525 	/* First of all start sense subsystem status request. */
1526 	dasd_eer_snss(device);
1527 
1528 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1529 	dasd_schedule_device_bh(device);
1530 	if (device->block) {
1531 		dasd_schedule_block_bh(device->block);
1532 		if (device->block->gdp)
1533 			blk_mq_run_hw_queues(device->block->gdp->queue, true);
1534 	}
1535 }
1536 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1537 
1538 static int dasd_check_hpf_error(struct irb *irb)
1539 {
1540 	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1541 	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1542 	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1543 }
1544 
1545 static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
1546 {
1547 	struct dasd_device *device = NULL;
1548 	u8 *sense = NULL;
1549 
1550 	if (!block)
1551 		return 0;
1552 	device = block->base;
1553 	if (!device || !device->discipline->is_ese)
1554 		return 0;
1555 	if (!device->discipline->is_ese(device))
1556 		return 0;
1557 
1558 	sense = dasd_get_sense(irb);
1559 	if (!sense)
1560 		return 0;
1561 
1562 	if (sense[1] & SNS1_NO_REC_FOUND)
1563 		return 1;
1564 
1565 	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
1566 	    scsw_is_tm(&irb->scsw) &&
1567 	    !(sense[2] & SNS2_ENV_DATA_PRESENT))
1568 		return 1;
1569 
1570 	return 0;
1571 }
1572 
1573 static int dasd_ese_oos_cond(u8 *sense)
1574 {
1575 	return sense[0] & SNS0_EQUIPMENT_CHECK &&
1576 		sense[1] & SNS1_PERM_ERR &&
1577 		sense[1] & SNS1_WRITE_INHIBITED &&
1578 		sense[25] == 0x01;
1579 }
1580 
1581 /*
1582  * Interrupt handler for "normal" ssch-io based dasd devices.
1583  */
1584 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1585 		      struct irb *irb)
1586 {
1587 	struct dasd_ccw_req *cqr, *next, *fcqr;
1588 	struct dasd_device *device;
1589 	unsigned long now;
1590 	int nrf_suppressed = 0;
1591 	int it_suppressed = 0;
1592 	struct request *req;
1593 	u8 *sense = NULL;
1594 	int expires;
1595 
1596 	cqr = (struct dasd_ccw_req *) intparm;
1597 	if (IS_ERR(irb)) {
1598 		switch (PTR_ERR(irb)) {
1599 		case -EIO:
1600 			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1601 				device = cqr->startdev;
1602 				cqr->status = DASD_CQR_CLEARED;
1603 				dasd_device_clear_timer(device);
1604 				wake_up(&dasd_flush_wq);
1605 				dasd_schedule_device_bh(device);
1606 				return;
1607 			}
1608 			break;
1609 		case -ETIMEDOUT:
1610 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1611 					"request timed out\n", __func__);
1612 			break;
1613 		default:
1614 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1615 					"unknown error %ld\n", __func__,
1616 					PTR_ERR(irb));
1617 		}
1618 		dasd_handle_killed_request(cdev, intparm);
1619 		return;
1620 	}
1621 
1622 	now = get_tod_clock();
1623 	/* check for conditions that should be handled immediately */
1624 	if (!cqr ||
1625 	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1626 	      scsw_cstat(&irb->scsw) == 0)) {
1627 		if (cqr)
1628 			memcpy(&cqr->irb, irb, sizeof(*irb));
1629 		device = dasd_device_from_cdev_locked(cdev);
1630 		if (IS_ERR(device))
1631 			return;
1632 		/* ignore unsolicited interrupts for DIAG discipline */
1633 		if (device->discipline == dasd_diag_discipline_pointer) {
1634 			dasd_put_device(device);
1635 			return;
1636 		}
1637 
1638 		/*
1639 		 * In some cases 'File Protected' or 'No Record Found' errors
1640 		 * might be expected and debug log messages for the
1641 		 * corresponding interrupts shouldn't be written then.
1642 		 * Check if either of the according suppress bits is set.
1643 		 */
1644 		sense = dasd_get_sense(irb);
1645 		if (sense) {
1646 			it_suppressed =	(sense[1] & SNS1_INV_TRACK_FORMAT) &&
1647 				!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
1648 				test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
1649 			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1650 				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1651 
1652 			/*
1653 			 * Extent pool probably out-of-space.
1654 			 * Stop device and check exhaust level.
1655 			 */
1656 			if (dasd_ese_oos_cond(sense)) {
1657 				dasd_generic_space_exhaust(device, cqr);
1658 				device->discipline->ext_pool_exhaust(device, cqr);
1659 				dasd_put_device(device);
1660 				return;
1661 			}
1662 		}
1663 		if (!(it_suppressed || nrf_suppressed))
1664 			device->discipline->dump_sense_dbf(device, irb, "int");
1665 
1666 		if (device->features & DASD_FEATURE_ERPLOG)
1667 			device->discipline->dump_sense(device, cqr, irb);
1668 		device->discipline->check_for_device_change(device, cqr, irb);
1669 		dasd_put_device(device);
1670 	}
1671 
1672 	/* check for attention message */
1673 	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1674 		device = dasd_device_from_cdev_locked(cdev);
1675 		if (!IS_ERR(device)) {
1676 			device->discipline->check_attention(device,
1677 							    irb->esw.esw1.lpum);
1678 			dasd_put_device(device);
1679 		}
1680 	}
1681 
1682 	if (!cqr)
1683 		return;
1684 
1685 	device = (struct dasd_device *) cqr->startdev;
1686 	if (!device ||
1687 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1688 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1689 				"invalid device in request");
1690 		return;
1691 	}
1692 
1693 	if (dasd_ese_needs_format(cqr->block, irb)) {
1694 		req = dasd_get_callback_data(cqr);
1695 		if (!req) {
1696 			cqr->status = DASD_CQR_ERROR;
1697 			return;
1698 		}
1699 		if (rq_data_dir(req) == READ) {
1700 			device->discipline->ese_read(cqr, irb);
1701 			cqr->status = DASD_CQR_SUCCESS;
1702 			cqr->stopclk = now;
1703 			dasd_device_clear_timer(device);
1704 			dasd_schedule_device_bh(device);
1705 			return;
1706 		}
1707 		fcqr = device->discipline->ese_format(device, cqr, irb);
1708 		if (IS_ERR(fcqr)) {
1709 			if (PTR_ERR(fcqr) == -EINVAL) {
1710 				cqr->status = DASD_CQR_ERROR;
1711 				return;
1712 			}
1713 			/*
1714 			 * If we can't format now, let the request go
1715 			 * one extra round. Maybe we can format later.
1716 			 */
1717 			cqr->status = DASD_CQR_QUEUED;
1718 			dasd_schedule_device_bh(device);
1719 			return;
1720 		} else {
1721 			fcqr->status = DASD_CQR_QUEUED;
1722 			cqr->status = DASD_CQR_QUEUED;
1723 			list_add(&fcqr->devlist, &device->ccw_queue);
1724 			dasd_schedule_device_bh(device);
1725 			return;
1726 		}
1727 	}
1728 
1729 	/* Check for clear pending */
1730 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1731 	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1732 		cqr->status = DASD_CQR_CLEARED;
1733 		dasd_device_clear_timer(device);
1734 		wake_up(&dasd_flush_wq);
1735 		dasd_schedule_device_bh(device);
1736 		return;
1737 	}
1738 
1739 	/* check status - the request might have been killed by dyn detach */
1740 	if (cqr->status != DASD_CQR_IN_IO) {
1741 		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1742 			      "status %02x", dev_name(&cdev->dev), cqr->status);
1743 		return;
1744 	}
1745 
1746 	next = NULL;
1747 	expires = 0;
1748 	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1749 	    scsw_cstat(&irb->scsw) == 0) {
1750 		/* request was completed successfully */
1751 		cqr->status = DASD_CQR_SUCCESS;
1752 		cqr->stopclk = now;
1753 		/* Start first request on queue if possible -> fast_io. */
1754 		if (cqr->devlist.next != &device->ccw_queue) {
1755 			next = list_entry(cqr->devlist.next,
1756 					  struct dasd_ccw_req, devlist);
1757 		}
1758 	} else {  /* error */
1759 		/* check for HPF error
1760 		 * call discipline function to requeue all requests
1761 		 * and disable HPF accordingly
1762 		 */
1763 		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1764 		    device->discipline->handle_hpf_error)
1765 			device->discipline->handle_hpf_error(device, irb);
1766 		/*
1767 		 * If we don't want complex ERP for this request, then just
1768 		 * reset this and retry it in the fastpath
1769 		 */
1770 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1771 		    cqr->retries > 0) {
1772 			if (cqr->lpm == dasd_path_get_opm(device))
1773 				DBF_DEV_EVENT(DBF_DEBUG, device,
1774 					      "default ERP in fastpath "
1775 					      "(%i retries left)",
1776 					      cqr->retries);
1777 			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1778 				cqr->lpm = dasd_path_get_opm(device);
1779 			cqr->status = DASD_CQR_QUEUED;
1780 			next = cqr;
1781 		} else
1782 			cqr->status = DASD_CQR_ERROR;
1783 	}
1784 	if (next && (next->status == DASD_CQR_QUEUED) &&
1785 	    (!device->stopped)) {
1786 		if (device->discipline->start_IO(next) == 0)
1787 			expires = next->expires;
1788 	}
1789 	if (expires != 0)
1790 		dasd_device_set_timer(device, expires);
1791 	else
1792 		dasd_device_clear_timer(device);
1793 	dasd_schedule_device_bh(device);
1794 }
1795 EXPORT_SYMBOL(dasd_int_handler);
1796 
1797 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1798 {
1799 	struct dasd_device *device;
1800 
1801 	device = dasd_device_from_cdev_locked(cdev);
1802 
1803 	if (IS_ERR(device))
1804 		goto out;
1805 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1806 	   device->state != device->target ||
1807 	   !device->discipline->check_for_device_change){
1808 		dasd_put_device(device);
1809 		goto out;
1810 	}
1811 	if (device->discipline->dump_sense_dbf)
1812 		device->discipline->dump_sense_dbf(device, irb, "uc");
1813 	device->discipline->check_for_device_change(device, NULL, irb);
1814 	dasd_put_device(device);
1815 out:
1816 	return UC_TODO_RETRY;
1817 }
1818 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1819 
1820 /*
1821  * If we have an error on a dasd_block layer request then we cancel
1822  * and return all further requests from the same dasd_block as well.
1823  */
1824 static void __dasd_device_recovery(struct dasd_device *device,
1825 				   struct dasd_ccw_req *ref_cqr)
1826 {
1827 	struct list_head *l, *n;
1828 	struct dasd_ccw_req *cqr;
1829 
1830 	/*
1831 	 * only requeue requests that came from the dasd_block layer
1832 	 */
1833 	if (!ref_cqr->block)
1834 		return;
1835 
1836 	list_for_each_safe(l, n, &device->ccw_queue) {
1837 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1838 		if (cqr->status == DASD_CQR_QUEUED &&
1839 		    ref_cqr->block == cqr->block) {
1840 			cqr->status = DASD_CQR_CLEARED;
1841 		}
1842 	}
1843 }
1844 
1845 /*
1846  * Remove those ccw requests from the queue that need to be returned
1847  * to the upper layer.
1848  */
1849 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1850 					    struct list_head *final_queue)
1851 {
1852 	struct list_head *l, *n;
1853 	struct dasd_ccw_req *cqr;
1854 
1855 	/* Process request with final status. */
1856 	list_for_each_safe(l, n, &device->ccw_queue) {
1857 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1858 
1859 		/* Skip any non-final request. */
1860 		if (cqr->status == DASD_CQR_QUEUED ||
1861 		    cqr->status == DASD_CQR_IN_IO ||
1862 		    cqr->status == DASD_CQR_CLEAR_PENDING)
1863 			continue;
1864 		if (cqr->status == DASD_CQR_ERROR) {
1865 			__dasd_device_recovery(device, cqr);
1866 		}
1867 		/* Rechain finished requests to final queue */
1868 		list_move_tail(&cqr->devlist, final_queue);
1869 	}
1870 }
1871 
1872 static void __dasd_process_cqr(struct dasd_device *device,
1873 			       struct dasd_ccw_req *cqr)
1874 {
1875 	switch (cqr->status) {
1876 	case DASD_CQR_SUCCESS:
1877 		cqr->status = DASD_CQR_DONE;
1878 		break;
1879 	case DASD_CQR_ERROR:
1880 		cqr->status = DASD_CQR_NEED_ERP;
1881 		break;
1882 	case DASD_CQR_CLEARED:
1883 		cqr->status = DASD_CQR_TERMINATED;
1884 		break;
1885 	default:
1886 		dev_err(&device->cdev->dev,
1887 			"Unexpected CQR status %02x", cqr->status);
1888 		BUG();
1889 	}
1890 	if (cqr->callback)
1891 		cqr->callback(cqr, cqr->callback_data);
1892 }
1893 
1894 /*
1895  * The cqrs from the final queue are returned to the upper layer
1896  * by setting their final status and calling the callback function.
1897  */
1898 static void __dasd_device_process_final_queue(struct dasd_device *device,
1899 					      struct list_head *final_queue)
1900 {
1901 	struct list_head *l, *n;
1902 	struct dasd_ccw_req *cqr;
1903 	struct dasd_block *block;
1904 
1905 	list_for_each_safe(l, n, final_queue) {
1906 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1907 		list_del_init(&cqr->devlist);
1908 		block = cqr->block;
1909 		if (!block) {
1910 			__dasd_process_cqr(device, cqr);
1911 		} else {
1912 			spin_lock_bh(&block->queue_lock);
1913 			__dasd_process_cqr(device, cqr);
1914 			spin_unlock_bh(&block->queue_lock);
1915 		}
1916 	}
1917 }
1918 
1919 /*
1920  * check if device should be autoquiesced due to too many timeouts
1921  */
1922 static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
1923 						    struct dasd_ccw_req *cqr)
1924 {
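	/*
	 * cqr->retries is decremented on each start attempt, so this
	 * difference counts how often the request has timed out so far.
	 */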
1925 	if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
1926 		dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
1927 }
1928 
1929 /*
1930  * Take a look at the first request on the ccw queue and check
1931  * if it reached its expire time. If so, terminate the IO.
1932  */
1933 static void __dasd_device_check_expire(struct dasd_device *device)
1934 {
1935 	struct dasd_ccw_req *cqr;
1936 
1937 	if (list_empty(&device->ccw_queue))
1938 		return;
1939 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1940 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1941 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1942 		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1943 			/*
1944 			 * IO in safe offline processing should not
1945 			 * run out of retries
1946 			 */
1947 			cqr->retries++;
1948 		}
1949 		if (device->discipline->term_IO(cqr) != 0) {
1950 			/* Hmpf, try again in 5 sec */
1951 			dev_err(&device->cdev->dev,
1952 				"CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
1953 				(cqr->expires / HZ));
1954 			cqr->expires += 5*HZ;
1955 			dasd_device_set_timer(device, 5*HZ);
1956 		} else {
1957 			dev_err(&device->cdev->dev,
1958 				"CQR timed out (%lus), %i retries remaining\n",
1959 				(cqr->expires / HZ), cqr->retries);
1960 		}
1961 		__dasd_device_check_autoquiesce_timeout(device, cqr);
1962 	}
1963 }
1964 
1965 /*
1966  * return 1 when device is not eligible for IO
1967  */
1968 static int __dasd_device_is_unusable(struct dasd_device *device,
1969 				     struct dasd_ccw_req *cqr)
1970 {
1971 	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
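	/*
	 * Stop conditions other than DASD_STOPPED_DC_WAIT and
	 * DASD_STOPPED_NOSPC cannot be resolved by any request; if only
	 * those two are set, path verification requests are still allowed
	 * through below to get the device operational again.
	 */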
1972 
1973 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
1974 	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1975 		/*
1976 		 * dasd is being set offline
1977 		 * but it is no safe offline where we have to allow I/O
1978 		 */
1979 		return 1;
1980 	}
1981 	if (device->stopped) {
1982 		if (device->stopped & mask) {
1983 			/* stopped and CQR will not change that. */
1984 			return 1;
1985 		}
1986 		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1987 			/* CQR is not able to change device to
1988 			 * operational. */
1989 			return 1;
1990 		}
1991 		/* CQR required to get device operational. */
1992 	}
1993 	return 0;
1994 }
1995 
1996 /*
1997  * Take a look at the first request on the ccw queue and check
1998  * if it needs to be started.
1999  */
2000 static void __dasd_device_start_head(struct dasd_device *device)
2001 {
2002 	struct dasd_ccw_req *cqr;
2003 	int rc;
2004 
2005 	if (list_empty(&device->ccw_queue))
2006 		return;
2007 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2008 	if (cqr->status != DASD_CQR_QUEUED)
2009 		return;
2010 	/* if device is not usable return request to upper layer */
2011 	if (__dasd_device_is_unusable(device, cqr)) {
2012 		cqr->intrc = -EAGAIN;
2013 		cqr->status = DASD_CQR_CLEARED;
2014 		dasd_schedule_device_bh(device);
2015 		return;
2016 	}
2017 
2018 	rc = device->discipline->start_IO(cqr);
2019 	if (rc == 0)
2020 		dasd_device_set_timer(device, cqr->expires);
2021 	else if (rc == -EACCES) {
2022 		dasd_schedule_device_bh(device);
2023 	} else
2024 		/* Hmpf, try again in 1/2 sec */
2025 		dasd_device_set_timer(device, 50);
2026 }
2027 
2028 static void __dasd_device_check_path_events(struct dasd_device *device)
2029 {
2030 	__u8 tbvpm, fcsecpm;
2031 	int rc;
2032 
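	/*
	 * tbvpm: paths that still need to be verified,
	 * fcsecpm: paths whose fibre channel endpoint security state
	 * needs to be updated.
	 */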
2033 	tbvpm = dasd_path_get_tbvpm(device);
2034 	fcsecpm = dasd_path_get_fcsecpm(device);
2035 
2036 	if (!tbvpm && !fcsecpm)
2037 		return;
2038 
2039 	if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
2040 		return;
2041 
2042 	dasd_path_clear_all_verify(device);
2043 	dasd_path_clear_all_fcsec(device);
2044 
2045 	rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
2046 	if (rc) {
2047 		dasd_path_add_tbvpm(device, tbvpm);
2048 		dasd_path_add_fcsecpm(device, fcsecpm);
2049 		dasd_device_set_timer(device, 50);
2050 	}
2051 }
2052 
2053 /*
2054  * Go through all requests on the dasd_device request queue,
2055  * terminate them on the cdev if necessary, and return them to the
2056  * submitting layer via callback.
2057  * Note:
2058  * Make sure that all 'submitting layers' still exist when
2059  * this function is called! In other words, when 'device' is a base
2060  * device, all block layer requests must already have been removed
2061  * via dasd_flush_block_queue.
2062  */
2063 int dasd_flush_device_queue(struct dasd_device *device)
2064 {
2065 	struct dasd_ccw_req *cqr, *n;
2066 	int rc;
2067 	struct list_head flush_queue;
2068 
2069 	INIT_LIST_HEAD(&flush_queue);
2070 	spin_lock_irq(get_ccwdev_lock(device->cdev));
2071 	rc = 0;
2072 	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2073 		/* Check status and move request to flush_queue */
2074 		switch (cqr->status) {
2075 		case DASD_CQR_IN_IO:
2076 			rc = device->discipline->term_IO(cqr);
2077 			if (rc) {
2078 				/* unable to terminate request */
2079 				dev_err(&device->cdev->dev,
2080 					"Flushing the DASD request queue failed\n");
2081 				/* stop flush processing */
2082 				goto finished;
2083 			}
2084 			break;
2085 		case DASD_CQR_QUEUED:
2086 			cqr->stopclk = get_tod_clock();
2087 			cqr->status = DASD_CQR_CLEARED;
2088 			break;
2089 		default: /* no need to modify the others */
2090 			break;
2091 		}
2092 		list_move_tail(&cqr->devlist, &flush_queue);
2093 	}
2094 finished:
2095 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2096 	/*
2097 	 * After this point all requests must be in state CLEAR_PENDING,
2098 	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2099 	 * one of the others.
2100 	 */
2101 	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2102 		wait_event(dasd_flush_wq,
2103 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
2104 	/*
2105 	 * Now set each request back to TERMINATED, DONE or NEED_ERP
2106 	 * and call the callback function of flushed requests
2107 	 */
2108 	__dasd_device_process_final_queue(device, &flush_queue);
2109 	return rc;
2110 }
2111 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2112 
2113 /*
2114  * Acquire the device lock and process queues for the device.
2115  */
2116 static void dasd_device_tasklet(unsigned long data)
2117 {
2118 	struct dasd_device *device = (struct dasd_device *) data;
2119 	struct list_head final_queue;
2120 
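	/*
	 * Clear the scheduled flag first so that interrupts arriving while
	 * we run can schedule the tasklet again.
	 */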
2121 	atomic_set(&device->tasklet_scheduled, 0);
2122 	INIT_LIST_HEAD(&final_queue);
2123 	spin_lock_irq(get_ccwdev_lock(device->cdev));
2124 	/* Check expire time of first request on the ccw queue. */
2125 	__dasd_device_check_expire(device);
2126 	/* find final requests on ccw queue */
2127 	__dasd_device_process_ccw_queue(device, &final_queue);
2128 	__dasd_device_check_path_events(device);
2129 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2130 	/* Now call the callback function of requests with final status */
2131 	__dasd_device_process_final_queue(device, &final_queue);
2132 	spin_lock_irq(get_ccwdev_lock(device->cdev));
2133 	/* Now check if the head of the ccw queue needs to be started. */
2134 	__dasd_device_start_head(device);
2135 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2136 	if (waitqueue_active(&shutdown_waitq))
2137 		wake_up(&shutdown_waitq);
2138 	dasd_put_device(device);
2139 }
2140 
2141 /*
2142  * Schedules a call to dasd_device_tasklet over the device tasklet.
2143  */
2144 void dasd_schedule_device_bh(struct dasd_device *device)
2145 {
2146 	/* Protect against rescheduling. */
2147 	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
2148 		return;
2149 	dasd_get_device(device);
2150 	tasklet_hi_schedule(&device->tasklet);
2151 }
2152 EXPORT_SYMBOL(dasd_schedule_device_bh);
2153 
2154 void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2155 {
2156 	device->stopped |= bits;
2157 }
2158 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2159 
2160 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2161 {
2162 	device->stopped &= ~bits;
2163 	if (!device->stopped)
2164 		wake_up(&generic_waitq);
2165 }
2166 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
2167 
2168 /*
2169  * Queue a request to the head of the device ccw_queue.
2170  * Start the I/O if possible.
2171  */
2172 void dasd_add_request_head(struct dasd_ccw_req *cqr)
2173 {
2174 	struct dasd_device *device;
2175 	unsigned long flags;
2176 
2177 	device = cqr->startdev;
2178 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2179 	cqr->status = DASD_CQR_QUEUED;
2180 	list_add(&cqr->devlist, &device->ccw_queue);
2181 	/* let the bh start the requests to keep them in order */
2182 	dasd_schedule_device_bh(device);
2183 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2184 }
2185 EXPORT_SYMBOL(dasd_add_request_head);
2186 
2187 /*
2188  * Queue a request to the tail of the device ccw_queue.
2189  * Start the I/O if possible.
2190  */
2191 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2192 {
2193 	struct dasd_device *device;
2194 	unsigned long flags;
2195 
2196 	device = cqr->startdev;
2197 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2198 	cqr->status = DASD_CQR_QUEUED;
2199 	list_add_tail(&cqr->devlist, &device->ccw_queue);
2200 	/* let the bh start the requests to keep them in order */
2201 	dasd_schedule_device_bh(device);
2202 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2203 }
2204 EXPORT_SYMBOL(dasd_add_request_tail);
2205 
2206 /*
2207  * Wakeup helper for the 'sleep_on' functions.
2208  */
2209 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2210 {
2211 	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2212 	cqr->callback_data = DASD_SLEEPON_END_TAG;
2213 	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2214 	wake_up(&generic_waitq);
2215 }
2216 EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2217 
2218 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2219 {
2220 	struct dasd_device *device;
2221 	int rc;
2222 
2223 	device = cqr->startdev;
2224 	spin_lock_irq(get_ccwdev_lock(device->cdev));
2225 	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2226 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2227 	return rc;
2228 }
2229 
2230 /*
2231  * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
2232  */
2233 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2234 {
2235 	struct dasd_device *device;
2236 	dasd_erp_fn_t erp_fn;
2237 
2238 	if (cqr->status == DASD_CQR_FILLED)
2239 		return 0;
2240 	device = cqr->startdev;
2241 	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2242 		if (cqr->status == DASD_CQR_TERMINATED) {
2243 			device->discipline->handle_terminated_request(cqr);
2244 			return 1;
2245 		}
2246 		if (cqr->status == DASD_CQR_NEED_ERP) {
2247 			erp_fn = device->discipline->erp_action(cqr);
2248 			erp_fn(cqr);
2249 			return 1;
2250 		}
2251 		if (cqr->status == DASD_CQR_FAILED)
2252 			dasd_log_sense(cqr, &cqr->irb);
2253 		if (cqr->refers) {
2254 			__dasd_process_erp(device, cqr);
2255 			return 1;
2256 		}
2257 	}
2258 	return 0;
2259 }
2260 
2261 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2262 {
2263 	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2264 		if (cqr->refers) /* erp is not done yet */
2265 			return 1;
2266 		return ((cqr->status != DASD_CQR_DONE) &&
2267 			(cqr->status != DASD_CQR_FAILED));
2268 	} else
2269 		return (cqr->status == DASD_CQR_FILLED);
2270 }
2271 
2272 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2273 {
2274 	struct dasd_device *device;
2275 	int rc;
2276 	struct list_head ccw_queue;
2277 	struct dasd_ccw_req *cqr;
2278 
2279 	INIT_LIST_HEAD(&ccw_queue);
2280 	maincqr->status = DASD_CQR_FILLED;
2281 	device = maincqr->startdev;
2282 	list_add(&maincqr->blocklist, &ccw_queue);
2283 	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
2284 	     cqr = list_first_entry(&ccw_queue,
2285 				    struct dasd_ccw_req, blocklist)) {
2286 
2287 		if (__dasd_sleep_on_erp(cqr))
2288 			continue;
2289 		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2290 			continue;
2291 		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2292 		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2293 			cqr->status = DASD_CQR_FAILED;
2294 			cqr->intrc = -EPERM;
2295 			continue;
2296 		}
2297 		/* Non-temporary stop condition will trigger fail fast */
2298 		if (device->stopped & ~DASD_STOPPED_PENDING &&
2299 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2300 		    !dasd_eer_enabled(device) && device->aq_mask == 0) {
2301 			cqr->status = DASD_CQR_FAILED;
2302 			cqr->intrc = -ENOLINK;
2303 			continue;
2304 		}
2305 		/*
2306 		 * Don't try to start requests if the device is in
2307 		 * offline processing; they might wait forever
2308 		 */
2309 		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2310 			cqr->status = DASD_CQR_FAILED;
2311 			cqr->intrc = -ENODEV;
2312 			continue;
2313 		}
2314 		/*
2315 		 * Don't try to start requests if the device is stopped,
2316 		 * except for path verification requests
2317 		 */
2318 		if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2319 			if (interruptible) {
2320 				rc = wait_event_interruptible(
2321 					generic_waitq, !(device->stopped));
2322 				if (rc == -ERESTARTSYS) {
2323 					cqr->status = DASD_CQR_FAILED;
2324 					maincqr->intrc = rc;
2325 					continue;
2326 				}
2327 			} else
2328 				wait_event(generic_waitq, !(device->stopped));
2329 		}
2330 		if (!cqr->callback)
2331 			cqr->callback = dasd_wakeup_cb;
2332 
2333 		cqr->callback_data = DASD_SLEEPON_START_TAG;
2334 		dasd_add_request_tail(cqr);
2335 		if (interruptible) {
2336 			rc = wait_event_interruptible(
2337 				generic_waitq, _wait_for_wakeup(cqr));
2338 			if (rc == -ERESTARTSYS) {
2339 				dasd_cancel_req(cqr);
2340 				/* wait (non-interruptible) for final status */
2341 				wait_event(generic_waitq,
2342 					   _wait_for_wakeup(cqr));
2343 				cqr->status = DASD_CQR_FAILED;
2344 				maincqr->intrc = rc;
2345 				continue;
2346 			}
2347 		} else
2348 			wait_event(generic_waitq, _wait_for_wakeup(cqr));
2349 	}
2350 
2351 	maincqr->endclk = get_tod_clock();
2352 	if ((maincqr->status != DASD_CQR_DONE) &&
2353 	    (maincqr->intrc != -ERESTARTSYS))
2354 		dasd_log_sense(maincqr, &maincqr->irb);
2355 	if (maincqr->status == DASD_CQR_DONE)
2356 		rc = 0;
2357 	else if (maincqr->intrc)
2358 		rc = maincqr->intrc;
2359 	else
2360 		rc = -EIO;
2361 	return rc;
2362 }
2363 
2364 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2365 {
2366 	struct dasd_ccw_req *cqr;
2367 
2368 	list_for_each_entry(cqr, ccw_queue, blocklist) {
2369 		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2370 			return 0;
2371 	}
2372 
2373 	return 1;
2374 }
2375 
2376 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2377 {
2378 	struct dasd_device *device;
2379 	struct dasd_ccw_req *cqr, *n;
2380 	u8 *sense = NULL;
2381 	int rc;
2382 
2383 retry:
2384 	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2385 		device = cqr->startdev;
2386 		if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
2387 			continue;
2388 
2389 		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2390 		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2391 			cqr->status = DASD_CQR_FAILED;
2392 			cqr->intrc = -EPERM;
2393 			continue;
2394 		}
2395 		/*Non-temporary stop condition will trigger fail fast*/
2396 		if (device->stopped & ~DASD_STOPPED_PENDING &&
2397 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2398 		    !dasd_eer_enabled(device)) {
2399 			cqr->status = DASD_CQR_FAILED;
2400 			cqr->intrc = -EAGAIN;
2401 			continue;
2402 		}
2403 
2404 		/*Don't try to start requests if device is stopped*/
2405 		if (interruptible) {
2406 			rc = wait_event_interruptible(
2407 				generic_waitq, !device->stopped);
2408 			if (rc == -ERESTARTSYS) {
2409 				cqr->status = DASD_CQR_FAILED;
2410 				cqr->intrc = rc;
2411 				continue;
2412 			}
2413 		} else
2414 			wait_event(generic_waitq, !(device->stopped));
2415 
2416 		if (!cqr->callback)
2417 			cqr->callback = dasd_wakeup_cb;
2418 		cqr->callback_data = DASD_SLEEPON_START_TAG;
2419 		dasd_add_request_tail(cqr);
2420 	}
2421 
2422 	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2423 
2424 	rc = 0;
2425 	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2426 		/*
2427 		 * Certain errors might be expected and error recovery
2428 		 * would be unnecessary in those cases.
2429 		 * Check if the corresponding suppress bit is set.
2430 		 */
2431 		sense = dasd_get_sense(&cqr->irb);
2432 		if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
2433 		    !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
2434 		    test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
2435 			continue;
2436 		if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
2437 		    test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
2438 			continue;
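		/* channel status 0x40 is an incorrect length indication */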
2439 		if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2440 		    test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2441 			continue;
2442 
2443 		/*
2444 		 * For alias devices, simplify error recovery and
2445 		 * return to the upper layer;
2446 		 * do not skip ERP requests
2447 		 */
2448 		if (cqr->startdev != cqr->basedev && !cqr->refers &&
2449 		    (cqr->status == DASD_CQR_TERMINATED ||
2450 		     cqr->status == DASD_CQR_NEED_ERP))
2451 			return -EAGAIN;
2452 
2453 		/* normal recovery for basedev IO */
2454 		if (__dasd_sleep_on_erp(cqr))
2455 			/* handle erp first */
2456 			goto retry;
2457 	}
2458 
2459 	return 0;
2460 }
2461 
2462 /*
2463  * Queue a request to the tail of the device ccw_queue and wait for
2464  * its completion.
2465  */
2466 int dasd_sleep_on(struct dasd_ccw_req *cqr)
2467 {
2468 	return _dasd_sleep_on(cqr, 0);
2469 }
2470 EXPORT_SYMBOL(dasd_sleep_on);
2471 
2472 /*
2473  * Start requests from a ccw_queue and wait for their completion.
2474  */
2475 int dasd_sleep_on_queue(struct list_head *ccw_queue)
2476 {
2477 	return _dasd_sleep_on_queue(ccw_queue, 0);
2478 }
2479 EXPORT_SYMBOL(dasd_sleep_on_queue);
2480 
2481 /*
2482  * Start requests from a ccw_queue and wait interruptible for their completion.
2483  */
2484 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
2485 {
2486 	return _dasd_sleep_on_queue(ccw_queue, 1);
2487 }
2488 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
2489 
2490 /*
2491  * Queue a request to the tail of the device ccw_queue and wait
2492  * interruptibly for its completion.
2493  */
2494 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2495 {
2496 	return _dasd_sleep_on(cqr, 1);
2497 }
2498 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2499 
2500 /*
2501  * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
2502  * for eckd devices) the currently running request has to be terminated
2503  * and be put back to status queued, before the special request is added
2504  * to the head of the queue. Then the special request is waited on normally.
2505  */
2506 static inline int _dasd_term_running_cqr(struct dasd_device *device)
2507 {
2508 	struct dasd_ccw_req *cqr;
2509 	int rc;
2510 
2511 	if (list_empty(&device->ccw_queue))
2512 		return 0;
2513 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2514 	rc = device->discipline->term_IO(cqr);
2515 	if (!rc)
2516 		/*
2517 		 * CQR terminated because a more important request is pending.
2518 		 * Undo the decrement of the retry counter because this is
2519 		 * not an error case.
2520 		 */
2521 		cqr->retries++;
2522 	return rc;
2523 }
2524 
2525 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2526 {
2527 	struct dasd_device *device;
2528 	int rc;
2529 
2530 	device = cqr->startdev;
2531 	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2532 	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2533 		cqr->status = DASD_CQR_FAILED;
2534 		cqr->intrc = -EPERM;
2535 		return -EIO;
2536 	}
2537 	spin_lock_irq(get_ccwdev_lock(device->cdev));
2538 	rc = _dasd_term_running_cqr(device);
2539 	if (rc) {
2540 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
2541 		return rc;
2542 	}
2543 	cqr->callback = dasd_wakeup_cb;
2544 	cqr->callback_data = DASD_SLEEPON_START_TAG;
2545 	cqr->status = DASD_CQR_QUEUED;
2546 	/*
2547 	 * add the new request as second;
2548 	 * the terminated cqr needs to be finished first
2549 	 */
2550 	list_add(&cqr->devlist, device->ccw_queue.next);
2551 
2552 	/* let the bh start the requests to keep them in order */
2553 	dasd_schedule_device_bh(device);
2554 
2555 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
2556 
2557 	wait_event(generic_waitq, _wait_for_wakeup(cqr));
2558 
2559 	if (cqr->status == DASD_CQR_DONE)
2560 		rc = 0;
2561 	else if (cqr->intrc)
2562 		rc = cqr->intrc;
2563 	else
2564 		rc = -EIO;
2565 
2566 	/* kick tasklets */
2567 	dasd_schedule_device_bh(device);
2568 	if (device->block)
2569 		dasd_schedule_block_bh(device->block);
2570 
2571 	return rc;
2572 }
2573 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2574 
2575 /*
2576  * Cancels a request that was started with dasd_sleep_on_req.
2577  * This is useful for timing out requests. The request will be
2578  * terminated if it is currently in i/o.
2579  * Returns 0 if request termination was successful
2580  *	   negative error code if termination failed
2581  * Cancellation of a request is an asynchronous operation! The calling
2582  * function has to wait until the request is properly returned via callback.
2583  */
2584 static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2585 {
2586 	struct dasd_device *device = cqr->startdev;
2587 	int rc = 0;
2588 
2589 	switch (cqr->status) {
2590 	case DASD_CQR_QUEUED:
2591 		/* request was not started - just set to cleared */
2592 		cqr->status = DASD_CQR_CLEARED;
2593 		break;
2594 	case DASD_CQR_IN_IO:
2595 		/* request in IO - terminate IO and release again */
2596 		rc = device->discipline->term_IO(cqr);
2597 		if (rc) {
2598 			dev_err(&device->cdev->dev,
2599 				"Cancelling request failed with rc=%d\n", rc);
2600 		} else {
2601 			cqr->stopclk = get_tod_clock();
2602 		}
2603 		break;
2604 	default: /* already finished or clear pending - do nothing */
2605 		break;
2606 	}
2607 	dasd_schedule_device_bh(device);
2608 	return rc;
2609 }
2610 
2611 int dasd_cancel_req(struct dasd_ccw_req *cqr)
2612 {
2613 	struct dasd_device *device = cqr->startdev;
2614 	unsigned long flags;
2615 	int rc;
2616 
2617 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2618 	rc = __dasd_cancel_req(cqr);
2619 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2620 	return rc;
2621 }
2622 
2623 /*
2624  * SECTION: Operations of the dasd_block layer.
2625  */
2626 
2627 /*
2628  * Timeout function for dasd_block. This is used when the block layer
2629  * is waiting for something that may not come reliably (e.g. a state
2630  * change interrupt).
2631  */
2632 static void dasd_block_timeout(struct timer_list *t)
2633 {
2634 	unsigned long flags;
2635 	struct dasd_block *block;
2636 
2637 	block = timer_container_of(block, t, timer);
2638 	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2639 	/* re-activate request queue */
2640 	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2641 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2642 	dasd_schedule_block_bh(block);
2643 	blk_mq_run_hw_queues(block->gdp->queue, true);
2644 }
2645 
2646 /*
2647  * Set up the timeout for a dasd_block in jiffies.
2648  */
2649 void dasd_block_set_timer(struct dasd_block *block, int expires)
2650 {
2651 	if (expires == 0)
2652 		timer_delete(&block->timer);
2653 	else
2654 		mod_timer(&block->timer, jiffies + expires);
2655 }
2656 EXPORT_SYMBOL(dasd_block_set_timer);
2657 
2658 /*
2659  * Clear timeout for a dasd_block.
2660  */
2661 void dasd_block_clear_timer(struct dasd_block *block)
2662 {
2663 	timer_delete(&block->timer);
2664 }
2665 EXPORT_SYMBOL(dasd_block_clear_timer);
2666 
2667 /*
2668  * Process finished error recovery ccw.
2669  */
2670 static void __dasd_process_erp(struct dasd_device *device,
2671 			       struct dasd_ccw_req *cqr)
2672 {
2673 	dasd_erp_fn_t erp_fn;
2674 
2675 	if (cqr->status == DASD_CQR_DONE)
2676 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2677 	else
2678 		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2679 	erp_fn = device->discipline->erp_postaction(cqr);
2680 	erp_fn(cqr);
2681 }
2682 
2683 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2684 {
2685 	struct request *req;
2686 	blk_status_t error = BLK_STS_OK;
2687 	unsigned int proc_bytes;
2688 	int status;
2689 
2690 	req = (struct request *) cqr->callback_data;
2691 	dasd_profile_end(cqr->block, cqr, req);
2692 
2693 	proc_bytes = cqr->proc_bytes;
2694 	status = cqr->block->base->discipline->free_cp(cqr, req);
2695 	if (status < 0)
2696 		error = errno_to_blk_status(status);
2697 	else if (status == 0) {
2698 		switch (cqr->intrc) {
2699 		case -EPERM:
2700 			/*
2701 			 * DASD doesn't implement SCSI/NVMe reservations, but it
2702 			 * implements a locking scheme similar to them. We
2703 			 * return this error when we no longer have the lock.
2704 			 */
2705 			error = BLK_STS_RESV_CONFLICT;
2706 			break;
2707 		case -ENOLINK:
2708 			error = BLK_STS_TRANSPORT;
2709 			break;
2710 		case -ETIMEDOUT:
2711 			error = BLK_STS_TIMEOUT;
2712 			break;
2713 		default:
2714 			error = BLK_STS_IOERR;
2715 			break;
2716 		}
2717 	}
2718 
2719 	/*
2720 	 * We need to take care of ETIMEDOUT errors here since the
2721 	 * complete callback does not get called in this case.
2722 	 * Take care of all errors here and avoid additional code to
2723 	 * transfer the error value to the complete callback.
2724 	 */
2725 	if (error) {
2726 		blk_mq_end_request(req, error);
2727 		blk_mq_run_hw_queues(req->q, true);
2728 	} else {
2729 		/*
2730 		 * Partially completed requests can happen with ESE devices.
2731 		 * During read we might have gotten an NRF error and have to
2732 		 * complete a request partially.
2733 		 */
2734 		if (proc_bytes) {
2735 			blk_update_request(req, BLK_STS_OK, proc_bytes);
2736 			blk_mq_requeue_request(req, true);
2737 		} else if (likely(!blk_should_fake_timeout(req->q))) {
2738 			blk_mq_complete_request(req);
2739 		}
2740 	}
2741 }
2742 
2743 /*
2744  * Process ccw request queue.
2745  */
2746 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2747 					   struct list_head *final_queue)
2748 {
2749 	struct list_head *l, *n;
2750 	struct dasd_ccw_req *cqr;
2751 	dasd_erp_fn_t erp_fn;
2752 	unsigned long flags;
2753 	struct dasd_device *base = block->base;
2754 
2755 restart:
2756 	/* Process request with final status. */
2757 	list_for_each_safe(l, n, &block->ccw_queue) {
2758 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2759 		if (cqr->status != DASD_CQR_DONE &&
2760 		    cqr->status != DASD_CQR_FAILED &&
2761 		    cqr->status != DASD_CQR_NEED_ERP &&
2762 		    cqr->status != DASD_CQR_TERMINATED)
2763 			continue;
2764 
2765 		if (cqr->status == DASD_CQR_TERMINATED) {
2766 			base->discipline->handle_terminated_request(cqr);
2767 			goto restart;
2768 		}
2769 
2770 		/*  Process requests that may be recovered */
2771 		if (cqr->status == DASD_CQR_NEED_ERP) {
2772 			erp_fn = base->discipline->erp_action(cqr);
2773 			if (IS_ERR(erp_fn(cqr)))
2774 				continue;
2775 			goto restart;
2776 		}
2777 
2778 		/* log sense for fatal error */
2779 		if (cqr->status == DASD_CQR_FAILED) {
2780 			dasd_log_sense(cqr, &cqr->irb);
2781 		}
2782 
2783 		/*
2784 		 * First call extended error reporting and check for autoquiesce
2785 		 */
2786 		spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2787 		if (cqr->status == DASD_CQR_FAILED &&
2788 		    dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
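			/*
			 * Autoquiesce was triggered: reset the request so it
			 * is started again once the device is resumed.
			 */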
2789 			cqr->status = DASD_CQR_FILLED;
2790 			cqr->retries = 255;
2791 			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
2792 			goto restart;
2793 		}
2794 		spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
2795 
2796 		/* Process finished ERP request. */
2797 		if (cqr->refers) {
2798 			__dasd_process_erp(base, cqr);
2799 			goto restart;
2800 		}
2801 
2802 		/* Rechain finished requests to final queue */
2803 		cqr->endclk = get_tod_clock();
2804 		list_move_tail(&cqr->blocklist, final_queue);
2805 	}
2806 }
2807 
2808 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2809 {
2810 	dasd_schedule_block_bh(cqr->block);
2811 }
2812 
2813 static void __dasd_block_start_head(struct dasd_block *block)
2814 {
2815 	struct dasd_ccw_req *cqr;
2816 
2817 	if (list_empty(&block->ccw_queue))
2818 		return;
2819 	/* We always begin with the first requests on the queue, as some
2820 	 * of the previously started requests have to be enqueued on a
2821 	 * dasd_device again for error recovery.
2822 	 */
2823 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2824 		if (cqr->status != DASD_CQR_FILLED)
2825 			continue;
2826 		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2827 		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2828 			cqr->status = DASD_CQR_FAILED;
2829 			cqr->intrc = -EPERM;
2830 			dasd_schedule_block_bh(block);
2831 			continue;
2832 		}
2833 		/* Non-temporary stop condition will trigger fail fast */
2834 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2835 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2836 		    !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
2837 			cqr->status = DASD_CQR_FAILED;
2838 			cqr->intrc = -ENOLINK;
2839 			dasd_schedule_block_bh(block);
2840 			continue;
2841 		}
2842 		/* Don't try to start requests if device is stopped */
2843 		if (block->base->stopped)
2844 			return;
2845 
2846 		/* just a failsafe check, should not happen */
2847 		if (!cqr->startdev)
2848 			cqr->startdev = block->base;
2849 
2850 		/* make sure that the requests we submit find their way back */
2851 		cqr->callback = dasd_return_cqr_cb;
2852 
2853 		dasd_add_request_tail(cqr);
2854 	}
2855 }
2856 
2857 /*
2858  * Central dasd_block layer routine. Takes requests from the generic
2859  * block layer request queue, creates ccw requests, enqueues them on
2860  * a dasd_device and processes ccw requests that have been returned.
2861  */
2862 static void dasd_block_tasklet(unsigned long data)
2863 {
2864 	struct dasd_block *block = (struct dasd_block *) data;
2865 	struct list_head final_queue;
2866 	struct list_head *l, *n;
2867 	struct dasd_ccw_req *cqr;
2868 	struct dasd_queue *dq;
2869 
2870 	atomic_set(&block->tasklet_scheduled, 0);
2871 	INIT_LIST_HEAD(&final_queue);
2872 	spin_lock_irq(&block->queue_lock);
2873 	/* Finish off requests on ccw queue */
2874 	__dasd_process_block_ccw_queue(block, &final_queue);
2875 	spin_unlock_irq(&block->queue_lock);
2876 
2877 	/* Now call the callback function of requests with final status */
2878 	list_for_each_safe(l, n, &final_queue) {
2879 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2880 		dq = cqr->dq;
2881 		spin_lock_irq(&dq->lock);
2882 		list_del_init(&cqr->blocklist);
2883 		__dasd_cleanup_cqr(cqr);
2884 		spin_unlock_irq(&dq->lock);
2885 	}
2886 
2887 	spin_lock_irq(&block->queue_lock);
2888 	/* Now check if the head of the ccw queue needs to be started. */
2889 	__dasd_block_start_head(block);
2890 	spin_unlock_irq(&block->queue_lock);
2891 
2892 	if (waitqueue_active(&shutdown_waitq))
2893 		wake_up(&shutdown_waitq);
2894 	dasd_put_device(block->base);
2895 }
2896 
2897 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2898 {
2899 	wake_up(&dasd_flush_wq);
2900 }
2901 
2902 /*
2903  * Requeue a request back to the block request queue;
2904  * this only works for block requests
2905  */
2906 static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
2907 {
2908 	struct request *req;
2909 
2910 	/*
2911 	 * If the request is an ERP request there is nothing to requeue.
2912 	 * This will be done with the remaining original request.
2913 	 */
2914 	if (cqr->refers)
2915 		return;
2916 	spin_lock_irq(&cqr->dq->lock);
2917 	req = (struct request *) cqr->callback_data;
2918 	blk_mq_requeue_request(req, true);
2919 	spin_unlock_irq(&cqr->dq->lock);
2922 }
2923 
2924 static int _dasd_requests_to_flushqueue(struct dasd_block *block,
2925 					struct list_head *flush_queue)
2926 {
2927 	struct dasd_ccw_req *cqr, *n;
2928 	unsigned long flags;
2929 	int rc, i;
2930 
2931 	spin_lock_irqsave(&block->queue_lock, flags);
2932 	rc = 0;
2933 restart:
2934 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2935 		/* if this request is currently owned by a dasd_device, cancel it */
2936 		if (cqr->status >= DASD_CQR_QUEUED)
2937 			rc = dasd_cancel_req(cqr);
2938 		if (rc < 0)
2939 			break;
2940 		/* Rechain request (including erp chain) so it won't be
2941 		 * touched by the dasd_block_tasklet anymore.
2942 		 * Replace the callback so we notice when the request
2943 		 * is returned from the dasd_device layer.
2944 		 */
2945 		cqr->callback = _dasd_wake_block_flush_cb;
2946 		for (i = 0; cqr; cqr = cqr->refers, i++)
2947 			list_move_tail(&cqr->blocklist, flush_queue);
2948 		if (i > 1)
2949 			/* moved more than one request - need to restart */
2950 			goto restart;
2951 	}
2952 	spin_unlock_irqrestore(&block->queue_lock, flags);
2953 
2954 	return rc;
2955 }
2956 
2957 /*
2958  * Go through all requests on the dasd_block request queue, cancel them
2959  * on the respective dasd_device, and return them to the generic
2960  * block layer.
2961  */
2962 static int dasd_flush_block_queue(struct dasd_block *block)
2963 {
2964 	struct dasd_ccw_req *cqr, *n;
2965 	struct list_head flush_queue;
2966 	unsigned long flags;
2967 	int rc;
2968 
2969 	INIT_LIST_HEAD(&flush_queue);
2970 	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
2971 
2972 	/* Now call the callback function of flushed requests */
2973 restart_cb:
2974 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
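		/*
		 * Wait until the dasd_device layer has handed the request
		 * back; status values below DASD_CQR_QUEUED are block layer
		 * states.
		 */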
2975 		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
2976 		/* Process finished ERP request. */
2977 		if (cqr->refers) {
2978 			spin_lock_bh(&block->queue_lock);
2979 			__dasd_process_erp(block->base, cqr);
2980 			spin_unlock_bh(&block->queue_lock);
2981 			/* restart list_for_xx loop since dasd_process_erp
2982 			 * might remove multiple elements */
2983 			goto restart_cb;
2984 		}
2985 		/* call the callback function */
2986 		spin_lock_irqsave(&cqr->dq->lock, flags);
2987 		cqr->endclk = get_tod_clock();
2988 		list_del_init(&cqr->blocklist);
2989 		__dasd_cleanup_cqr(cqr);
2990 		spin_unlock_irqrestore(&cqr->dq->lock, flags);
2991 	}
2992 	return rc;
2993 }
2994 
2995 /*
2996  * Schedules a call to dasd_block_tasklet over the block tasklet.
2997  */
2998 void dasd_schedule_block_bh(struct dasd_block *block)
2999 {
3000 	/* Protect against rescheduling. */
3001 	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
3002 		return;
3003 	/* life cycle of block is bound to its base device */
3004 	dasd_get_device(block->base);
3005 	tasklet_hi_schedule(&block->tasklet);
3006 }
3007 EXPORT_SYMBOL(dasd_schedule_block_bh);
3008 
3009 
3010 /*
3011  * SECTION: external block device operations
3012  * (request queue handling, open, release, etc.)
3013  */
3014 
3015 /*
3016  * Dasd request queue function. Called from the blk-mq layer.
3017  */
3018 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3019 				    const struct blk_mq_queue_data *qd)
3020 {
3021 	struct dasd_block *block = hctx->queue->queuedata;
3022 	struct dasd_queue *dq = hctx->driver_data;
3023 	struct request *req = qd->rq;
3024 	struct dasd_device *basedev;
3025 	struct dasd_ccw_req *cqr;
3026 	blk_status_t rc = BLK_STS_OK;
3027 
3028 	basedev = block->base;
3029 	spin_lock_irq(&dq->lock);
3030 	if (basedev->state < DASD_STATE_READY ||
3031 	    test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
3032 		DBF_DEV_EVENT(DBF_ERR, basedev,
3033 			      "device not ready for request %p", req);
3034 		rc = BLK_STS_IOERR;
3035 		goto out;
3036 	}
3037 
3038 	/*
3039 	 * if the device is stopped do not fetch new requests,
3040 	 * unless failfast is active, which will let requests fail
3041 	 * immediately in __dasd_block_start_head()
3042 	 */
3043 	if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
3044 		DBF_DEV_EVENT(DBF_ERR, basedev,
3045 			      "device stopped request %p", req);
3046 		rc = BLK_STS_RESOURCE;
3047 		goto out;
3048 	}
3049 
3050 	if (basedev->features & DASD_FEATURE_READONLY &&
3051 	    rq_data_dir(req) == WRITE) {
3052 		DBF_DEV_EVENT(DBF_ERR, basedev,
3053 			      "Rejecting write request %p", req);
3054 		rc = BLK_STS_IOERR;
3055 		goto out;
3056 	}
3057 
3058 	if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
3059 	    (basedev->features & DASD_FEATURE_FAILFAST ||
3060 	     blk_noretry_request(req))) {
3061 		DBF_DEV_EVENT(DBF_ERR, basedev,
3062 			      "Rejecting failfast request %p", req);
3063 		rc = BLK_STS_IOERR;
3064 		goto out;
3065 	}
3066 
3067 	cqr = basedev->discipline->build_cp(basedev, block, req);
3068 	if (IS_ERR(cqr)) {
3069 		if (PTR_ERR(cqr) == -EBUSY ||
3070 		    PTR_ERR(cqr) == -ENOMEM ||
3071 		    PTR_ERR(cqr) == -EAGAIN) {
3072 			rc = BLK_STS_RESOURCE;
3073 		} else if (PTR_ERR(cqr) == -EINVAL) {
3074 			rc = BLK_STS_INVAL;
3075 		} else {
3076 			DBF_DEV_EVENT(DBF_ERR, basedev,
3077 				      "CCW creation failed (rc=%ld) on request %p",
3078 				      PTR_ERR(cqr), req);
3079 			rc = BLK_STS_IOERR;
3080 		}
3081 		goto out;
3082 	}
3083 	/*
3084 	 * Note: callback is set to dasd_return_cqr_cb in
3085 	 * __dasd_block_start_head to cover erp requests as well
3086 	 */
3087 	cqr->callback_data = req;
3088 	cqr->status = DASD_CQR_FILLED;
3089 	cqr->dq = dq;
3090 
3091 	blk_mq_start_request(req);
3092 	spin_lock(&block->queue_lock);
3093 	list_add_tail(&cqr->blocklist, &block->ccw_queue);
3094 	INIT_LIST_HEAD(&cqr->devlist);
3095 	dasd_profile_start(block, cqr, req);
3096 	dasd_schedule_block_bh(block);
3097 	spin_unlock(&block->queue_lock);
3098 
3099 out:
3100 	spin_unlock_irq(&dq->lock);
3101 	return rc;
3102 }
3103 
3104 /*
3105  * Block timeout callback, called from the block layer
3106  *
3107  * Return values:
3108  * BLK_EH_RESET_TIMER if the request should be left running
3109  * BLK_EH_DONE if the request is handled or terminated
3110  *		      by the driver.
3111  */
3112 enum blk_eh_timer_return dasd_times_out(struct request *req)
3113 {
3114 	struct dasd_block *block = req->q->queuedata;
3115 	struct dasd_device *device;
3116 	struct dasd_ccw_req *cqr;
3117 	unsigned long flags;
3118 	int rc = 0;
3119 
3120 	cqr = blk_mq_rq_to_pdu(req);
3121 	if (!cqr)
3122 		return BLK_EH_DONE;
3123 
3124 	spin_lock_irqsave(&cqr->dq->lock, flags);
3125 	device = cqr->startdev ? cqr->startdev : block->base;
3126 	if (!device->blk_timeout) {
3127 		spin_unlock_irqrestore(&cqr->dq->lock, flags);
3128 		return BLK_EH_RESET_TIMER;
3129 	}
3130 	DBF_DEV_EVENT(DBF_WARNING, device,
3131 		      " dasd_times_out cqr %p status %x",
3132 		      cqr, cqr->status);
3133 
3134 	spin_lock(&block->queue_lock);
3135 	spin_lock(get_ccwdev_lock(device->cdev));
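	/*
	 * Mark the request as timed out; a negative retry counter prevents
	 * any further retries.
	 */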
3136 	cqr->retries = -1;
3137 	cqr->intrc = -ETIMEDOUT;
3138 	if (cqr->status >= DASD_CQR_QUEUED) {
3139 		rc = __dasd_cancel_req(cqr);
3140 	} else if (cqr->status == DASD_CQR_FILLED ||
3141 		   cqr->status == DASD_CQR_NEED_ERP) {
3142 		cqr->status = DASD_CQR_TERMINATED;
3143 	} else if (cqr->status == DASD_CQR_IN_ERP) {
3144 		struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3145 
3146 		list_for_each_entry_safe(searchcqr, nextcqr,
3147 					 &block->ccw_queue, blocklist) {
3148 			tmpcqr = searchcqr;
3149 			while (tmpcqr->refers)
3150 				tmpcqr = tmpcqr->refers;
3151 			if (tmpcqr != cqr)
3152 				continue;
3153 			/* searchcqr is an ERP request for cqr */
3154 			searchcqr->retries = -1;
3155 			searchcqr->intrc = -ETIMEDOUT;
3156 			if (searchcqr->status >= DASD_CQR_QUEUED) {
3157 				rc = __dasd_cancel_req(searchcqr);
3158 			} else if ((searchcqr->status == DASD_CQR_FILLED) ||
3159 				   (searchcqr->status == DASD_CQR_NEED_ERP)) {
3160 				searchcqr->status = DASD_CQR_TERMINATED;
3161 				rc = 0;
3162 			} else if (searchcqr->status == DASD_CQR_IN_ERP) {
3163 				/*
3164 				 * Shouldn't happen; most recent ERP
3165 				 * request is at the front of queue
3166 				 */
3167 				continue;
3168 			}
3169 			break;
3170 		}
3171 	}
3172 	spin_unlock(get_ccwdev_lock(device->cdev));
3173 	dasd_schedule_block_bh(block);
3174 	spin_unlock(&block->queue_lock);
3175 	spin_unlock_irqrestore(&cqr->dq->lock, flags);
3176 
3177 	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
3178 }
3179 
3180 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3181 			  unsigned int idx)
3182 {
3183 	struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3184 
3185 	if (!dq)
3186 		return -ENOMEM;
3187 
3188 	spin_lock_init(&dq->lock);
3189 	hctx->driver_data = dq;
3190 
3191 	return 0;
3192 }
3193 
3194 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3195 {
3196 	kfree(hctx->driver_data);
3197 	hctx->driver_data = NULL;
3198 }
3199 
3200 static void dasd_request_done(struct request *req)
3201 {
3202 	blk_mq_end_request(req, 0);
3203 	blk_mq_run_hw_queues(req->q, true);
3204 }
3205 
3206 struct blk_mq_ops dasd_mq_ops = {
3207 	.queue_rq = do_dasd_request,
3208 	.complete = dasd_request_done,
3209 	.timeout = dasd_times_out,
3210 	.init_hctx = dasd_init_hctx,
3211 	.exit_hctx = dasd_exit_hctx,
3212 };
3213 
3214 static int dasd_open(struct gendisk *disk, blk_mode_t mode)
3215 {
3216 	struct dasd_device *base;
3217 	int rc;
3218 
3219 	base = dasd_device_from_gendisk(disk);
3220 	if (!base)
3221 		return -ENODEV;
3222 
3223 	atomic_inc(&base->block->open_count);
3224 	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3225 		rc = -ENODEV;
3226 		goto unlock;
3227 	}
3228 
3229 	if (!try_module_get(base->discipline->owner)) {
3230 		rc = -EINVAL;
3231 		goto unlock;
3232 	}
3233 
3234 	if (dasd_probeonly) {
3235 		dev_info(&base->cdev->dev,
3236 			 "Accessing the DASD failed because it is in "
3237 			 "probeonly mode\n");
3238 		rc = -EPERM;
3239 		goto out;
3240 	}
3241 
3242 	if (base->state <= DASD_STATE_BASIC) {
3243 		DBF_DEV_EVENT(DBF_ERR, base, " %s",
3244 			      " Cannot open unrecognized device");
3245 		rc = -ENODEV;
3246 		goto out;
3247 	}
3248 	if ((mode & BLK_OPEN_WRITE) &&
3249 	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3250 	     (base->features & DASD_FEATURE_READONLY))) {
3251 		rc = -EROFS;
3252 		goto out;
3253 	}
3254 	dasd_put_device(base);
3255 	return 0;
3256 
3257 out:
3258 	module_put(base->discipline->owner);
3259 unlock:
3260 	atomic_dec(&base->block->open_count);
3261 	dasd_put_device(base);
3262 	return rc;
3263 }
3264 
3265 static void dasd_release(struct gendisk *disk)
3266 {
3267 	struct dasd_device *base = dasd_device_from_gendisk(disk);
3268 	if (base) {
3269 		atomic_dec(&base->block->open_count);
3270 		module_put(base->discipline->owner);
3271 		dasd_put_device(base);
3272 	}
3273 }
3274 
3275 /*
3276  * Return disk geometry.
3277  */
3278 static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
3279 {
3280 	struct dasd_device *base;
3281 
3282 	base = dasd_device_from_gendisk(disk);
3283 	if (!base)
3284 		return -ENODEV;
3285 
3286 	if (!base->discipline ||
3287 	    !base->discipline->fill_geometry) {
3288 		dasd_put_device(base);
3289 		return -EINVAL;
3290 	}
3291 	base->discipline->fill_geometry(base->block, geo);
3292 	/* geo->start is left unchanged by fill_geometry above */
3293 	geo->start >>= base->block->s2b_shift;
3294 	dasd_put_device(base);
3295 	return 0;
3296 }
3297 
3298 const struct block_device_operations
3299 dasd_device_operations = {
3300 	.owner		= THIS_MODULE,
3301 	.open		= dasd_open,
3302 	.release	= dasd_release,
3303 	.ioctl		= dasd_ioctl,
3304 	.getgeo		= dasd_getgeo,
3305 	.set_read_only	= dasd_set_read_only,
3306 };
3307 
3308 /*******************************************************************************
3309  * end of block device operations
3310  */
3311 
3312 static void
3313 dasd_exit(void)
3314 {
3315 #ifdef CONFIG_PROC_FS
3316 	dasd_proc_exit();
3317 #endif
3318 	dasd_eer_exit();
3319 	kmem_cache_destroy(dasd_page_cache);
3320 	dasd_page_cache = NULL;
3321 	dasd_gendisk_exit();
3322 	dasd_devmap_exit();
3323 	if (dasd_debug_area != NULL) {
3324 		debug_unregister(dasd_debug_area);
3325 		dasd_debug_area = NULL;
3326 	}
3327 	dasd_statistics_removeroot();
3328 }
3329 
3330 /*
3331  * SECTION: common functions for ccw_driver use
3332  */
3333 
3334 /*
3335  * Is the device read-only?
3336  * Note that this function does not report the setting of the
3337  * readonly device attribute, but how it is configured in z/VM.
3338  */
3339 int dasd_device_is_ro(struct dasd_device *device)
3340 {
3341 	struct ccw_dev_id dev_id;
3342 	struct diag210 diag_data;
3343 	int rc;
3344 
3345 	if (!machine_is_vm())
3346 		return 0;
3347 	ccw_device_get_id(device->cdev, &dev_id);
3348 	memset(&diag_data, 0, sizeof(diag_data));
3349 	diag_data.vrdcdvno = dev_id.devno;
3350 	diag_data.vrdclen = sizeof(diag_data);
3351 	rc = diag210(&diag_data);
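	/*
	 * Bit 0x80 of vrdcvfla indicates that z/VM presents the device
	 * read-only.
	 */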
3352 	if (rc == 0 || rc == 2) {
3353 		return diag_data.vrdcvfla & 0x80;
3354 	} else {
3355 		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3356 			  dev_id.devno, rc);
3357 		return 0;
3358 	}
3359 }
3360 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
3361 
3362 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3363 {
3364 	struct ccw_device *cdev = data;
3365 	int ret;
3366 
3367 	ret = ccw_device_set_online(cdev);
3368 	if (ret)
3369 		dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
3370 }
3371 
3372 /*
3373  * Initial attempt at a probe function. This can be simplified once
3374  * the other detection code is gone.
3375  */
3376 int dasd_generic_probe(struct ccw_device *cdev)
3377 {
3378 	cdev->handler = &dasd_int_handler;
3379 
3380 	/*
3381 	 * Automatically online either all dasd devices (dasd_autodetect)
3382 	 * or all devices specified with dasd= parameters during
3383 	 * initial probe.
3384 	 */
3385 	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
3386 	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3387 		async_schedule(dasd_generic_auto_online, cdev);
3388 	return 0;
3389 }
3390 EXPORT_SYMBOL_GPL(dasd_generic_probe);
3391 
3392 void dasd_generic_free_discipline(struct dasd_device *device)
3393 {
3394 	/* Forget the discipline information. */
3395 	if (device->discipline) {
3396 		if (device->discipline->uncheck_device)
3397 			device->discipline->uncheck_device(device);
3398 		module_put(device->discipline->owner);
3399 		device->discipline = NULL;
3400 	}
3401 	if (device->base_discipline) {
3402 		module_put(device->base_discipline->owner);
3403 		device->base_discipline = NULL;
3404 	}
3405 }
3406 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3407 
3408 /*
3409  * This will one day be called from a global not_oper handler.
3410  * It is also used by driver_unregister during module unload.
3411  */
3412 void dasd_generic_remove(struct ccw_device *cdev)
3413 {
3414 	struct dasd_device *device;
3415 	struct dasd_block *block;
3416 
3417 	device = dasd_device_from_cdev(cdev);
3418 	if (IS_ERR(device))
3419 		return;
3420 
3421 	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3422 	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3423 		/* Already doing offline processing */
3424 		dasd_put_device(device);
3425 		return;
3426 	}
3427 	/*
3428 	 * This device is removed unconditionally. Set the offline
3429 	 * flag to prevent dasd_open from opening it while it is
3430 	 * not quite down yet.
3431 	 */
3432 	dasd_set_target_state(device, DASD_STATE_NEW);
3433 	cdev->handler = NULL;
3434 	/* dasd_delete_device destroys the device reference. */
3435 	block = device->block;
3436 	dasd_delete_device(device);
3437 	/*
3438 	 * life cycle of block is bound to device, so delete it after
3439 	 * device was safely removed
3440 	 */
3441 	if (block)
3442 		dasd_free_block(block);
3443 }
3444 EXPORT_SYMBOL_GPL(dasd_generic_remove);
3445 
3446 /*
3447  * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3448  * the device is detected for the first time and is supposed to be used
3449  * or the user has started activation through sysfs.
3450  */
3451 int dasd_generic_set_online(struct ccw_device *cdev,
3452 			    struct dasd_discipline *base_discipline)
3453 {
3454 	struct dasd_discipline *discipline;
3455 	struct dasd_device *device;
3456 	struct device *dev;
3457 	int rc;
3458 
3459 	dev = &cdev->dev;
3460 
3461 	/* first online clears initial online feature flag */
3462 	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3463 	device = dasd_create_device(cdev);
3464 	if (IS_ERR(device))
3465 		return PTR_ERR(device);
3466 
3467 	discipline = base_discipline;
3468 	if (device->features & DASD_FEATURE_USEDIAG) {
3469 		if (!dasd_diag_discipline_pointer) {
3470 			/* Try to load the required module. */
3471 			rc = request_module(DASD_DIAG_MOD);
3472 			if (rc) {
3473 				dev_warn(dev, "Setting the DASD online failed "
3474 					 "because the required module %s "
3475 					 "could not be loaded (rc=%d)\n",
3476 					 DASD_DIAG_MOD, rc);
3477 				dasd_delete_device(device);
3478 				return -ENODEV;
3479 			}
3480 		}
3481 		/* Module init could have failed, so check again here after
3482 		 * request_module(). */
3483 		if (!dasd_diag_discipline_pointer) {
3484 			dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
3485 			dasd_delete_device(device);
3486 			return -ENODEV;
3487 		}
3488 		discipline = dasd_diag_discipline_pointer;
3489 	}
3490 	if (!try_module_get(base_discipline->owner)) {
3491 		dasd_delete_device(device);
3492 		return -EINVAL;
3493 	}
3494 	device->base_discipline = base_discipline;
3495 	if (!try_module_get(discipline->owner)) {
3496 		dasd_delete_device(device);
3497 		return -EINVAL;
3498 	}
3499 	device->discipline = discipline;
3500 
3501 	/* check_device will allocate block device if necessary */
3502 	rc = discipline->check_device(device);
3503 	if (rc) {
3504 		dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
3505 			 discipline->name, rc);
3506 		dasd_delete_device(device);
3507 		return rc;
3508 	}
3509 
3510 	dasd_set_target_state(device, DASD_STATE_ONLINE);
3511 	if (device->state <= DASD_STATE_KNOWN) {
3512 		dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
3513 		rc = -ENODEV;
3514 		dasd_set_target_state(device, DASD_STATE_NEW);
3515 		if (device->block)
3516 			dasd_free_block(device->block);
3517 		dasd_delete_device(device);
3518 	} else {
3519 		dev_dbg(dev, "dasd_generic device found\n");
3520 	}
3521 
3522 	wait_event(dasd_init_waitq, _wait_for_device(device));
3523 
3524 	dasd_put_device(device);
3525 	return rc;
3526 }
3527 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
3528 
3529 int dasd_generic_set_offline(struct ccw_device *cdev)
3530 {
3531 	int max_count, open_count, rc;
3532 	struct dasd_device *device;
3533 	struct dasd_block *block;
3534 	unsigned long flags;
3535 	struct device *dev;
3536 
3537 	dev = &cdev->dev;
3538 
3539 	rc = 0;
3540 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3541 	device = dasd_device_from_cdev_locked(cdev);
3542 	if (IS_ERR(device)) {
3543 		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3544 		return PTR_ERR(device);
3545 	}
3546 
3547 	/*
3548 	 * We must make sure that this device is currently not in use.
3549 	 * The open_count is increased for every opener; that includes
3550 	 * the blkdev_get in dasd_scan_partitions. We are only interested
3551 	 * in the other openers.
3552 	 */
3553 	if (device->block) {
3554 		max_count = device->block->bdev_file ? 0 : -1;
3555 		open_count = atomic_read(&device->block->open_count);
3556 		if (open_count > max_count) {
3557 			if (open_count > 0)
3558 				dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
3559 					 open_count);
3560 			else
3561 				dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
3562 			rc = -EBUSY;
3563 			goto out_err;
3564 		}
3565 	}
3566 
3567 	/*
3568 	 * Test if the offline processing is already running and exit if so.
3569 	 * If a safe offline is being processed, this can only be a normal
3570 	 * offline that should be able to overtake the safe offline and
3571 	 * cancel any I/O we no longer want to wait for.
3572 	 */
3573 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3574 		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3575 			clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3576 				  &device->flags);
3577 		} else {
3578 			rc = -EBUSY;
3579 			goto out_err;
3580 		}
3581 	}
3582 	set_bit(DASD_FLAG_OFFLINE, &device->flags);
3583 
3584 	/*
3585 	 * If safe_offline was requested, set the safe_offline_running flag
3586 	 * and clear safe_offline so that a subsequent normal offline can
3587 	 * overtake the safe_offline processing.
3588 	 */
3589 	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3590 	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3591 		/* need to unlock here to wait for outstanding I/O */
3592 		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3593 		/*
3594 		 * For a safe offline all I/O operations should be finished
3595 		 * before continuing the offline process, so mark the block
3596 		 * device dead first and then wait for our queues to become
3597 		 * empty.
3598 		 */
3599 		if (device->block && device->block->bdev_file)
3600 			bdev_mark_dead(file_bdev(device->block->bdev_file), false);
3601 		dasd_schedule_device_bh(device);
3602 		rc = wait_event_interruptible(shutdown_waitq,
3603 					      _wait_for_empty_queues(device));
3604 		if (rc != 0)
3605 			goto interrupted;
3606 
3607 		/*
3608 		 * Check if a normal offline process overtook the safe
3609 		 * offline processing. In this case simply do nothing besides
3610 		 * returning that we got interrupted;
3611 		 * otherwise mark safe offline as no longer running and
3612 		 * continue with the normal offline.
3613 		 */
3614 		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3615 		if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3616 			rc = -ERESTARTSYS;
3617 			goto out_err;
3618 		}
3619 		clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3620 	}
3621 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3622 
3623 	dasd_set_target_state(device, DASD_STATE_NEW);
3624 	/* dasd_delete_device destroys the device reference. */
3625 	block = device->block;
3626 	dasd_delete_device(device);
3627 	/*
3628 	 * The life cycle of block is bound to the device, so delete it
3629 	 * only after the device was safely removed.
3630 	 */
3631 	if (block)
3632 		dasd_free_block(block);
3633 
3634 	return 0;
3635 
3636 interrupted:
3637 	/* interrupted by signal */
3638 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3639 	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3640 	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3641 out_err:
3642 	dasd_put_device(device);
3643 	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3644 	return rc;
3645 }
3646 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3647 
3648 int dasd_generic_last_path_gone(struct dasd_device *device)
3649 {
3650 	struct dasd_ccw_req *cqr;
3651 
3652 	dev_warn(&device->cdev->dev, "No operational channel path is left "
3653 		 "for the device\n");
3654 	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3655 	/* First call extended error reporting and check for autoquiesce. */
3656 	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
3657 
3658 	if (device->state < DASD_STATE_BASIC)
3659 		return 0;
3660 	/* Device is active. We want to keep it. */
3661 	list_for_each_entry(cqr, &device->ccw_queue, devlist)
3662 		if ((cqr->status == DASD_CQR_IN_IO) ||
3663 		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3664 			cqr->status = DASD_CQR_QUEUED;
3665 			cqr->retries++;
3666 		}
3667 	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3668 	dasd_device_clear_timer(device);
3669 	dasd_schedule_device_bh(device);
3670 	return 1;
3671 }
3672 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3673 
3674 int dasd_generic_path_operational(struct dasd_device *device)
3675 {
3676 	dev_info(&device->cdev->dev, "A channel path to the device has become "
3677 		 "operational\n");
3678 	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3679 	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3680 	dasd_schedule_device_bh(device);
3681 	if (device->block) {
3682 		dasd_schedule_block_bh(device->block);
3683 		if (device->block->gdp)
3684 			blk_mq_run_hw_queues(device->block->gdp->queue, true);
3685 	}
3686 
3687 	if (!device->stopped)
3688 		wake_up(&generic_waitq);
3689 
3690 	return 1;
3691 }
3692 EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3693 
3694 int dasd_generic_notify(struct ccw_device *cdev, int event)
3695 {
3696 	struct dasd_device *device;
3697 	int ret;
3698 
3699 	device = dasd_device_from_cdev_locked(cdev);
3700 	if (IS_ERR(device))
3701 		return 0;
3702 	ret = 0;
3703 	switch (event) {
3704 	case CIO_GONE:
3705 	case CIO_BOXED:
3706 	case CIO_NO_PATH:
3707 		dasd_path_no_path(device);
3708 		ret = dasd_generic_last_path_gone(device);
3709 		break;
3710 	case CIO_OPER:
3711 		ret = 1;
3712 		if (dasd_path_get_opm(device))
3713 			ret = dasd_generic_path_operational(device);
3714 		break;
3715 	}
3716 	dasd_put_device(device);
3717 	return ret;
3718 }
3719 EXPORT_SYMBOL_GPL(dasd_generic_notify);
3720 
3721 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3722 {
3723 	struct dasd_device *device;
3724 	int chp, oldopm, hpfpm, ifccpm;
3725 
3726 	device = dasd_device_from_cdev_locked(cdev);
3727 	if (IS_ERR(device))
3728 		return;
3729 
3730 	oldopm = dasd_path_get_opm(device);
3731 	for (chp = 0; chp < 8; chp++) {
3732 		if (path_event[chp] & PE_PATH_GONE) {
3733 			dasd_path_notoper(device, chp);
3734 		}
3735 		if (path_event[chp] & PE_PATH_AVAILABLE) {
3736 			dasd_path_available(device, chp);
3737 			dasd_schedule_device_bh(device);
3738 		}
3739 		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3740 			if (!dasd_path_is_operational(device, chp) &&
3741 			    !dasd_path_need_verify(device, chp)) {
3742 				/*
3743 				 * We cannot establish a pathgroup on an
3744 				 * unavailable path, so trigger a path
3745 				 * verification first.
3746 				 */
3747 				dasd_path_available(device, chp);
3748 				dasd_schedule_device_bh(device);
3749 			}
3750 			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3751 				      "Pathgroup re-established\n");
3752 			if (device->discipline->kick_validate)
3753 				device->discipline->kick_validate(device);
3754 		}
3755 		if (path_event[chp] & PE_PATH_FCES_EVENT) {
3756 			dasd_path_fcsec_update(device, chp);
3757 			dasd_schedule_device_bh(device);
3758 		}
3759 	}
3760 	hpfpm = dasd_path_get_hpfpm(device);
3761 	ifccpm = dasd_path_get_ifccpm(device);
3762 	if (!dasd_path_get_opm(device) && hpfpm) {
3763 		/*
3764 		 * The device has no operational paths, but at least one path
3765 		 * is disabled due to HPF errors;
3766 		 * disable HPF completely and use the path(s) again.
3767 		 */
3768 		if (device->discipline->disable_hpf)
3769 			device->discipline->disable_hpf(device);
3770 		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3771 		dasd_path_set_tbvpm(device, hpfpm);
3772 		dasd_schedule_device_bh(device);
3773 		dasd_schedule_requeue(device);
3774 	} else if (!dasd_path_get_opm(device) && ifccpm) {
3775 		/*
3776 		 * The device has no operational paths, but at least one path
3777 		 * is disabled due to IFCC errors;
3778 		 * trigger path verification on the paths with IFCC errors.
3779 		 */
3780 		dasd_path_set_tbvpm(device, ifccpm);
3781 		dasd_schedule_device_bh(device);
3782 	}
3783 	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3784 		dev_warn(&device->cdev->dev,
3785 			 "No verified channel paths remain for the device\n");
3786 		DBF_DEV_EVENT(DBF_WARNING, device,
3787 			      "%s", "last verified path gone");
3788 		/* First call extended error reporting and check for autoquiesce. */
3789 		dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
3790 		dasd_device_set_stop_bits(device,
3791 					  DASD_STOPPED_DC_WAIT);
3792 	}
3793 	dasd_put_device(device);
3794 }
3795 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
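
/*
 * Illustrative sketch (not part of this file): a discipline driver typically
 * wires the generic helpers above into its struct ccw_driver so that the CIO
 * layer delivers state change and path events to the common code. Names
 * prefixed dasd_xyz_ are hypothetical placeholders for a discipline's own
 * callbacks and device ID table; dasd_generic_remove is assumed to be the
 * common removal helper defined elsewhere in this file.
 *
 *	static struct ccw_driver dasd_xyz_driver = {
 *		.driver = {
 *			.name	= "dasd-xyz",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids		= dasd_xyz_ids,
 *		.probe		= dasd_xyz_probe,
 *		.remove		= dasd_generic_remove,
 *		.set_online	= dasd_xyz_set_online,
 *		.set_offline	= dasd_generic_set_offline,
 *		.notify		= dasd_generic_notify,
 *		.path_event	= dasd_generic_path_event,
 *		.shutdown	= dasd_generic_shutdown,
 *	};
 *
 * The set_online callback usually ends up calling dasd_generic_set_online()
 * with the discipline that should drive the device.
 */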
3796 
3797 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3798 {
3799 	if (!dasd_path_get_opm(device) && lpm) {
3800 		dasd_path_set_opm(device, lpm);
3801 		dasd_generic_path_operational(device);
3802 	} else
3803 		dasd_path_add_opm(device, lpm);
3804 	return 0;
3805 }
3806 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
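
/*
 * Illustrative sketch (not part of this file): a discipline without special
 * path verification requirements can point its (assumed) verify_path hook at
 * this helper in its struct dasd_discipline definition, e.g.:
 *
 *	static struct dasd_discipline dasd_xyz_discipline = {
 *		.owner		= THIS_MODULE,
 *		.name		= "XYZ",
 *		.verify_path	= dasd_generic_verify_path,
 *	};
 */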
3807 
3808 void dasd_generic_space_exhaust(struct dasd_device *device,
3809 				struct dasd_ccw_req *cqr)
3810 {
3811 	/* First call extended error reporting and check for autoquiesce. */
3812 	dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
3813 
3814 	if (device->state < DASD_STATE_BASIC)
3815 		return;
3816 
3817 	if (cqr->status == DASD_CQR_IN_IO ||
3818 	    cqr->status == DASD_CQR_CLEAR_PENDING) {
3819 		cqr->status = DASD_CQR_QUEUED;
3820 		cqr->retries++;
3821 	}
3822 	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
3823 	dasd_device_clear_timer(device);
3824 	dasd_schedule_device_bh(device);
3825 }
3826 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
3827 
3828 void dasd_generic_space_avail(struct dasd_device *device)
3829 {
3830 	dev_info(&device->cdev->dev, "Extent pool space is available\n");
3831 	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
3832 
3833 	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
3834 	dasd_schedule_device_bh(device);
3835 
3836 	if (device->block) {
3837 		dasd_schedule_block_bh(device->block);
3838 		if (device->block->gdp)
3839 			blk_mq_run_hw_queues(device->block->gdp->queue, true);
3840 	}
3841 	if (!device->stopped)
3842 		wake_up(&generic_waitq);
3843 }
3844 EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
3845 
3846 /*
3847  * Clear active requests and requeue them to the block layer if possible.
3848  */
3849 int dasd_generic_requeue_all_requests(struct dasd_device *device)
3850 {
3851 	struct dasd_block *block = device->block;
3852 	struct list_head requeue_queue;
3853 	struct dasd_ccw_req *cqr, *n;
3854 	int rc;
3855 
3856 	if (!block)
3857 		return 0;
3858 
3859 	INIT_LIST_HEAD(&requeue_queue);
3860 	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
3861 
3862 	/* Now call the callback function of the flushed requests. */
3863 restart_cb:
3864 	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
3865 		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3866 		/* Process finished ERP request. */
3867 		if (cqr->refers) {
3868 			spin_lock_bh(&block->queue_lock);
3869 			__dasd_process_erp(block->base, cqr);
3870 			spin_unlock_bh(&block->queue_lock);
3871 			/* Restart the list_for_each_entry_safe loop since
3872 			 * __dasd_process_erp might remove multiple elements.
3873 			 */
3874 			goto restart_cb;
3875 		}
3876 		_dasd_requeue_request(cqr);
3877 		list_del_init(&cqr->blocklist);
3878 		cqr->block->base->discipline->free_cp(
3879 			cqr, (struct request *) cqr->callback_data);
3880 	}
3881 	dasd_schedule_device_bh(device);
3882 	return rc;
3883 }
3884 EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
3885 
3886 static void do_requeue_requests(struct work_struct *work)
3887 {
3888 	struct dasd_device *device = container_of(work, struct dasd_device,
3889 						  requeue_requests);
3890 	dasd_generic_requeue_all_requests(device);
3891 	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
3892 	if (device->block)
3893 		dasd_schedule_block_bh(device->block);
3894 	dasd_put_device(device);
3895 }
3896 
3897 void dasd_schedule_requeue(struct dasd_device *device)
3898 {
3899 	dasd_get_device(device);
3900 	/* Queue a call to do_requeue_requests to the kernel event daemon. */
3901 	if (!schedule_work(&device->requeue_requests))
3902 		dasd_put_device(device);
3903 }
3904 EXPORT_SYMBOL(dasd_schedule_requeue);
3905 
3906 static int dasd_handle_autoquiesce(struct dasd_device *device,
3907 				   struct dasd_ccw_req *cqr,
3908 				   unsigned int reason)
3909 {
3910 	/* In any case, write an EER message with the reason. */
3911 	if (dasd_eer_enabled(device))
3912 		dasd_eer_write(device, cqr, reason);
3913 
3914 	if (!test_bit(reason, &device->aq_mask))
3915 		return 0;
3916 
3917 	/* Notify EER about the autoquiesce. */
3918 	if (dasd_eer_enabled(device))
3919 		dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
3920 
3921 	dev_info(&device->cdev->dev,
3922 		 "The DASD has been put in the quiesce state\n");
3923 	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
3924 
3925 	if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
3926 		dasd_schedule_requeue(device);
3927 
3928 	return 1;
3929 }
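
/*
 * Illustrative sketch (not part of this file): error paths in this file call
 * dasd_handle_autoquiesce() with the failed request (or NULL) and an EER
 * trigger id, as done above for DASD_EER_NOPATH and DASD_EER_NOSPC. The
 * helper name and the DASD_EER_FATALERROR trigger below are assumed examples
 * of such a call site, not actual code from this driver.
 *
 *	static void dasd_handle_fatal_error(struct dasd_device *device,
 *					    struct dasd_ccw_req *cqr)
 *	{
 *		// Writes the EER record and, if the trigger bit is set in
 *		// device->aq_mask, stops the device and requeues its I/O.
 *		if (dasd_handle_autoquiesce(device, cqr, DASD_EER_FATALERROR))
 *			dev_info(&device->cdev->dev,
 *				 "Device was autoquiesced\n");
 *	}
 */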
3930 
3931 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
3932 						   int rdc_buffer_size,
3933 						   int magic)
3934 {
3935 	struct dasd_ccw_req *cqr;
3936 	struct ccw1 *ccw;
3937 
3938 	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
3939 				   NULL);
3940 
3941 	if (IS_ERR(cqr)) {
3942 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3943 				"Could not allocate RDC request");
3944 		return cqr;
3945 	}
3946 
3947 	ccw = cqr->cpaddr;
3948 	ccw->cmd_code = CCW_CMD_RDC;
3949 	ccw->cda = virt_to_dma32(cqr->data);
3950 	ccw->flags = 0;
3951 	ccw->count = rdc_buffer_size;
3952 	cqr->startdev = device;
3953 	cqr->memdev = device;
3954 	cqr->expires = 10 * HZ;
3955 	cqr->retries = 256;
3956 	cqr->buildclk = get_tod_clock();
3957 	cqr->status = DASD_CQR_FILLED;
3958 	return cqr;
3959 }
3960 
3962 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
3963 				void *rdc_buffer, int rdc_buffer_size)
3964 {
3965 	int ret;
3966 	struct dasd_ccw_req *cqr;
3967 
3968 	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
3969 	if (IS_ERR(cqr))
3970 		return PTR_ERR(cqr);
3971 
3972 	ret = dasd_sleep_on(cqr);
3973 	if (ret == 0)
3974 		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
3975 	dasd_sfree_request(cqr, cqr->memdev);
3976 	return ret;
3977 }
3978 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
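
/*
 * Illustrative sketch (not part of this file): a discipline typically reads
 * the Read Device Characteristics data into its private structure while the
 * device is brought online. The magic value, private structure and helper
 * name below are hypothetical placeholders.
 *
 *	struct dasd_xyz_private {
 *		struct dasd_xyz_characteristics rdc_data;
 *	};
 *
 *	static int dasd_xyz_read_rdc(struct dasd_device *device)
 *	{
 *		struct dasd_xyz_private *private = device->private;
 *		int rc;
 *
 *		rc = dasd_generic_read_dev_chars(device, DASD_XYZ_MAGIC,
 *						 &private->rdc_data,
 *						 sizeof(private->rdc_data));
 *		if (rc)
 *			dev_warn(&device->cdev->dev,
 *				 "Reading device characteristics failed, rc=%d\n",
 *				 rc);
 *		return rc;
 *	}
 */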
3979 
3980 /*
3981  * In command mode and transport mode we need to look for sense
3982  * data in different places. The sense data itself is always
3983  * an array of 32 bytes, so we can unify the sense data access
3984  * for both modes.
3985  */
3986 char *dasd_get_sense(struct irb *irb)
3987 {
3988 	struct tsb *tsb = NULL;
3989 	char *sense = NULL;
3990 
3991 	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
3992 		if (irb->scsw.tm.tcw)
3993 			tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
3994 		if (tsb && tsb->length == 64 && tsb->flags)
3995 			switch (tsb->flags & 0x07) {
3996 			case 1:	/* tsa_iostat */
3997 				sense = tsb->tsa.iostat.sense;
3998 				break;
3999 			case 2: /* tsa_ddpc */
4000 				sense = tsb->tsa.ddpc.sense;
4001 				break;
4002 			default:
4003 				/* currently we don't use interrogate data */
4004 				break;
4005 			}
4006 	} else if (irb->esw.esw0.erw.cons) {
4007 		sense = irb->ecw;
4008 	}
4009 	return sense;
4010 }
4011 EXPORT_SYMBOL_GPL(dasd_get_sense);
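
/*
 * Illustrative sketch (not part of this file): callers fetch the unified
 * 32-byte sense buffer and then inspect individual sense bytes. The helper
 * name and the bit mask below (command reject in sense byte 0) are assumed
 * examples; the disciplines define their own symbolic constants for these
 * bits.
 *
 *	static int dasd_xyz_is_cmd_reject(struct irb *irb)
 *	{
 *		char *sense = dasd_get_sense(irb);
 *
 *		// No sense data was presented with this interrupt.
 *		if (!sense)
 *			return 0;
 *		// Sense byte 0, bit 0: command reject (assumed mask).
 *		return (sense[0] & 0x80) != 0;
 *	}
 */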
4012 
4013 void dasd_generic_shutdown(struct ccw_device *cdev)
4014 {
4015 	struct dasd_device *device;
4016 
4017 	device = dasd_device_from_cdev(cdev);
4018 	if (IS_ERR(device))
4019 		return;
4020 
4021 	if (device->block)
4022 		dasd_schedule_block_bh(device->block);
4023 
4024 	dasd_schedule_device_bh(device);
4025 
4026 	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4027 }
4028 EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4029 
4030 static int __init dasd_init(void)
4031 {
4032 	int rc;
4033 
4034 	init_waitqueue_head(&dasd_init_waitq);
4035 	init_waitqueue_head(&dasd_flush_wq);
4036 	init_waitqueue_head(&generic_waitq);
4037 	init_waitqueue_head(&shutdown_waitq);
4038 
4039 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
4040 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4041 	if (dasd_debug_area == NULL) {
4042 		rc = -ENOMEM;
4043 		goto failed;
4044 	}
4045 	debug_register_view(dasd_debug_area, &debug_sprintf_view);
4046 	debug_set_level(dasd_debug_area, DBF_WARNING);
4047 
4048 	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4049 
4050 	dasd_diag_discipline_pointer = NULL;
4051 
4052 	dasd_statistics_createroot();
4053 
4054 	rc = dasd_devmap_init();
4055 	if (rc)
4056 		goto failed;
4057 	rc = dasd_gendisk_init();
4058 	if (rc)
4059 		goto failed;
4060 	rc = dasd_parse();
4061 	if (rc)
4062 		goto failed;
4063 	rc = dasd_eer_init();
4064 	if (rc)
4065 		goto failed;
4066 #ifdef CONFIG_PROC_FS
4067 	rc = dasd_proc_init();
4068 	if (rc)
4069 		goto failed;
4070 #endif
4071 
4072 	return 0;
4073 failed:
4074 	pr_info("The DASD device driver could not be initialized\n");
4075 	dasd_exit();
4076 	return rc;
4077 }
4078 
4079 module_init(dasd_init);
4080 module_exit(dasd_exit);
4081