// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */
10
11 #include <linux/export.h>
12 #include <linux/kmod.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/ctype.h>
16 #include <linux/major.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>
19 #include <linux/async.h>
20 #include <linux/mutex.h>
21 #include <linux/debugfs.h>
22 #include <linux/seq_file.h>
23 #include <linux/vmalloc.h>
24
25 #include <asm/machine.h>
26 #include <asm/ccwdev.h>
27 #include <asm/ebcdic.h>
28 #include <asm/idals.h>
29 #include <asm/itcw.h>
30 #include <asm/diag.h>
31
32 #include "dasd_int.h"
33 /*
34 * SECTION: Constant definitions to be used within this file
35 */
36 #define DASD_CHANQ_MAX_SIZE 4
37
38 #define DASD_DIAG_MOD "dasd_diag_mod"
39
40 /*
41 * SECTION: exported variables of dasd.c
42 */
43 debug_info_t *dasd_debug_area;
44 EXPORT_SYMBOL(dasd_debug_area);
45 static struct dentry *dasd_debugfs_root_entry;
46 struct dasd_discipline *dasd_diag_discipline_pointer;
47 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
48 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
49
50 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
51 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
52 " Copyright IBM Corp. 2000");
53 MODULE_LICENSE("GPL");
54
55 /*
56 * SECTION: prototypes for static functions of dasd.c
57 */
58 static int dasd_flush_block_queue(struct dasd_block *);
59 static void dasd_device_tasklet(unsigned long);
60 static void dasd_block_tasklet(unsigned long);
61 static void do_kick_device(struct work_struct *);
62 static void do_reload_device(struct work_struct *);
63 static void do_requeue_requests(struct work_struct *);
64 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
65 static void dasd_device_timeout(struct timer_list *);
66 static void dasd_block_timeout(struct timer_list *);
67 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
68 static void dasd_profile_init(struct dasd_profile *, struct dentry *);
69 static void dasd_profile_exit(struct dasd_profile *);
70 static void dasd_hosts_init(struct dentry *, struct dasd_device *);
71 static void dasd_hosts_exit(struct dasd_device *);
72 static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
73 unsigned int);
74 /*
75 * SECTION: Operations on the device structure.
76 */
77 static wait_queue_head_t dasd_init_waitq;
78 static wait_queue_head_t dasd_flush_wq;
79 static wait_queue_head_t generic_waitq;
80 static wait_queue_head_t shutdown_waitq;
81
82 /*
83 * Allocate memory for a new device structure.
84 */
struct dasd_device *dasd_alloc_device(void)
86 {
87 struct dasd_device *device;
88
89 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
90 if (!device)
91 return ERR_PTR(-ENOMEM);
92
93 /* Get two pages for normal block device operations. */
94 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
95 if (!device->ccw_mem) {
96 kfree(device);
97 return ERR_PTR(-ENOMEM);
98 }
99 /* Get one page for error recovery. */
100 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
101 if (!device->erp_mem) {
102 free_pages((unsigned long) device->ccw_mem, 1);
103 kfree(device);
104 return ERR_PTR(-ENOMEM);
105 }
106 /* Get two pages for ese format. */
107 device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
108 if (!device->ese_mem) {
109 free_page((unsigned long) device->erp_mem);
110 free_pages((unsigned long) device->ccw_mem, 1);
111 kfree(device);
112 return ERR_PTR(-ENOMEM);
113 }
114
115 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
116 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
117 dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
118 spin_lock_init(&device->mem_lock);
119 atomic_set(&device->tasklet_scheduled, 0);
120 tasklet_init(&device->tasklet, dasd_device_tasklet,
121 (unsigned long) device);
122 INIT_LIST_HEAD(&device->ccw_queue);
123 timer_setup(&device->timer, dasd_device_timeout, 0);
124 INIT_WORK(&device->kick_work, do_kick_device);
125 INIT_WORK(&device->reload_device, do_reload_device);
126 INIT_WORK(&device->requeue_requests, do_requeue_requests);
127 device->state = DASD_STATE_NEW;
128 device->target = DASD_STATE_NEW;
129 mutex_init(&device->state_mutex);
130 spin_lock_init(&device->profile.lock);
131 return device;
132 }
133
134 /*
135 * Free memory of a device structure.
136 */
void dasd_free_device(struct dasd_device *device)
138 {
139 kfree(device->private);
140 free_pages((unsigned long) device->ese_mem, 1);
141 free_page((unsigned long) device->erp_mem);
142 free_pages((unsigned long) device->ccw_mem, 1);
143 kfree(device);
144 }
145
/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
150 {
151 struct dasd_block *block;
152
153 block = kzalloc(sizeof(*block), GFP_ATOMIC);
154 if (!block)
155 return ERR_PTR(-ENOMEM);
156 /* open_count = 0 means device online but not in use */
157 atomic_set(&block->open_count, -1);
158
159 atomic_set(&block->tasklet_scheduled, 0);
160 tasklet_init(&block->tasklet, dasd_block_tasklet,
161 (unsigned long) block);
162 INIT_LIST_HEAD(&block->ccw_queue);
163 spin_lock_init(&block->queue_lock);
164 INIT_LIST_HEAD(&block->format_list);
165 spin_lock_init(&block->format_lock);
166 timer_setup(&block->timer, dasd_block_timeout, 0);
167 spin_lock_init(&block->profile.lock);
168
169 return block;
170 }
171 EXPORT_SYMBOL_GPL(dasd_alloc_block);
172
/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
177 {
178 kfree(block);
179 }
180 EXPORT_SYMBOL_GPL(dasd_free_block);
181
182 /*
183 * Make a new device known to the system.
184 */
static int dasd_state_new_to_known(struct dasd_device *device)
186 {
187 /*
188 * As long as the device is not in state DASD_STATE_NEW we want to
189 * keep the reference count > 0.
190 */
191 dasd_get_device(device);
192 device->state = DASD_STATE_KNOWN;
193 return 0;
194 }
195
196 /*
197 * Let the system forget about a device.
198 */
static int dasd_state_known_to_new(struct dasd_device *device)
200 {
201 /* Disable extended error reporting for this device. */
202 dasd_eer_disable(device);
203 device->state = DASD_STATE_NEW;
204
205 /* Give up reference we took in dasd_state_new_to_known. */
206 dasd_put_device(device);
207 return 0;
208 }
209
static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
212 {
213 struct dentry *pde;
214
215 if (!base_dentry)
216 return NULL;
217 pde = debugfs_create_dir(name, base_dentry);
218 if (!pde || IS_ERR(pde))
219 return NULL;
220 return pde;
221 }
222
223 /*
224 * Request the irq line for the device.
225 */
static int dasd_state_known_to_basic(struct dasd_device *device)
227 {
228 struct dasd_block *block = device->block;
229 int rc = 0;
230
231 /* Allocate and register gendisk structure. */
232 if (block) {
233 rc = dasd_gendisk_alloc(block);
234 if (rc)
235 return rc;
236 block->debugfs_dentry =
237 dasd_debugfs_setup(block->gdp->disk_name,
238 dasd_debugfs_root_entry);
239 dasd_profile_init(&block->profile, block->debugfs_dentry);
240 if (dasd_global_profile_level == DASD_PROFILE_ON)
241 dasd_profile_on(&device->block->profile);
242 }
243 device->debugfs_dentry =
244 dasd_debugfs_setup(dev_name(&device->cdev->dev),
245 dasd_debugfs_root_entry);
246 dasd_profile_init(&device->profile, device->debugfs_dentry);
247 dasd_hosts_init(device->debugfs_dentry, device);
248
249 /* register 'device' debug area, used for all DBF_DEV_XXX calls */
250 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
251 8 * sizeof(long));
252 debug_register_view(device->debug_area, &debug_sprintf_view);
253 debug_set_level(device->debug_area, DBF_WARNING);
254 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
255
256 device->state = DASD_STATE_BASIC;
257
258 return rc;
259 }
260
261 /*
262 * Release the irq line for the device. Terminate any running i/o.
263 */
static int dasd_state_basic_to_known(struct dasd_device *device)
265 {
266 int rc;
267
268 if (device->discipline->basic_to_known) {
269 rc = device->discipline->basic_to_known(device);
270 if (rc)
271 return rc;
272 }
273
274 if (device->block) {
275 dasd_profile_exit(&device->block->profile);
276 debugfs_remove(device->block->debugfs_dentry);
277 dasd_gendisk_free(device->block);
278 dasd_block_clear_timer(device->block);
279 }
280 rc = dasd_flush_device_queue(device);
281 if (rc)
282 return rc;
283 dasd_device_clear_timer(device);
284 dasd_profile_exit(&device->profile);
285 dasd_hosts_exit(device);
286 debugfs_remove(device->debugfs_dentry);
287 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
288 if (device->debug_area != NULL) {
289 debug_unregister(device->debug_area);
290 device->debug_area = NULL;
291 }
292 device->state = DASD_STATE_KNOWN;
293 return 0;
294 }
295
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
311 {
312 struct dasd_block *block = device->block;
313 struct queue_limits lim;
314 int rc = 0;
315
316 /* make disk known with correct capacity */
317 if (!block) {
318 device->state = DASD_STATE_READY;
319 goto out;
320 }
321
322 if (block->base->discipline->do_analysis != NULL)
323 rc = block->base->discipline->do_analysis(block);
324 if (rc) {
325 if (rc == -EAGAIN)
326 return rc;
327 device->state = DASD_STATE_UNFMT;
328 kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
329 KOBJ_CHANGE);
330 goto out;
331 }
332
333 lim = queue_limits_start_update(block->gdp->queue);
334 lim.max_dev_sectors = device->discipline->max_sectors(block);
335 lim.max_hw_sectors = lim.max_dev_sectors;
336 lim.logical_block_size = block->bp_block;
337 /*
338 * Adjust dma_alignment to match block_size - 1
339 * to ensure proper buffer alignment checks in the block layer.
340 */
341 lim.dma_alignment = lim.logical_block_size - 1;
342
343 if (device->discipline->has_discard) {
344 unsigned int max_bytes;
345
346 lim.discard_granularity = block->bp_block;
347
348 /* Calculate max_discard_sectors and make it PAGE aligned */
349 max_bytes = USHRT_MAX * block->bp_block;
350 max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
351
352 lim.max_hw_discard_sectors = max_bytes / block->bp_block;
353 lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
354 }
355 rc = queue_limits_commit_update(block->gdp->queue, &lim);
356 if (rc)
357 return rc;
358
359 set_capacity(block->gdp, block->blocks << block->s2b_shift);
360 device->state = DASD_STATE_READY;
361
362 rc = dasd_scan_partitions(block);
363 if (rc) {
364 device->state = DASD_STATE_BASIC;
365 return rc;
366 }
367
368 out:
369 if (device->discipline->basic_to_ready)
370 rc = device->discipline->basic_to_ready(device);
371 return rc;
372 }
373
static inline
int _wait_for_empty_queues(struct dasd_device *device)
376 {
377 if (device->block)
378 return list_empty(&device->ccw_queue) &&
379 list_empty(&device->block->ccw_queue);
380 else
381 return list_empty(&device->ccw_queue);
382 }
383
384 /*
385 * Remove device from block device layer. Destroy dirty buffers.
386 * Forget format information. Check if the target level is basic
387 * and if it is create fake disk for formatting.
388 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
390 {
391 int rc;
392
393 device->state = DASD_STATE_BASIC;
394 if (device->block) {
395 struct dasd_block *block = device->block;
396 rc = dasd_flush_block_queue(block);
397 if (rc) {
398 device->state = DASD_STATE_READY;
399 return rc;
400 }
401 dasd_destroy_partitions(block);
402 block->blocks = 0;
403 block->bp_block = 0;
404 block->s2b_shift = 0;
405 }
406 return 0;
407 }
408
409 /*
410 * Back to basic.
411 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
413 {
414 device->state = DASD_STATE_BASIC;
415 return 0;
416 }
417
418 /*
419 * Make the device online and schedule the bottom half to start
420 * the requeueing of requests from the linux request queue to the
421 * ccw queue.
422 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
425 {
426 device->state = DASD_STATE_ONLINE;
427 if (device->block) {
428 dasd_schedule_block_bh(device->block);
429 if ((device->features & DASD_FEATURE_USERAW)) {
430 kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
431 KOBJ_CHANGE);
432 return 0;
433 }
434 disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
435 KOBJ_CHANGE);
436 }
437 return 0;
438 }
439
440 /*
441 * Stop the requeueing of requests again.
442 */
static int dasd_state_online_to_ready(struct dasd_device *device)
444 {
445 int rc;
446
447 if (device->discipline->online_to_ready) {
448 rc = device->discipline->online_to_ready(device);
449 if (rc)
450 return rc;
451 }
452
453 device->state = DASD_STATE_READY;
454 if (device->block && !(device->features & DASD_FEATURE_USERAW))
455 disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
456 KOBJ_CHANGE);
457 return 0;
458 }
459
460 /*
461 * Device startup state changes.
462 */
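/*
 * Note: the startup ladder is NEW -> KNOWN -> BASIC -> READY -> ONLINE;
 * UNFMT is a side state entered from BASIC when the analysis finds an
 * unformatted volume.  Each step below is only taken while the current
 * state is still below device->target, and a failing step stops the
 * sequence.
 */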
static int dasd_increase_state(struct dasd_device *device)
464 {
465 int rc;
466
467 rc = 0;
468 if (device->state == DASD_STATE_NEW &&
469 device->target >= DASD_STATE_KNOWN)
470 rc = dasd_state_new_to_known(device);
471
472 if (!rc &&
473 device->state == DASD_STATE_KNOWN &&
474 device->target >= DASD_STATE_BASIC)
475 rc = dasd_state_known_to_basic(device);
476
477 if (!rc &&
478 device->state == DASD_STATE_BASIC &&
479 device->target >= DASD_STATE_READY)
480 rc = dasd_state_basic_to_ready(device);
481
482 if (!rc &&
483 device->state == DASD_STATE_UNFMT &&
484 device->target > DASD_STATE_UNFMT)
485 rc = -EPERM;
486
487 if (!rc &&
488 device->state == DASD_STATE_READY &&
489 device->target >= DASD_STATE_ONLINE)
490 rc = dasd_state_ready_to_online(device);
491
492 return rc;
493 }
494
495 /*
496 * Device shutdown state changes.
497 */
static int dasd_decrease_state(struct dasd_device *device)
499 {
500 int rc;
501
502 rc = 0;
503 if (device->state == DASD_STATE_ONLINE &&
504 device->target <= DASD_STATE_READY)
505 rc = dasd_state_online_to_ready(device);
506
507 if (!rc &&
508 device->state == DASD_STATE_READY &&
509 device->target <= DASD_STATE_BASIC)
510 rc = dasd_state_ready_to_basic(device);
511
512 if (!rc &&
513 device->state == DASD_STATE_UNFMT &&
514 device->target <= DASD_STATE_BASIC)
515 rc = dasd_state_unfmt_to_basic(device);
516
517 if (!rc &&
518 device->state == DASD_STATE_BASIC &&
519 device->target <= DASD_STATE_KNOWN)
520 rc = dasd_state_basic_to_known(device);
521
522 if (!rc &&
523 device->state == DASD_STATE_KNOWN &&
524 device->target <= DASD_STATE_NEW)
525 rc = dasd_state_known_to_new(device);
526
527 return rc;
528 }
529
530 /*
531 * This is the main startup/shutdown routine.
532 */
static void dasd_change_state(struct dasd_device *device)
534 {
535 int rc;
536
537 if (device->state == device->target)
538 /* Already where we want to go today... */
539 return;
540 if (device->state < device->target)
541 rc = dasd_increase_state(device);
542 else
543 rc = dasd_decrease_state(device);
544 if (rc == -EAGAIN)
545 return;
546 if (rc)
547 device->target = device->state;
548
549 /* let user-space know that the device status changed */
550 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
551
552 if (device->state == device->target)
553 wake_up(&dasd_init_waitq);
554 }
555
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
563 {
564 struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
565 mutex_lock(&device->state_mutex);
566 dasd_change_state(device);
567 mutex_unlock(&device->state_mutex);
568 dasd_schedule_device_bh(device);
569 dasd_put_device(device);
570 }
571
void dasd_kick_device(struct dasd_device *device)
573 {
574 dasd_get_device(device);
575 /* queue call to dasd_kick_device to the kernel event daemon. */
576 if (!schedule_work(&device->kick_work))
577 dasd_put_device(device);
578 }
579 EXPORT_SYMBOL(dasd_kick_device);
580
/*
 * dasd_reload_device will schedule a call to do_reload_device on the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
586 {
587 struct dasd_device *device = container_of(work, struct dasd_device,
588 reload_device);
589 device->discipline->reload(device);
590 dasd_put_device(device);
591 }
592
void dasd_reload_device(struct dasd_device *device)
594 {
595 dasd_get_device(device);
596 /* queue call to dasd_reload_device to the kernel event daemon. */
597 if (!schedule_work(&device->reload_device))
598 dasd_put_device(device);
599 }
600 EXPORT_SYMBOL(dasd_reload_device);
601
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
606 {
607 dasd_get_device(device);
608 mutex_lock(&device->state_mutex);
609 /* If we are in probeonly mode stop at DASD_STATE_READY. */
610 if (dasd_probeonly && target > DASD_STATE_READY)
611 target = DASD_STATE_READY;
612 if (device->target != target) {
613 if (device->state == target)
614 wake_up(&dasd_init_waitq);
615 device->target = target;
616 }
617 if (device->state != device->target)
618 dasd_change_state(device);
619 mutex_unlock(&device->state_mutex);
620 dasd_put_device(device);
621 }
622
623 /*
624 * Enable devices with device numbers in [from..to].
625 */
static inline int _wait_for_device(struct dasd_device *device)
627 {
628 return (device->state == device->target);
629 }
630
void dasd_enable_device(struct dasd_device *device)
632 {
633 dasd_set_target_state(device, DASD_STATE_ONLINE);
634 if (device->state <= DASD_STATE_KNOWN)
635 /* No discipline for device found. */
636 dasd_set_target_state(device, DASD_STATE_NEW);
637 /* Now wait for the devices to come up. */
638 wait_event(dasd_init_waitq, _wait_for_device(device));
639
640 dasd_reload_device(device);
641 if (device->discipline->kick_validate)
642 device->discipline->kick_validate(device);
643 }
644 EXPORT_SYMBOL(dasd_enable_device);
645
646 /*
647 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
648 */
649
650 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;
651
652 #ifdef CONFIG_DASD_PROFILE
653 struct dasd_profile dasd_global_profile = {
654 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
655 };
656 static struct dentry *dasd_debugfs_global_entry;
657
658 /*
659 * Add profiling information for cqr before execution.
660 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
664 {
665 struct list_head *l;
666 unsigned int counter;
667 struct dasd_device *device;
668
669 /* count the length of the chanq for statistics */
670 counter = 0;
671 if (dasd_global_profile_level || block->profile.data)
672 list_for_each(l, &block->ccw_queue)
673 if (++counter >= 31)
674 break;
675
676 spin_lock(&dasd_global_profile.lock);
677 if (dasd_global_profile.data) {
678 dasd_global_profile.data->dasd_io_nr_req[counter]++;
679 if (rq_data_dir(req) == READ)
680 dasd_global_profile.data->dasd_read_nr_req[counter]++;
681 }
682 spin_unlock(&dasd_global_profile.lock);
683
684 spin_lock(&block->profile.lock);
685 if (block->profile.data) {
686 block->profile.data->dasd_io_nr_req[counter]++;
687 if (rq_data_dir(req) == READ)
688 block->profile.data->dasd_read_nr_req[counter]++;
689 }
690 spin_unlock(&block->profile.lock);
691
692 /*
693 * We count the request for the start device, even though it may run on
694 * some other device due to error recovery. This way we make sure that
695 * we count each request only once.
696 */
697 device = cqr->startdev;
698 if (!device->profile.data)
699 return;
700
701 spin_lock(get_ccwdev_lock(device->cdev));
702 counter = 1; /* request is not yet queued on the start device */
703 list_for_each(l, &device->ccw_queue)
704 if (++counter >= 31)
705 break;
706 spin_unlock(get_ccwdev_lock(device->cdev));
707
708 spin_lock(&device->profile.lock);
709 device->profile.data->dasd_io_nr_req[counter]++;
710 if (rq_data_dir(req) == READ)
711 device->profile.data->dasd_read_nr_req[counter]++;
712 spin_unlock(&device->profile.lock);
713 }
714
715 /*
716 * Add profiling information for cqr after execution.
717 */
718
719 #define dasd_profile_counter(value, index) \
720 { \
721 for (index = 0; index < 31 && value >> (2+index); index++) \
722 ; \
723 }
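
/*
 * The macro above yields a logarithmic histogram bucket: the resulting
 * index is the smallest i with value < 2^(i + 2), capped at 31.  For
 * example, value = 100 gives index 5, i.e. the bucket covering 64..127.
 */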
724
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
737 {
738 /* in case of an overflow, reset the whole profile */
739 if (data->dasd_io_reqs == UINT_MAX) {
740 memset(data, 0, sizeof(*data));
741 ktime_get_real_ts64(&data->starttod);
742 }
743 data->dasd_io_reqs++;
744 data->dasd_io_sects += sectors;
745 if (is_alias)
746 data->dasd_io_alias++;
747 if (is_tpm)
748 data->dasd_io_tpm++;
749
750 data->dasd_io_secs[sectors_ind]++;
751 data->dasd_io_times[tottime_ind]++;
752 data->dasd_io_timps[tottimeps_ind]++;
753 data->dasd_io_time1[strtime_ind]++;
754 data->dasd_io_time2[irqtime_ind]++;
755 data->dasd_io_time2ps[irqtimeps_ind]++;
756 data->dasd_io_time3[endtime_ind]++;
757
758 if (is_read) {
759 data->dasd_read_reqs++;
760 data->dasd_read_sects += sectors;
761 if (is_alias)
762 data->dasd_read_alias++;
763 if (is_tpm)
764 data->dasd_read_tpm++;
765 data->dasd_read_secs[sectors_ind]++;
766 data->dasd_read_times[tottime_ind]++;
767 data->dasd_read_time1[strtime_ind]++;
768 data->dasd_read_time2[irqtime_ind]++;
769 data->dasd_read_time3[endtime_ind]++;
770 }
771 }
772
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
776 {
777 unsigned long strtime, irqtime, endtime, tottime;
778 unsigned long tottimeps, sectors;
779 struct dasd_device *device;
780 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
781 int irqtime_ind, irqtimeps_ind, endtime_ind;
782 struct dasd_profile_info *data;
783
784 device = cqr->startdev;
785 if (!(dasd_global_profile_level ||
786 block->profile.data ||
787 device->profile.data))
788 return;
789
790 sectors = blk_rq_sectors(req);
791 if (!cqr->buildclk || !cqr->startclk ||
792 !cqr->stopclk || !cqr->endclk ||
793 !sectors)
794 return;
795
796 strtime = ((cqr->startclk - cqr->buildclk) >> 12);
797 irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
798 endtime = ((cqr->endclk - cqr->stopclk) >> 12);
799 tottime = ((cqr->endclk - cqr->buildclk) >> 12);
800 tottimeps = tottime / sectors;
801
802 dasd_profile_counter(sectors, sectors_ind);
803 dasd_profile_counter(tottime, tottime_ind);
804 dasd_profile_counter(tottimeps, tottimeps_ind);
805 dasd_profile_counter(strtime, strtime_ind);
806 dasd_profile_counter(irqtime, irqtime_ind);
807 dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
808 dasd_profile_counter(endtime, endtime_ind);
809
810 spin_lock(&dasd_global_profile.lock);
811 if (dasd_global_profile.data) {
812 data = dasd_global_profile.data;
813 data->dasd_sum_times += tottime;
814 data->dasd_sum_time_str += strtime;
815 data->dasd_sum_time_irq += irqtime;
816 data->dasd_sum_time_end += endtime;
817 dasd_profile_end_add_data(dasd_global_profile.data,
818 cqr->startdev != block->base,
819 cqr->cpmode == 1,
820 rq_data_dir(req) == READ,
821 sectors, sectors_ind, tottime_ind,
822 tottimeps_ind, strtime_ind,
823 irqtime_ind, irqtimeps_ind,
824 endtime_ind);
825 }
826 spin_unlock(&dasd_global_profile.lock);
827
828 spin_lock(&block->profile.lock);
829 if (block->profile.data) {
830 data = block->profile.data;
831 data->dasd_sum_times += tottime;
832 data->dasd_sum_time_str += strtime;
833 data->dasd_sum_time_irq += irqtime;
834 data->dasd_sum_time_end += endtime;
835 dasd_profile_end_add_data(block->profile.data,
836 cqr->startdev != block->base,
837 cqr->cpmode == 1,
838 rq_data_dir(req) == READ,
839 sectors, sectors_ind, tottime_ind,
840 tottimeps_ind, strtime_ind,
841 irqtime_ind, irqtimeps_ind,
842 endtime_ind);
843 }
844 spin_unlock(&block->profile.lock);
845
846 spin_lock(&device->profile.lock);
847 if (device->profile.data) {
848 data = device->profile.data;
849 data->dasd_sum_times += tottime;
850 data->dasd_sum_time_str += strtime;
851 data->dasd_sum_time_irq += irqtime;
852 data->dasd_sum_time_end += endtime;
853 dasd_profile_end_add_data(device->profile.data,
854 cqr->startdev != block->base,
855 cqr->cpmode == 1,
856 rq_data_dir(req) == READ,
857 sectors, sectors_ind, tottime_ind,
858 tottimeps_ind, strtime_ind,
859 irqtime_ind, irqtimeps_ind,
860 endtime_ind);
861 }
862 spin_unlock(&device->profile.lock);
863 }
864
void dasd_profile_reset(struct dasd_profile *profile)
866 {
867 struct dasd_profile_info *data;
868
869 spin_lock_bh(&profile->lock);
870 data = profile->data;
871 if (!data) {
872 spin_unlock_bh(&profile->lock);
873 return;
874 }
875 memset(data, 0, sizeof(*data));
876 ktime_get_real_ts64(&data->starttod);
877 spin_unlock_bh(&profile->lock);
878 }
879
int dasd_profile_on(struct dasd_profile *profile)
881 {
882 struct dasd_profile_info *data;
883
884 data = kzalloc(sizeof(*data), GFP_KERNEL);
885 if (!data)
886 return -ENOMEM;
887 spin_lock_bh(&profile->lock);
888 if (profile->data) {
889 spin_unlock_bh(&profile->lock);
890 kfree(data);
891 return 0;
892 }
893 ktime_get_real_ts64(&data->starttod);
894 profile->data = data;
895 spin_unlock_bh(&profile->lock);
896 return 0;
897 }
898
void dasd_profile_off(struct dasd_profile *profile)
900 {
901 spin_lock_bh(&profile->lock);
902 kfree(profile->data);
903 profile->data = NULL;
904 spin_unlock_bh(&profile->lock);
905 }
906
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
908 {
909 char *buffer;
910
911 buffer = vmalloc(user_len + 1);
912 if (buffer == NULL)
913 return ERR_PTR(-ENOMEM);
914 if (copy_from_user(buffer, user_buf, user_len) != 0) {
915 vfree(buffer);
916 return ERR_PTR(-EFAULT);
917 }
918 /* got the string, now strip linefeed. */
919 if (buffer[user_len - 1] == '\n')
920 buffer[user_len - 1] = 0;
921 else
922 buffer[user_len] = 0;
923 return buffer;
924 }
925
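/*
 * Handle writes to the debugfs "statistics" file: "reset" clears the
 * collected data, "on" enables data collection for this profile and
 * "off" disables it.
 */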
static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
929 {
930 char *buffer, *str;
931 int rc;
932 struct seq_file *m = (struct seq_file *)file->private_data;
933 struct dasd_profile *prof = m->private;
934
935 if (user_len > 65536)
936 user_len = 65536;
937 buffer = dasd_get_user_string(user_buf, user_len);
938 if (IS_ERR(buffer))
939 return PTR_ERR(buffer);
940
941 str = skip_spaces(buffer);
942 rc = user_len;
943 if (strncmp(str, "reset", 5) == 0) {
944 dasd_profile_reset(prof);
945 } else if (strncmp(str, "on", 2) == 0) {
946 rc = dasd_profile_on(prof);
947 if (rc)
948 goto out;
949 rc = user_len;
950 if (prof == &dasd_global_profile) {
951 dasd_profile_reset(prof);
952 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
953 }
954 } else if (strncmp(str, "off", 3) == 0) {
955 if (prof == &dasd_global_profile)
956 dasd_global_profile_level = DASD_PROFILE_OFF;
957 dasd_profile_off(prof);
958 } else
959 rc = -EINVAL;
960 out:
961 vfree(buffer);
962 return rc;
963 }
964
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
966 {
967 int i;
968
969 for (i = 0; i < 32; i++)
970 seq_printf(m, "%u ", array[i]);
971 seq_putc(m, '\n');
972 }
973
static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
976 {
977 seq_printf(m, "start_time %ptSp\n", &data->starttod);
978 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
979 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
980 seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
981 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
982 seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
983 data->dasd_sum_times / data->dasd_io_reqs : 0UL);
984 seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
985 data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
986 seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
987 data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
988 seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
989 data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
990 seq_puts(m, "histogram_sectors ");
991 dasd_stats_array(m, data->dasd_io_secs);
992 seq_puts(m, "histogram_io_times ");
993 dasd_stats_array(m, data->dasd_io_times);
994 seq_puts(m, "histogram_io_times_weighted ");
995 dasd_stats_array(m, data->dasd_io_timps);
996 seq_puts(m, "histogram_time_build_to_ssch ");
997 dasd_stats_array(m, data->dasd_io_time1);
998 seq_puts(m, "histogram_time_ssch_to_irq ");
999 dasd_stats_array(m, data->dasd_io_time2);
1000 seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
1001 dasd_stats_array(m, data->dasd_io_time2ps);
1002 seq_puts(m, "histogram_time_irq_to_end ");
1003 dasd_stats_array(m, data->dasd_io_time3);
1004 seq_puts(m, "histogram_ccw_queue_length ");
1005 dasd_stats_array(m, data->dasd_io_nr_req);
1006 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
1007 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
1008 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
1009 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
1010 seq_puts(m, "histogram_read_sectors ");
1011 dasd_stats_array(m, data->dasd_read_secs);
1012 seq_puts(m, "histogram_read_times ");
1013 dasd_stats_array(m, data->dasd_read_times);
1014 seq_puts(m, "histogram_read_time_build_to_ssch ");
1015 dasd_stats_array(m, data->dasd_read_time1);
1016 seq_puts(m, "histogram_read_time_ssch_to_irq ");
1017 dasd_stats_array(m, data->dasd_read_time2);
1018 seq_puts(m, "histogram_read_time_irq_to_end ");
1019 dasd_stats_array(m, data->dasd_read_time3);
1020 seq_puts(m, "histogram_read_ccw_queue_length ");
1021 dasd_stats_array(m, data->dasd_read_nr_req);
1022 }
1023
static int dasd_stats_show(struct seq_file *m, void *v)
1025 {
1026 struct dasd_profile *profile;
1027 struct dasd_profile_info *data;
1028
1029 profile = m->private;
1030 spin_lock_bh(&profile->lock);
1031 data = profile->data;
1032 if (!data) {
1033 spin_unlock_bh(&profile->lock);
1034 seq_puts(m, "disabled\n");
1035 return 0;
1036 }
1037 dasd_stats_seq_print(m, data);
1038 spin_unlock_bh(&profile->lock);
1039 return 0;
1040 }
1041
static int dasd_stats_open(struct inode *inode, struct file *file)
1043 {
1044 struct dasd_profile *profile = inode->i_private;
1045 return single_open(file, dasd_stats_show, profile);
1046 }
1047
1048 static const struct file_operations dasd_stats_raw_fops = {
1049 .owner = THIS_MODULE,
1050 .open = dasd_stats_open,
1051 .read = seq_read,
1052 .llseek = seq_lseek,
1053 .release = single_release,
1054 .write = dasd_stats_write,
1055 };
1056
static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
1059 {
1060 umode_t mode;
1061 struct dentry *pde;
1062
1063 if (!base_dentry)
1064 return;
1065 profile->dentry = NULL;
1066 profile->data = NULL;
1067 mode = (S_IRUSR | S_IWUSR | S_IFREG);
1068 pde = debugfs_create_file("statistics", mode, base_dentry,
1069 profile, &dasd_stats_raw_fops);
1070 if (pde && !IS_ERR(pde))
1071 profile->dentry = pde;
1072 return;
1073 }
1074
static void dasd_profile_exit(struct dasd_profile *profile)
1076 {
1077 dasd_profile_off(profile);
1078 debugfs_remove(profile->dentry);
1079 profile->dentry = NULL;
1080 }
1081
static void dasd_statistics_removeroot(void)
1083 {
1084 dasd_global_profile_level = DASD_PROFILE_OFF;
1085 dasd_profile_exit(&dasd_global_profile);
1086 debugfs_remove(dasd_debugfs_global_entry);
1087 debugfs_remove(dasd_debugfs_root_entry);
1088 }
1089
static void dasd_statistics_createroot(void)
1091 {
1092 struct dentry *pde;
1093
1094 dasd_debugfs_root_entry = NULL;
1095 pde = debugfs_create_dir("dasd", NULL);
1096 if (!pde || IS_ERR(pde))
1097 goto error;
1098 dasd_debugfs_root_entry = pde;
1099 pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
1100 if (!pde || IS_ERR(pde))
1101 goto error;
1102 dasd_debugfs_global_entry = pde;
1103 dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
1104 return;
1105
1106 error:
1107 DBF_EVENT(DBF_ERR, "%s",
1108 "Creation of the dasd debugfs interface failed");
1109 dasd_statistics_removeroot();
1110 return;
1111 }
1112
1113 #else
1114 #define dasd_profile_start(block, cqr, req) do {} while (0)
1115 #define dasd_profile_end(block, cqr, req) do {} while (0)
1116
static void dasd_statistics_createroot(void)
1118 {
1119 return;
1120 }
1121
static void dasd_statistics_removeroot(void)
1123 {
1124 return;
1125 }
1126
static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
1129 {
1130 return;
1131 }
1132
static void dasd_profile_exit(struct dasd_profile *profile)
1134 {
1135 return;
1136 }
1137
int dasd_profile_on(struct dasd_profile *profile)
1139 {
1140 return 0;
1141 }
1142
1143 #endif /* CONFIG_DASD_PROFILE */
1144
static int dasd_hosts_show(struct seq_file *m, void *v)
1146 {
1147 struct dasd_device *device;
1148 int rc = -EOPNOTSUPP;
1149
1150 device = m->private;
1151 dasd_get_device(device);
1152
1153 if (device->discipline->hosts_print)
1154 rc = device->discipline->hosts_print(device, m);
1155
1156 dasd_put_device(device);
1157 return rc;
1158 }
1159
1160 DEFINE_SHOW_ATTRIBUTE(dasd_hosts);
1161
static void dasd_hosts_exit(struct dasd_device *device)
1163 {
1164 debugfs_remove(device->hosts_dentry);
1165 device->hosts_dentry = NULL;
1166 }
1167
static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
1170 {
1171 struct dentry *pde;
1172 umode_t mode;
1173
1174 if (!base_dentry)
1175 return;
1176
1177 mode = S_IRUSR | S_IFREG;
1178 pde = debugfs_create_file("host_access_list", mode, base_dentry,
1179 device, &dasd_hosts_fops);
1180 if (pde && !IS_ERR(pde))
1181 device->hosts_dentry = pde;
1182 }
1183
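/*
 * Allocate a ccw request with 'cplength' CCWs and 'datasize' bytes of data
 * from the per-device ccw chunk pool; if no cqr is passed in, space for the
 * cqr itself is taken from the same chunk.  Illustrative call sequence only
 * (the magic and the data structure are discipline specific):
 *
 *	cqr = dasd_smalloc_request(magic, 1, sizeof(*my_data), device, NULL);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	...
 *	dasd_sfree_request(cqr, device);
 */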
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
1187 {
1188 unsigned long flags;
1189 char *data, *chunk;
1190 int size = 0;
1191
1192 if (cplength > 0)
1193 size += cplength * sizeof(struct ccw1);
1194 if (datasize > 0)
1195 size += datasize;
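	/* If no cqr was passed in, add room for it, rounded up to 8 bytes. */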
1196 if (!cqr)
1197 size += (sizeof(*cqr) + 7L) & -8L;
1198
1199 spin_lock_irqsave(&device->mem_lock, flags);
1200 data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
1201 spin_unlock_irqrestore(&device->mem_lock, flags);
1202 if (!chunk)
1203 return ERR_PTR(-ENOMEM);
1204 if (!cqr) {
1205 cqr = (void *) data;
1206 data += (sizeof(*cqr) + 7L) & -8L;
1207 }
1208 memset(cqr, 0, sizeof(*cqr));
1209 cqr->mem_chunk = chunk;
1210 if (cplength > 0) {
1211 cqr->cpaddr = data;
1212 data += cplength * sizeof(struct ccw1);
1213 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1214 }
1215 if (datasize > 0) {
1216 cqr->data = data;
1217 memset(cqr->data, 0, datasize);
1218 }
1219 cqr->magic = magic;
1220 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1221 dasd_get_device(device);
1222 return cqr;
1223 }
1224 EXPORT_SYMBOL(dasd_smalloc_request);
1225
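/*
 * Like dasd_smalloc_request(), but the memory is taken from the separate
 * ese_chunks pool so that on-the-fly format requests for ESE (thin
 * provisioned) volumes have a dedicated memory reserve.  Requests allocated
 * here must be freed with dasd_ffree_request().
 */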
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
1229 {
1230 struct dasd_ccw_req *cqr;
1231 unsigned long flags;
1232 int size, cqr_size;
1233 char *data;
1234
1235 cqr_size = (sizeof(*cqr) + 7L) & -8L;
1236 size = cqr_size;
1237 if (cplength > 0)
1238 size += cplength * sizeof(struct ccw1);
1239 if (datasize > 0)
1240 size += datasize;
1241
1242 spin_lock_irqsave(&device->mem_lock, flags);
1243 cqr = dasd_alloc_chunk(&device->ese_chunks, size);
1244 spin_unlock_irqrestore(&device->mem_lock, flags);
1245 if (!cqr)
1246 return ERR_PTR(-ENOMEM);
1247 memset(cqr, 0, sizeof(*cqr));
1248 data = (char *)cqr + cqr_size;
1249 cqr->cpaddr = NULL;
1250 if (cplength > 0) {
1251 cqr->cpaddr = data;
1252 data += cplength * sizeof(struct ccw1);
1253 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
1254 }
1255 cqr->data = NULL;
1256 if (datasize > 0) {
1257 cqr->data = data;
1258 memset(cqr->data, 0, datasize);
1259 }
1260
1261 cqr->magic = magic;
1262 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1263 dasd_get_device(device);
1264
1265 return cqr;
1266 }
1267 EXPORT_SYMBOL(dasd_fmalloc_request);
1268
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1270 {
1271 unsigned long flags;
1272
1273 spin_lock_irqsave(&device->mem_lock, flags);
1274 dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
1275 spin_unlock_irqrestore(&device->mem_lock, flags);
1276 dasd_put_device(device);
1277 }
1278 EXPORT_SYMBOL(dasd_sfree_request);
1279
void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
1281 {
1282 unsigned long flags;
1283
1284 spin_lock_irqsave(&device->mem_lock, flags);
1285 dasd_free_chunk(&device->ese_chunks, cqr);
1286 spin_unlock_irqrestore(&device->mem_lock, flags);
1287 dasd_put_device(device);
1288 }
1289 EXPORT_SYMBOL(dasd_ffree_request);
1290
1291 /*
1292 * Check discipline magic in cqr.
1293 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
1295 {
1296 struct dasd_device *device;
1297
1298 if (cqr == NULL)
1299 return -EINVAL;
1300 device = cqr->startdev;
1301 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
1302 DBF_DEV_EVENT(DBF_WARNING, device,
1303 " dasd_ccw_req 0x%08x magic doesn't match"
1304 " discipline 0x%08x",
1305 cqr->magic,
1306 *(unsigned int *) device->discipline->name);
1307 return -EINVAL;
1308 }
1309 return 0;
1310 }
1311
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
1319 {
1320 struct dasd_device *device;
1321 int retries, rc;
1322
1323 /* Check the cqr */
1324 rc = dasd_check_cqr(cqr);
1325 if (rc)
1326 return rc;
1327 retries = 0;
1328 device = (struct dasd_device *) cqr->startdev;
1329 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
1330 rc = ccw_device_clear(device->cdev, (long) cqr);
1331 switch (rc) {
1332 case 0: /* termination successful */
1333 cqr->status = DASD_CQR_CLEAR_PENDING;
1334 cqr->stopclk = get_tod_clock();
1335 cqr->starttime = 0;
1336 DBF_DEV_EVENT(DBF_DEBUG, device,
1337 "terminate cqr %p successful",
1338 cqr);
1339 break;
1340 case -ENODEV:
1341 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1342 "device gone, retry");
1343 break;
1344 case -EINVAL:
1345 /*
1346 * device not valid so no I/O could be running
1347 * handle CQR as termination successful
1348 */
1349 cqr->status = DASD_CQR_CLEARED;
1350 cqr->stopclk = get_tod_clock();
1351 cqr->starttime = 0;
1352 /* no retries for invalid devices */
1353 cqr->retries = -1;
1354 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1355 "EINVAL, handle as terminated");
1356 /* fake rc to success */
1357 rc = 0;
1358 break;
1359 default:
1360 dev_err(&device->cdev->dev,
1361 "Unexpected error during request termination %d\n", rc);
1362 BUG();
1363 break;
1364 }
1365 retries++;
1366 }
1367 dasd_schedule_device_bh(device);
1368 return rc;
1369 }
1370 EXPORT_SYMBOL(dasd_term_IO);
1371
1372 /*
1373 * Start the i/o. This start_IO can fail if the channel is really busy.
1374 * In that case set up a timer to start the request later.
1375 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
1377 {
1378 struct dasd_device *device;
1379 int rc;
1380
1381 /* Check the cqr */
1382 rc = dasd_check_cqr(cqr);
1383 if (rc) {
1384 cqr->intrc = rc;
1385 return rc;
1386 }
1387 device = (struct dasd_device *) cqr->startdev;
1388 if (((cqr->block &&
1389 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
1390 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
1391 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
1392 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
1393 "because of stolen lock", cqr);
1394 cqr->status = DASD_CQR_ERROR;
1395 cqr->intrc = -EPERM;
1396 return -EPERM;
1397 }
1398 if (cqr->retries < 0) {
1399 dev_err(&device->cdev->dev,
1400 "Start I/O ran out of retries\n");
1401 cqr->status = DASD_CQR_ERROR;
1402 return -EIO;
1403 }
1404 cqr->startclk = get_tod_clock();
1405 cqr->starttime = jiffies;
1406 cqr->retries--;
1407 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1408 cqr->lpm &= dasd_path_get_opm(device);
1409 if (!cqr->lpm)
1410 cqr->lpm = dasd_path_get_opm(device);
1411 }
1412 /*
1413 * remember the amount of formatted tracks to prevent double format on
1414 * ESE devices
1415 */
1416 if (cqr->block)
1417 cqr->trkcount = atomic_read(&cqr->block->trkcount);
1418
1419 if (cqr->cpmode == 1) {
1420 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
1421 (long) cqr, cqr->lpm);
1422 } else {
1423 rc = ccw_device_start(device->cdev, cqr->cpaddr,
1424 (long) cqr, cqr->lpm, 0);
1425 }
1426 switch (rc) {
1427 case 0:
1428 cqr->status = DASD_CQR_IN_IO;
1429 break;
1430 case -EBUSY:
1431 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1432 "start_IO: device busy, retry later");
1433 break;
1434 case -EACCES:
1435 /* -EACCES indicates that the request used only a subset of the
1436 * available paths and all these paths are gone. If the lpm of
1437 * this request was only a subset of the opm (e.g. the ppm) then
1438 * we just do a retry with all available paths.
1439 * If we already use the full opm, something is amiss, and we
1440 * need a full path verification.
1441 */
1442 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
1443 DBF_DEV_EVENT(DBF_WARNING, device,
1444 "start_IO: selected paths gone (%x)",
1445 cqr->lpm);
1446 } else if (cqr->lpm != dasd_path_get_opm(device)) {
1447 cqr->lpm = dasd_path_get_opm(device);
1448 DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
1449 "start_IO: selected paths gone,"
1450 " retry on all paths");
1451 } else {
1452 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1453 "start_IO: all paths in opm gone,"
1454 " do path verification");
1455 dasd_generic_last_path_gone(device);
1456 dasd_path_no_path(device);
1457 dasd_path_set_tbvpm(device,
1458 ccw_device_get_path_mask(
1459 device->cdev));
1460 }
1461 break;
1462 case -ENODEV:
1463 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1464 "start_IO: -ENODEV device gone, retry");
1465 /* this is equivalent to CC=3 for SSCH report this to EER */
1466 dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
1467 break;
1468 case -EIO:
1469 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1470 "start_IO: -EIO device gone, retry");
1471 break;
1472 case -EINVAL:
1473 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1474 "start_IO: -EINVAL device currently "
1475 "not accessible");
1476 break;
1477 default:
1478 dev_err(&device->cdev->dev,
1479 "Unexpected error during request start %d", rc);
1480 BUG();
1481 break;
1482 }
1483 cqr->intrc = rc;
1484 return rc;
1485 }
1486 EXPORT_SYMBOL(dasd_start_IO);
1487
1488 /*
1489 * Timeout function for dasd devices. This is used for different purposes
1490 * 1) missing interrupt handler for normal operation
1491 * 2) delayed start of request where start_IO failed with -EBUSY
1492 * 3) timeout for missing state change interrupts
1493 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
1494 * DASD_CQR_QUEUED for 2) and 3).
1495 */
static void dasd_device_timeout(struct timer_list *t)
1497 {
1498 unsigned long flags;
1499 struct dasd_device *device;
1500
1501 device = timer_container_of(device, t, timer);
1502 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1503 /* re-activate request queue */
1504 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1505 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1506 dasd_schedule_device_bh(device);
1507 }
1508
1509 /*
1510 * Setup timeout for a device in jiffies.
1511 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
1513 {
1514 if (expires == 0)
1515 timer_delete(&device->timer);
1516 else
1517 mod_timer(&device->timer, jiffies + expires);
1518 }
1519 EXPORT_SYMBOL(dasd_device_set_timer);
1520
1521 /*
1522 * Clear timeout for a device.
1523 */
void dasd_device_clear_timer(struct dasd_device *device)
1525 {
1526 timer_delete(&device->timer);
1527 }
1528 EXPORT_SYMBOL(dasd_device_clear_timer);
1529
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
1532 {
1533 struct dasd_ccw_req *cqr;
1534 struct dasd_device *device;
1535
1536 if (!intparm)
1537 return;
1538 cqr = (struct dasd_ccw_req *) intparm;
1539 if (cqr->status != DASD_CQR_IN_IO) {
1540 DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1541 "invalid status in handle_killed_request: "
1542 "%02x", cqr->status);
1543 return;
1544 }
1545
1546 device = dasd_device_from_cdev_locked(cdev);
1547 if (IS_ERR(device)) {
1548 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1549 "unable to get device from cdev");
1550 return;
1551 }
1552
1553 if (!cqr->startdev ||
1554 device != cqr->startdev ||
1555 strncmp(cqr->startdev->discipline->ebcname,
1556 (char *) &cqr->magic, 4)) {
1557 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1558 "invalid device in request");
1559 dasd_put_device(device);
1560 return;
1561 }
1562
1563 /* Schedule request to be retried. */
1564 cqr->status = DASD_CQR_QUEUED;
1565
1566 dasd_device_clear_timer(device);
1567 dasd_schedule_device_bh(device);
1568 dasd_put_device(device);
1569 }
1570
void dasd_generic_handle_state_change(struct dasd_device *device)
1572 {
1573 /* First of all start sense subsystem status request. */
1574 dasd_eer_snss(device);
1575
1576 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1577 dasd_schedule_device_bh(device);
1578 if (device->block) {
1579 dasd_schedule_block_bh(device->block);
1580 if (device->block->gdp)
1581 blk_mq_run_hw_queues(device->block->gdp->queue, true);
1582 }
1583 }
1584 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1585
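/*
 * Check whether the IRB signals that High Performance FICON (transport
 * mode) is no longer available for this device or path.
 */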
static int dasd_check_hpf_error(struct irb *irb)
1587 {
1588 return (scsw_tm_is_valid_schxs(&irb->scsw) &&
1589 (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
1590 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
1591 }
1592
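/*
 * On an ESE (thin provisioned) volume, access to an unformatted track is
 * reported as 'no record found' or, in transport mode, as 'invalid track
 * format' without environmental data; such requests trigger formatting on
 * the fly in the interrupt handler.
 */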
static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
1594 {
1595 struct dasd_device *device = NULL;
1596 u8 *sense = NULL;
1597
1598 if (!block)
1599 return 0;
1600 device = block->base;
1601 if (!device || !device->discipline->is_ese)
1602 return 0;
1603 if (!device->discipline->is_ese(device))
1604 return 0;
1605
1606 sense = dasd_get_sense(irb);
1607 if (!sense)
1608 return 0;
1609
1610 if (sense[1] & SNS1_NO_REC_FOUND)
1611 return 1;
1612
1613 if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
1614 scsw_is_tm(&irb->scsw) &&
1615 !(sense[2] & SNS2_ENV_DATA_PRESENT))
1616 return 1;
1617
1618 return 0;
1619 }
1620
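/*
 * Check the sense data for the out-of-space condition raised when the
 * extent pool of an ESE volume is exhausted: equipment check, permanent
 * error and write inhibited, with sense byte 25 set to 0x01.
 */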
static int dasd_ese_oos_cond(u8 *sense)
1622 {
1623 return sense[0] & SNS0_EQUIPMENT_CHECK &&
1624 sense[1] & SNS1_PERM_ERR &&
1625 sense[1] & SNS1_WRITE_INHIBITED &&
1626 sense[25] == 0x01;
1627 }
1628
1629 /*
1630 * Interrupt handler for "normal" ssch-io based dasd devices.
1631 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
1634 {
1635 struct dasd_ccw_req *cqr, *next, *fcqr;
1636 struct dasd_device *device;
1637 unsigned long now;
1638 int nrf_suppressed = 0;
1639 int it_suppressed = 0;
1640 struct request *req;
1641 u8 *sense = NULL;
1642 int expires;
1643
1644 cqr = (struct dasd_ccw_req *) intparm;
1645 if (IS_ERR(irb)) {
1646 switch (PTR_ERR(irb)) {
1647 case -EIO:
1648 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
1649 device = cqr->startdev;
1650 cqr->status = DASD_CQR_CLEARED;
1651 dasd_device_clear_timer(device);
1652 wake_up(&dasd_flush_wq);
1653 dasd_schedule_device_bh(device);
1654 return;
1655 }
1656 break;
1657 case -ETIMEDOUT:
1658 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1659 "request timed out\n", __func__);
1660 break;
1661 default:
1662 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1663 "unknown error %ld\n", __func__,
1664 PTR_ERR(irb));
1665 }
1666 dasd_handle_killed_request(cdev, intparm);
1667 return;
1668 }
1669
1670 now = get_tod_clock();
1671 /* check for conditions that should be handled immediately */
1672 if (!cqr ||
1673 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1674 scsw_cstat(&irb->scsw) == 0)) {
1675 if (cqr)
1676 memcpy(&cqr->irb, irb, sizeof(*irb));
1677 device = dasd_device_from_cdev_locked(cdev);
1678 if (IS_ERR(device))
1679 return;
1680 /* ignore unsolicited interrupts for DIAG discipline */
1681 if (device->discipline == dasd_diag_discipline_pointer) {
1682 dasd_put_device(device);
1683 return;
1684 }
1685
1686 /*
1687 * In some cases 'File Protected' or 'No Record Found' errors
1688 * might be expected and debug log messages for the
1689 * corresponding interrupts shouldn't be written then.
1690 * Check if either of the according suppress bits is set.
1691 */
1692 sense = dasd_get_sense(irb);
1693 if (sense) {
1694 it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) &&
1695 !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
1696 test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
1697 nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
1698 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
1699
1700 /*
1701 * Extent pool probably out-of-space.
1702 * Stop device and check exhaust level.
1703 */
1704 if (dasd_ese_oos_cond(sense)) {
1705 dasd_generic_space_exhaust(device, cqr);
1706 device->discipline->ext_pool_exhaust(device, cqr);
1707 dasd_put_device(device);
1708 return;
1709 }
1710 }
1711 if (!(it_suppressed || nrf_suppressed))
1712 device->discipline->dump_sense_dbf(device, irb, "int");
1713
1714 if (device->features & DASD_FEATURE_ERPLOG)
1715 device->discipline->dump_sense(device, cqr, irb);
1716 device->discipline->check_for_device_change(device, cqr, irb);
1717 dasd_put_device(device);
1718 }
1719
1720 /* check for attention message */
1721 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
1722 device = dasd_device_from_cdev_locked(cdev);
1723 if (!IS_ERR(device)) {
1724 device->discipline->check_attention(device,
1725 irb->esw.esw1.lpum);
1726 dasd_put_device(device);
1727 }
1728 }
1729
1730 if (!cqr)
1731 return;
1732
1733 device = (struct dasd_device *) cqr->startdev;
1734 if (!device ||
1735 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1736 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1737 "invalid device in request");
1738 return;
1739 }
1740
1741 if (dasd_ese_needs_format(cqr->block, irb)) {
1742 req = dasd_get_callback_data(cqr);
1743 if (!req) {
1744 cqr->status = DASD_CQR_ERROR;
1745 return;
1746 }
1747 if (rq_data_dir(req) == READ) {
1748 device->discipline->ese_read(cqr, irb);
1749 cqr->status = DASD_CQR_SUCCESS;
1750 cqr->stopclk = now;
1751 dasd_device_clear_timer(device);
1752 dasd_schedule_device_bh(device);
1753 return;
1754 }
1755 fcqr = device->discipline->ese_format(device, cqr, irb);
1756 if (IS_ERR(fcqr)) {
1757 if (PTR_ERR(fcqr) == -EINVAL) {
1758 cqr->status = DASD_CQR_ERROR;
1759 return;
1760 }
1761 /*
1762 * If we can't format now, let the request go
1763 * one extra round. Maybe we can format later.
1764 */
1765 cqr->status = DASD_CQR_QUEUED;
1766 dasd_schedule_device_bh(device);
1767 return;
1768 } else {
1769 fcqr->status = DASD_CQR_QUEUED;
1770 cqr->status = DASD_CQR_QUEUED;
1771 list_add(&fcqr->devlist, &device->ccw_queue);
1772 dasd_schedule_device_bh(device);
1773 return;
1774 }
1775 }
1776
1777 /* Check for clear pending */
1778 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1779 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1780 cqr->status = DASD_CQR_CLEARED;
1781 dasd_device_clear_timer(device);
1782 wake_up(&dasd_flush_wq);
1783 dasd_schedule_device_bh(device);
1784 return;
1785 }
1786
1787 /* check status - the request might have been killed by dyn detach */
1788 if (cqr->status != DASD_CQR_IN_IO) {
1789 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1790 "status %02x", dev_name(&cdev->dev), cqr->status);
1791 return;
1792 }
1793
1794 next = NULL;
1795 expires = 0;
1796 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1797 scsw_cstat(&irb->scsw) == 0) {
1798 /* request was completed successfully */
1799 cqr->status = DASD_CQR_SUCCESS;
1800 cqr->stopclk = now;
1801 /* Start first request on queue if possible -> fast_io. */
1802 if (cqr->devlist.next != &device->ccw_queue) {
1803 next = list_entry(cqr->devlist.next,
1804 struct dasd_ccw_req, devlist);
1805 }
1806 } else { /* error */
1807 /* Check for an HPF error:
1808 * call the discipline function to requeue all requests
1809 * and disable HPF accordingly.
1810 */
1811 if (cqr->cpmode && dasd_check_hpf_error(irb) &&
1812 device->discipline->handle_hpf_error)
1813 device->discipline->handle_hpf_error(device, irb);
1814 /*
1815 * If we don't want complex ERP for this request, then just
1816 * reset this and retry it in the fastpath
1817 */
1818 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1819 cqr->retries > 0) {
1820 if (cqr->lpm == dasd_path_get_opm(device))
1821 DBF_DEV_EVENT(DBF_DEBUG, device,
1822 "default ERP in fastpath "
1823 "(%i retries left)",
1824 cqr->retries);
1825 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
1826 cqr->lpm = dasd_path_get_opm(device);
1827 cqr->status = DASD_CQR_QUEUED;
1828 next = cqr;
1829 } else
1830 cqr->status = DASD_CQR_ERROR;
1831 }
1832 if (next && (next->status == DASD_CQR_QUEUED) &&
1833 (!device->stopped)) {
1834 if (device->discipline->start_IO(next) == 0)
1835 expires = next->expires;
1836 }
1837 if (expires != 0)
1838 dasd_device_set_timer(device, expires);
1839 else
1840 dasd_device_clear_timer(device);
1841 dasd_schedule_device_bh(device);
1842 }
1843 EXPORT_SYMBOL(dasd_int_handler);
1844
1845 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1846 {
1847 struct dasd_device *device;
1848
1849 device = dasd_device_from_cdev_locked(cdev);
1850
1851 if (IS_ERR(device))
1852 goto out;
1853 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1854 device->state != device->target ||
1855 !device->discipline->check_for_device_change){
1856 dasd_put_device(device);
1857 goto out;
1858 }
1859 if (device->discipline->dump_sense_dbf)
1860 device->discipline->dump_sense_dbf(device, irb, "uc");
1861 device->discipline->check_for_device_change(device, NULL, irb);
1862 dasd_put_device(device);
1863 out:
1864 return UC_TODO_RETRY;
1865 }
1866 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
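
/*
 * Illustrative use (hypothetical driver struct, shown only as a sketch):
 * discipline drivers hook this handler up through their struct
 * ccw_driver, e.g.
 *
 *	static struct ccw_driver example_ccw_driver = {
 *		...
 *		.uc_handler	= dasd_generic_uc_handler,
 *	};
 */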
1867
1868 /*
1869 * If we have an error on a dasd_block layer request then we cancel
1870 * and return all further requests from the same dasd_block as well.
1871 */
1872 static void __dasd_device_recovery(struct dasd_device *device,
1873 struct dasd_ccw_req *ref_cqr)
1874 {
1875 struct list_head *l, *n;
1876 struct dasd_ccw_req *cqr;
1877
1878 /*
1879 * only requeue requests that came from the dasd_block layer
1880 */
1881 if (!ref_cqr->block)
1882 return;
1883
1884 list_for_each_safe(l, n, &device->ccw_queue) {
1885 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1886 if (cqr->status == DASD_CQR_QUEUED &&
1887 ref_cqr->block == cqr->block) {
1888 cqr->status = DASD_CQR_CLEARED;
1889 }
1890 }
1891 }
1892
1893 /*
1894 * Remove those ccw requests from the queue that need to be returned
1895 * to the upper layer.
1896 */
1897 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1898 struct list_head *final_queue)
1899 {
1900 struct list_head *l, *n;
1901 struct dasd_ccw_req *cqr;
1902
1903 /* Process requests with final status. */
1904 list_for_each_safe(l, n, &device->ccw_queue) {
1905 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1906
1907 /* Skip any non-final request. */
1908 if (cqr->status == DASD_CQR_QUEUED ||
1909 cqr->status == DASD_CQR_IN_IO ||
1910 cqr->status == DASD_CQR_CLEAR_PENDING)
1911 continue;
1912 if (cqr->status == DASD_CQR_ERROR) {
1913 __dasd_device_recovery(device, cqr);
1914 }
1915 /* Rechain finished requests to final queue */
1916 list_move_tail(&cqr->devlist, final_queue);
1917 }
1918 }
1919
1920 static void __dasd_process_cqr(struct dasd_device *device,
1921 struct dasd_ccw_req *cqr)
1922 {
1923 switch (cqr->status) {
1924 case DASD_CQR_SUCCESS:
1925 cqr->status = DASD_CQR_DONE;
1926 break;
1927 case DASD_CQR_ERROR:
1928 cqr->status = DASD_CQR_NEED_ERP;
1929 break;
1930 case DASD_CQR_CLEARED:
1931 cqr->status = DASD_CQR_TERMINATED;
1932 break;
1933 default:
1934 dev_err(&device->cdev->dev,
1935 "Unexpected CQR status %02x", cqr->status);
1936 BUG();
1937 }
1938 if (cqr->callback)
1939 cqr->callback(cqr, cqr->callback_data);
1940 }
1941
1942 /*
1943 * the cqrs from the final queue are returned to the upper layer
1944 * by setting a dasd_block state and calling the callback function
1945 */
1946 static void __dasd_device_process_final_queue(struct dasd_device *device,
1947 struct list_head *final_queue)
1948 {
1949 struct list_head *l, *n;
1950 struct dasd_ccw_req *cqr;
1951 struct dasd_block *block;
1952
1953 list_for_each_safe(l, n, final_queue) {
1954 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1955 list_del_init(&cqr->devlist);
1956 block = cqr->block;
1957 if (!block) {
1958 __dasd_process_cqr(device, cqr);
1959 } else {
1960 spin_lock_bh(&block->queue_lock);
1961 __dasd_process_cqr(device, cqr);
1962 spin_unlock_bh(&block->queue_lock);
1963 }
1964 }
1965 }
1966
1967 /*
1968 * check if device should be autoquiesced due to too many timeouts
1969 */
1970 static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
1971 struct dasd_ccw_req *cqr)
1972 {
1973 if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
1974 dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
1975 }
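
/*
 * Worked example for the check above (values are hypothetical): with
 * default_retries = 255 and aq_timeouts = 32, the device is autoquiesced
 * once 255 - cqr->retries >= 32 holds, i.e. as soon as the retry counter
 * of the timed-out request has dropped to 223 or below.
 */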
1976
1977 /*
1978 * Take a look at the first request on the ccw queue and check
1979 * if it reached its expire time. If so, terminate the IO.
1980 */
1981 static void __dasd_device_check_expire(struct dasd_device *device)
1982 {
1983 struct dasd_ccw_req *cqr;
1984
1985 if (list_empty(&device->ccw_queue))
1986 return;
1987 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1988 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1989 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1990 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
1991 /*
1992 * IO in safe offline processing should not
1993 * run out of retries
1994 */
1995 cqr->retries++;
1996 }
1997 if (device->discipline->term_IO(cqr) != 0) {
1998 /* Hmpf, try again in 5 sec */
1999 dev_err(&device->cdev->dev,
2000 "CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
2001 (cqr->expires / HZ));
2002 cqr->expires += 5*HZ;
2003 dasd_device_set_timer(device, 5*HZ);
2004 } else {
2005 dev_err(&device->cdev->dev,
2006 "CQR timed out (%lus), %i retries remaining\n",
2007 (cqr->expires / HZ), cqr->retries);
2008 }
2009 __dasd_device_check_autoquiesce_timeout(device, cqr);
2010 }
2011 }
2012
2013 /*
2014 * return 1 when device is not eligible for IO
2015 */
2016 static int __dasd_device_is_unusable(struct dasd_device *device,
2017 struct dasd_ccw_req *cqr)
2018 {
2019 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC);
2020
2021 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
2022 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
2023 /*
2024 * dasd is being set offline
2025 * but it is not a safe offline, where we would have to allow I/O
2026 */
2027 return 1;
2028 }
2029 if (device->stopped) {
2030 if (device->stopped & mask) {
2031 /* stopped and CQR will not change that. */
2032 return 1;
2033 }
2034 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2035 /* CQR is not able to change device to
2036 * operational. */
2037 return 1;
2038 }
2039 /* CQR required to get device operational. */
2040 }
2041 return 0;
2042 }
2043
2044 /*
2045 * Take a look at the first request on the ccw queue and check
2046 * if it needs to be started.
2047 */
2048 static void __dasd_device_start_head(struct dasd_device *device)
2049 {
2050 struct dasd_ccw_req *cqr;
2051 int rc;
2052
2053 if (list_empty(&device->ccw_queue))
2054 return;
2055 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2056 if (cqr->status != DASD_CQR_QUEUED)
2057 return;
2058 /* if device is not usable return request to upper layer */
2059 if (__dasd_device_is_unusable(device, cqr)) {
2060 cqr->intrc = -EAGAIN;
2061 cqr->status = DASD_CQR_CLEARED;
2062 dasd_schedule_device_bh(device);
2063 return;
2064 }
2065
2066 rc = device->discipline->start_IO(cqr);
2067 if (rc == 0)
2068 dasd_device_set_timer(device, cqr->expires);
2069 else if (rc == -EACCES) {
2070 dasd_schedule_device_bh(device);
2071 } else
2072 /* Hmpf, try again in 1/2 sec */
2073 dasd_device_set_timer(device, 50);
2074 }
2075
2076 static void __dasd_device_check_path_events(struct dasd_device *device)
2077 {
2078 __u8 tbvpm, fcsecpm;
2079 int rc;
2080
2081 tbvpm = dasd_path_get_tbvpm(device);
2082 fcsecpm = dasd_path_get_fcsecpm(device);
2083
2084 if (!tbvpm && !fcsecpm)
2085 return;
2086
2087 if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
2088 return;
2089
2090 dasd_path_clear_all_verify(device);
2091 dasd_path_clear_all_fcsec(device);
2092
2093 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
2094 if (rc) {
2095 dasd_path_add_tbvpm(device, tbvpm);
2096 dasd_path_add_fcsecpm(device, fcsecpm);
2097 dasd_device_set_timer(device, 50);
2098 }
2099 }
2100
2101 /*
2102 * Go through all requests on the dasd_device request queue,
2103 * terminate them on the cdev if necessary, and return them to the
2104 * submitting layer via callback.
2105 * Note:
2106 * Make sure that all 'submitting layers' still exist when
2107 * this function is called! In other words, when 'device' is a base
2108 * device, all block layer requests must have been removed beforehand
2109 * via dasd_flush_block_queue.
2110 */
2111 int dasd_flush_device_queue(struct dasd_device *device)
2112 {
2113 struct dasd_ccw_req *cqr, *n;
2114 int rc;
2115 struct list_head flush_queue;
2116
2117 INIT_LIST_HEAD(&flush_queue);
2118 spin_lock_irq(get_ccwdev_lock(device->cdev));
2119 rc = 0;
2120 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2121 /* Check status and move request to flush_queue */
2122 switch (cqr->status) {
2123 case DASD_CQR_IN_IO:
2124 rc = device->discipline->term_IO(cqr);
2125 if (rc) {
2126 /* unable to terminate request */
2127 dev_err(&device->cdev->dev,
2128 "Flushing the DASD request queue failed\n");
2129 /* stop flush processing */
2130 goto finished;
2131 }
2132 break;
2133 case DASD_CQR_QUEUED:
2134 cqr->stopclk = get_tod_clock();
2135 cqr->status = DASD_CQR_CLEARED;
2136 break;
2137 default: /* no need to modify the others */
2138 break;
2139 }
2140 list_move_tail(&cqr->devlist, &flush_queue);
2141 }
2142 finished:
2143 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2144 /*
2145 * After this point all requests must be in state CLEAR_PENDING,
2146 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2147 * one of the others.
2148 */
2149 list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
2150 wait_event(dasd_flush_wq,
2151 (cqr->status != DASD_CQR_CLEAR_PENDING));
2152 /*
2153 * Now set each request back to TERMINATED, DONE or NEED_ERP
2154 * and call the callback function of flushed requests
2155 */
2156 __dasd_device_process_final_queue(device, &flush_queue);
2157 return rc;
2158 }
2159 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2160
2161 /*
2162 * Acquire the device lock and process queues for the device.
2163 */
2164 static void dasd_device_tasklet(unsigned long data)
2165 {
2166 struct dasd_device *device = (struct dasd_device *) data;
2167 struct list_head final_queue;
2168
2169 atomic_set (&device->tasklet_scheduled, 0);
2170 INIT_LIST_HEAD(&final_queue);
2171 spin_lock_irq(get_ccwdev_lock(device->cdev));
2172 /* Check expire time of first request on the ccw queue. */
2173 __dasd_device_check_expire(device);
2174 /* find final requests on ccw queue */
2175 __dasd_device_process_ccw_queue(device, &final_queue);
2176 __dasd_device_check_path_events(device);
2177 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2178 /* Now call the callback function of requests with final status */
2179 __dasd_device_process_final_queue(device, &final_queue);
2180 spin_lock_irq(get_ccwdev_lock(device->cdev));
2181 /* Now check if the head of the ccw queue needs to be started. */
2182 __dasd_device_start_head(device);
2183 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2184 if (waitqueue_active(&shutdown_waitq))
2185 wake_up(&shutdown_waitq);
2186 dasd_put_device(device);
2187 }
2188
2189 /*
2190 * Schedules a run of the device tasklet (dasd_device_tasklet).
2191 */
2192 void dasd_schedule_device_bh(struct dasd_device *device)
2193 {
2194 /* Protect against rescheduling. */
2195 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
2196 return;
2197 dasd_get_device(device);
2198 tasklet_hi_schedule(&device->tasklet);
2199 }
2200 EXPORT_SYMBOL(dasd_schedule_device_bh);
2201
2202 void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2203 {
2204 device->stopped |= bits;
2205 }
2206 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
2207
2208 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
2209 {
2210 device->stopped &= ~bits;
2211 if (!device->stopped)
2212 wake_up(&generic_waitq);
2213 }
2214 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
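
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * quiesce a device by setting a stop bit and resume it later. The
 * ccwdev lock is taken around the stop bit update, as the existing
 * callers of these helpers do, and the device bh is kicked afterwards
 * so queued requests are started again.
 */
static inline void example_quiesce_resume(struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* ... later, allow I/O again and restart queue processing ... */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_QUIESCE);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}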
2215
2216 /*
2217 * Queue a request to the head of the device ccw_queue.
2218 * Start the I/O if possible.
2219 */
2220 void dasd_add_request_head(struct dasd_ccw_req *cqr)
2221 {
2222 struct dasd_device *device;
2223 unsigned long flags;
2224
2225 device = cqr->startdev;
2226 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2227 cqr->status = DASD_CQR_QUEUED;
2228 list_add(&cqr->devlist, &device->ccw_queue);
2229 /* let the bh start the request to keep them in order */
2230 dasd_schedule_device_bh(device);
2231 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2232 }
2233 EXPORT_SYMBOL(dasd_add_request_head);
2234
2235 /*
2236 * Queue a request to the tail of the device ccw_queue.
2237 * Start the I/O if possible.
2238 */
2239 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2240 {
2241 struct dasd_device *device;
2242 unsigned long flags;
2243
2244 device = cqr->startdev;
2245 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2246 cqr->status = DASD_CQR_QUEUED;
2247 list_add_tail(&cqr->devlist, &device->ccw_queue);
2248 /* let the bh start the request to keep them in order */
2249 dasd_schedule_device_bh(device);
2250 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2251 }
2252 EXPORT_SYMBOL(dasd_add_request_tail);
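
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * asynchronous submission of an already built cqr. The callback wiring
 * mirrors what _dasd_sleep_on() does before calling
 * dasd_add_request_tail(); the bh started by the head/tail helpers
 * preserves the start order of queued requests.
 */
static inline void example_submit_async(struct dasd_ccw_req *cqr)
{
	cqr->callback = dasd_wakeup_cb;		/* wake waiters on final status */
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	dasd_add_request_tail(cqr);		/* status becomes DASD_CQR_QUEUED */
}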
2253
2254 /*
2255 * Wakeup helper for the 'sleep_on' functions.
2256 */
2257 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
2258 {
2259 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2260 cqr->callback_data = DASD_SLEEPON_END_TAG;
2261 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
2262 wake_up(&generic_waitq);
2263 }
2264 EXPORT_SYMBOL_GPL(dasd_wakeup_cb);
2265
2266 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
2267 {
2268 struct dasd_device *device;
2269 int rc;
2270
2271 device = cqr->startdev;
2272 spin_lock_irq(get_ccwdev_lock(device->cdev));
2273 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
2274 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2275 return rc;
2276 }
2277
2278 /*
2279 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
2280 */
2281 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
2282 {
2283 struct dasd_device *device;
2284 dasd_erp_fn_t erp_fn;
2285
2286 if (cqr->status == DASD_CQR_FILLED)
2287 return 0;
2288 device = cqr->startdev;
2289 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2290 if (cqr->status == DASD_CQR_TERMINATED) {
2291 device->discipline->handle_terminated_request(cqr);
2292 return 1;
2293 }
2294 if (cqr->status == DASD_CQR_NEED_ERP) {
2295 erp_fn = device->discipline->erp_action(cqr);
2296 erp_fn(cqr);
2297 return 1;
2298 }
2299 if (cqr->status == DASD_CQR_FAILED)
2300 dasd_log_sense(cqr, &cqr->irb);
2301 if (cqr->refers) {
2302 __dasd_process_erp(device, cqr);
2303 return 1;
2304 }
2305 }
2306 return 0;
2307 }
2308
2309 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
2310 {
2311 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
2312 if (cqr->refers) /* erp is not done yet */
2313 return 1;
2314 return ((cqr->status != DASD_CQR_DONE) &&
2315 (cqr->status != DASD_CQR_FAILED));
2316 } else
2317 return (cqr->status == DASD_CQR_FILLED);
2318 }
2319
2320 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
2321 {
2322 struct dasd_device *device;
2323 int rc;
2324 struct list_head ccw_queue;
2325 struct dasd_ccw_req *cqr;
2326
2327 INIT_LIST_HEAD(&ccw_queue);
2328 maincqr->status = DASD_CQR_FILLED;
2329 device = maincqr->startdev;
2330 list_add(&maincqr->blocklist, &ccw_queue);
2331 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
2332 cqr = list_first_entry(&ccw_queue,
2333 struct dasd_ccw_req, blocklist)) {
2334
2335 if (__dasd_sleep_on_erp(cqr))
2336 continue;
2337 if (cqr->status != DASD_CQR_FILLED) /* could be failed */
2338 continue;
2339 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2340 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2341 cqr->status = DASD_CQR_FAILED;
2342 cqr->intrc = -EPERM;
2343 continue;
2344 }
2345 /* Non-temporary stop condition will trigger fail fast */
2346 if (device->stopped & ~DASD_STOPPED_PENDING &&
2347 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2348 !dasd_eer_enabled(device) && device->aq_mask == 0) {
2349 cqr->status = DASD_CQR_FAILED;
2350 cqr->intrc = -ENOLINK;
2351 continue;
2352 }
2353 /*
2354 * Don't try to start requests if device is in
2355 * offline processing, it might wait forever
2356 */
2357 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2358 cqr->status = DASD_CQR_FAILED;
2359 cqr->intrc = -ENODEV;
2360 continue;
2361 }
2362 /*
2363 * Don't try to start requests if device is stopped
2364 * except for path verification requests
2365 */
2366 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
2367 if (interruptible) {
2368 rc = wait_event_interruptible(
2369 generic_waitq, !(device->stopped));
2370 if (rc == -ERESTARTSYS) {
2371 cqr->status = DASD_CQR_FAILED;
2372 maincqr->intrc = rc;
2373 continue;
2374 }
2375 } else
2376 wait_event(generic_waitq, !(device->stopped));
2377 }
2378 if (!cqr->callback)
2379 cqr->callback = dasd_wakeup_cb;
2380
2381 cqr->callback_data = DASD_SLEEPON_START_TAG;
2382 dasd_add_request_tail(cqr);
2383 if (interruptible) {
2384 rc = wait_event_interruptible(
2385 generic_waitq, _wait_for_wakeup(cqr));
2386 if (rc == -ERESTARTSYS) {
2387 dasd_cancel_req(cqr);
2388 /* wait (non-interruptible) for final status */
2389 wait_event(generic_waitq,
2390 _wait_for_wakeup(cqr));
2391 cqr->status = DASD_CQR_FAILED;
2392 maincqr->intrc = rc;
2393 continue;
2394 }
2395 } else
2396 wait_event(generic_waitq, _wait_for_wakeup(cqr));
2397 }
2398
2399 maincqr->endclk = get_tod_clock();
2400 if ((maincqr->status != DASD_CQR_DONE) &&
2401 (maincqr->intrc != -ERESTARTSYS))
2402 dasd_log_sense(maincqr, &maincqr->irb);
2403 if (maincqr->status == DASD_CQR_DONE)
2404 rc = 0;
2405 else if (maincqr->intrc)
2406 rc = maincqr->intrc;
2407 else
2408 rc = -EIO;
2409 return rc;
2410 }
2411
2412 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
2413 {
2414 struct dasd_ccw_req *cqr;
2415
2416 list_for_each_entry(cqr, ccw_queue, blocklist) {
2417 if (cqr->callback_data != DASD_SLEEPON_END_TAG)
2418 return 0;
2419 }
2420
2421 return 1;
2422 }
2423
2424 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
2425 {
2426 struct dasd_device *device;
2427 struct dasd_ccw_req *cqr, *n;
2428 u8 *sense = NULL;
2429 int rc;
2430
2431 retry:
2432 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2433 device = cqr->startdev;
2434 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/
2435 continue;
2436
2437 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2438 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2439 cqr->status = DASD_CQR_FAILED;
2440 cqr->intrc = -EPERM;
2441 continue;
2442 }
2443 /*Non-temporary stop condition will trigger fail fast*/
2444 if (device->stopped & ~DASD_STOPPED_PENDING &&
2445 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2446 !dasd_eer_enabled(device)) {
2447 cqr->status = DASD_CQR_FAILED;
2448 cqr->intrc = -EAGAIN;
2449 continue;
2450 }
2451
2452 /*Don't try to start requests if device is stopped*/
2453 if (interruptible) {
2454 rc = wait_event_interruptible(
2455 generic_waitq, !device->stopped);
2456 if (rc == -ERESTARTSYS) {
2457 cqr->status = DASD_CQR_FAILED;
2458 cqr->intrc = rc;
2459 continue;
2460 }
2461 } else
2462 wait_event(generic_waitq, !(device->stopped));
2463
2464 if (!cqr->callback)
2465 cqr->callback = dasd_wakeup_cb;
2466 cqr->callback_data = DASD_SLEEPON_START_TAG;
2467 dasd_add_request_tail(cqr);
2468 }
2469
2470 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));
2471
2472 rc = 0;
2473 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2474 /*
2475 * In some cases certain errors might be expected and
2476 * error recovery would be unnecessary in these cases.
2477 * Check if the according suppress bit is set.
2478 */
2479 sense = dasd_get_sense(&cqr->irb);
2480 if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) &&
2481 !(sense[2] & SNS2_ENV_DATA_PRESENT) &&
2482 test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags))
2483 continue;
2484 if (sense && (sense[1] & SNS1_NO_REC_FOUND) &&
2485 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags))
2486 continue;
2487 if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2488 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2489 continue;
2490
2491 /*
2492 * For alias devices, simplify error recovery and
2493 * return to the upper layer;
2494 * do not skip ERP requests.
2495 */
2496 if (cqr->startdev != cqr->basedev && !cqr->refers &&
2497 (cqr->status == DASD_CQR_TERMINATED ||
2498 cqr->status == DASD_CQR_NEED_ERP))
2499 return -EAGAIN;
2500
2501 /* normal recovery for basedev IO */
2502 if (__dasd_sleep_on_erp(cqr))
2503 /* handle erp first */
2504 goto retry;
2505 }
2506
2507 return 0;
2508 }
2509
2510 /*
2511 * Queue a request to the tail of the device ccw_queue and wait for
2512 * its completion.
2513 */
2514 int dasd_sleep_on(struct dasd_ccw_req *cqr)
2515 {
2516 return _dasd_sleep_on(cqr, 0);
2517 }
2518 EXPORT_SYMBOL(dasd_sleep_on);
2519
2520 /*
2521 * Start requests from a ccw_queue and wait for their completion.
2522 */
2523 int dasd_sleep_on_queue(struct list_head *ccw_queue)
2524 {
2525 return _dasd_sleep_on_queue(ccw_queue, 0);
2526 }
2527 EXPORT_SYMBOL(dasd_sleep_on_queue);
2528
2529 /*
2530 * Start requests from a ccw_queue and wait interruptible for their completion.
2531 */
2532 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
2533 {
2534 return _dasd_sleep_on_queue(ccw_queue, 1);
2535 }
2536 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
2537
2538 /*
2539 * Queue a request to the tail of the device ccw_queue and wait
2540 * interruptible for its completion.
2541 */
2542 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2543 {
2544 return _dasd_sleep_on(cqr, 1);
2545 }
2546 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
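
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * synchronous submission of a prepared request. dasd_sleep_on() sets
 * the cqr to DASD_CQR_FILLED, queues it, waits for the wakeup callback
 * and returns 0 on success, cqr->intrc if set, or -EIO otherwise.
 */
static inline int example_issue_and_wait(struct dasd_ccw_req *cqr)
{
	int rc;

	rc = dasd_sleep_on(cqr);	/* blocks until final status */
	if (rc)
		DBF_DEV_EVENT(DBF_WARNING, cqr->startdev,
			      "example request failed with rc=%d", rc);
	return rc;
}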
2547
2548 /*
2549 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
2550 * for eckd devices) the currently running request has to be terminated
2551 * and put back to status queued, before the special request is added
2552 * to the head of the queue. Then the special request is waited on normally.
2553 */
2554 static inline int _dasd_term_running_cqr(struct dasd_device *device)
2555 {
2556 struct dasd_ccw_req *cqr;
2557 int rc;
2558
2559 if (list_empty(&device->ccw_queue))
2560 return 0;
2561 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2562 rc = device->discipline->term_IO(cqr);
2563 if (!rc)
2564 /*
2565 * CQR terminated because a more important request is pending.
2566 * Undo decreasing of retry counter because this is
2567 * not an error case.
2568 */
2569 cqr->retries++;
2570 return rc;
2571 }
2572
2573 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2574 {
2575 struct dasd_device *device;
2576 int rc;
2577
2578 device = cqr->startdev;
2579 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
2580 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2581 cqr->status = DASD_CQR_FAILED;
2582 cqr->intrc = -EPERM;
2583 return -EIO;
2584 }
2585 spin_lock_irq(get_ccwdev_lock(device->cdev));
2586 rc = _dasd_term_running_cqr(device);
2587 if (rc) {
2588 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2589 return rc;
2590 }
2591 cqr->callback = dasd_wakeup_cb;
2592 cqr->callback_data = DASD_SLEEPON_START_TAG;
2593 cqr->status = DASD_CQR_QUEUED;
2594 /*
2595 * add the new request as second in the queue;
2596 * the terminated cqr needs to be finished first
2597 */
2598 list_add(&cqr->devlist, device->ccw_queue.next);
2599
2600 /* let the bh start the request to keep them in order */
2601 dasd_schedule_device_bh(device);
2602
2603 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2604
2605 wait_event(generic_waitq, _wait_for_wakeup(cqr));
2606
2607 if (cqr->status == DASD_CQR_DONE)
2608 rc = 0;
2609 else if (cqr->intrc)
2610 rc = cqr->intrc;
2611 else
2612 rc = -EIO;
2613
2614 /* kick tasklets */
2615 dasd_schedule_device_bh(device);
2616 if (device->block)
2617 dasd_schedule_block_bh(device->block);
2618
2619 return rc;
2620 }
2621 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2622
2623 /*
2624 * Cancels a request that was started with dasd_sleep_on_req.
2625 * This is useful to timeout requests. The request will be
2626 * terminated if it is currently in i/o.
2627 * Returns 0 if request termination was successful
2628 * negative error code if termination failed
2629 * Cancellation of a request is an asynchronous operation! The calling
2630 * function has to wait until the request is properly returned via callback.
2631 */
2632 static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
2633 {
2634 struct dasd_device *device = cqr->startdev;
2635 int rc = 0;
2636
2637 switch (cqr->status) {
2638 case DASD_CQR_QUEUED:
2639 /* request was not started - just set to cleared */
2640 cqr->status = DASD_CQR_CLEARED;
2641 break;
2642 case DASD_CQR_IN_IO:
2643 /* request in IO - terminate IO and release again */
2644 rc = device->discipline->term_IO(cqr);
2645 if (rc) {
2646 dev_err(&device->cdev->dev,
2647 "Cancelling request failed with rc=%d\n", rc);
2648 } else {
2649 cqr->stopclk = get_tod_clock();
2650 }
2651 break;
2652 default: /* already finished or clear pending - do nothing */
2653 break;
2654 }
2655 dasd_schedule_device_bh(device);
2656 return rc;
2657 }
2658
2659 int dasd_cancel_req(struct dasd_ccw_req *cqr)
2660 {
2661 struct dasd_device *device = cqr->startdev;
2662 unsigned long flags;
2663 int rc;
2664
2665 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
2666 rc = __dasd_cancel_req(cqr);
2667 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2668 return rc;
2669 }
2670
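/*
 * Illustrative use (mirrors the interruptible sleep_on path above): a
 * caller that gives up on a request cancels it and then waits for the
 * final callback, because cancellation only completes asynchronously:
 *
 *	dasd_cancel_req(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */
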
2671 /*
2672 * SECTION: Operations of the dasd_block layer.
2673 */
2674
2675 /*
2676 * Timeout function for dasd_block. This is used when the block layer
2677 * is waiting for something that may not come reliably (e.g. a state
2678 * change interrupt)
2679 */
2680 static void dasd_block_timeout(struct timer_list *t)
2681 {
2682 unsigned long flags;
2683 struct dasd_block *block;
2684
2685 block = timer_container_of(block, t, timer);
2686 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
2687 /* re-activate request queue */
2688 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
2689 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
2690 dasd_schedule_block_bh(block);
2691 blk_mq_run_hw_queues(block->gdp->queue, true);
2692 }
2693
2694 /*
2695 * Setup timeout for a dasd_block in jiffies.
2696 */
2697 void dasd_block_set_timer(struct dasd_block *block, int expires)
2698 {
2699 if (expires == 0)
2700 timer_delete(&block->timer);
2701 else
2702 mod_timer(&block->timer, jiffies + expires);
2703 }
2704 EXPORT_SYMBOL(dasd_block_set_timer);
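
/*
 * Illustrative use (hypothetical values): expires is given in jiffies,
 * so callers typically convert from wall-clock time first, e.g.
 *
 *	dasd_block_set_timer(block, msecs_to_jiffies(500));
 *
 * Passing expires == 0, or calling dasd_block_clear_timer(), removes a
 * pending timer again.
 */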
2705
2706 /*
2707 * Clear timeout for a dasd_block.
2708 */
2709 void dasd_block_clear_timer(struct dasd_block *block)
2710 {
2711 timer_delete(&block->timer);
2712 }
2713 EXPORT_SYMBOL(dasd_block_clear_timer);
2714
2715 /*
2716 * Process finished error recovery ccw.
2717 */
2718 static void __dasd_process_erp(struct dasd_device *device,
2719 struct dasd_ccw_req *cqr)
2720 {
2721 dasd_erp_fn_t erp_fn;
2722
2723 if (cqr->status == DASD_CQR_DONE)
2724 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
2725 else
2726 dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
2727 erp_fn = device->discipline->erp_postaction(cqr);
2728 erp_fn(cqr);
2729 }
2730
2731 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
2732 {
2733 struct request *req;
2734 blk_status_t error = BLK_STS_OK;
2735 unsigned int proc_bytes;
2736 int status;
2737
2738 req = (struct request *) cqr->callback_data;
2739 dasd_profile_end(cqr->block, cqr, req);
2740
2741 proc_bytes = cqr->proc_bytes;
2742 status = cqr->block->base->discipline->free_cp(cqr, req);
2743 if (status < 0)
2744 error = errno_to_blk_status(status);
2745 else if (status == 0) {
2746 switch (cqr->intrc) {
2747 case -EPERM:
2748 /*
2749 * DASD doesn't implement SCSI/NVMe reservations, but it
2750 * implements a locking scheme similar to them. We
2751 * return this error when we no longer have the lock.
2752 */
2753 error = BLK_STS_RESV_CONFLICT;
2754 break;
2755 case -ENOLINK:
2756 error = BLK_STS_TRANSPORT;
2757 break;
2758 case -ETIMEDOUT:
2759 error = BLK_STS_TIMEOUT;
2760 break;
2761 default:
2762 error = BLK_STS_IOERR;
2763 break;
2764 }
2765 }
2766
2767 /*
2768 * We need to take care for ETIMEDOUT errors here since the
2769 * complete callback does not get called in this case.
2770 * Take care of all errors here and avoid additional code to
2771 * transfer the error value to the complete callback.
2772 */
2773 if (error) {
2774 blk_mq_end_request(req, error);
2775 blk_mq_run_hw_queues(req->q, true);
2776 } else {
2777 /*
2778 * Partially completed requests can happen with ESE devices.
2779 * During read we might have gotten an NRF error and have to
2780 * complete a request partially.
2781 */
2782 if (proc_bytes) {
2783 blk_update_request(req, BLK_STS_OK, proc_bytes);
2784 blk_mq_requeue_request(req, true);
2785 } else if (likely(!blk_should_fake_timeout(req->q))) {
2786 blk_mq_complete_request(req);
2787 }
2788 }
2789 }
2790
2791 /*
2792 * Process ccw request queue.
2793 */
2794 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
2795 struct list_head *final_queue)
2796 {
2797 struct list_head *l, *n;
2798 struct dasd_ccw_req *cqr;
2799 dasd_erp_fn_t erp_fn;
2800 unsigned long flags;
2801 struct dasd_device *base = block->base;
2802
2803 restart:
2804 /* Process requests with final status. */
2805 list_for_each_safe(l, n, &block->ccw_queue) {
2806 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2807 if (cqr->status != DASD_CQR_DONE &&
2808 cqr->status != DASD_CQR_FAILED &&
2809 cqr->status != DASD_CQR_NEED_ERP &&
2810 cqr->status != DASD_CQR_TERMINATED)
2811 continue;
2812
2813 if (cqr->status == DASD_CQR_TERMINATED) {
2814 base->discipline->handle_terminated_request(cqr);
2815 goto restart;
2816 }
2817
2818 /* Process requests that may be recovered */
2819 if (cqr->status == DASD_CQR_NEED_ERP) {
2820 erp_fn = base->discipline->erp_action(cqr);
2821 if (IS_ERR(erp_fn(cqr)))
2822 continue;
2823 goto restart;
2824 }
2825
2826 /* log sense for fatal error */
2827 if (cqr->status == DASD_CQR_FAILED) {
2828 dasd_log_sense(cqr, &cqr->irb);
2829 }
2830
2831 /*
2832 * First call extended error reporting and check for autoquiesce
2833 */
2834 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
2835 if (cqr->status == DASD_CQR_FAILED &&
2836 dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) {
2837 cqr->status = DASD_CQR_FILLED;
2838 cqr->retries = 255;
2839 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
2840 goto restart;
2841 }
2842 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
2843
2844 /* Process finished ERP request. */
2845 if (cqr->refers) {
2846 __dasd_process_erp(base, cqr);
2847 goto restart;
2848 }
2849
2850 /* Rechain finished requests to final queue */
2851 cqr->endclk = get_tod_clock();
2852 list_move_tail(&cqr->blocklist, final_queue);
2853 }
2854 }
2855
2856 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
2857 {
2858 dasd_schedule_block_bh(cqr->block);
2859 }
2860
2861 static void __dasd_block_start_head(struct dasd_block *block)
2862 {
2863 struct dasd_ccw_req *cqr;
2864
2865 if (list_empty(&block->ccw_queue))
2866 return;
2867 /* We always begin with the first requests on the queue, as some
2868 * previously started requests have to be enqueued on a
2869 * dasd_device again for error recovery.
2870 */
2871 list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2872 if (cqr->status != DASD_CQR_FILLED)
2873 continue;
2874 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
2875 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
2876 cqr->status = DASD_CQR_FAILED;
2877 cqr->intrc = -EPERM;
2878 dasd_schedule_block_bh(block);
2879 continue;
2880 }
2881 /* Non-temporary stop condition will trigger fail fast */
2882 if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2883 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2884 !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) {
2885 cqr->status = DASD_CQR_FAILED;
2886 cqr->intrc = -ENOLINK;
2887 dasd_schedule_block_bh(block);
2888 continue;
2889 }
2890 /* Don't try to start requests if device is stopped */
2891 if (block->base->stopped)
2892 return;
2893
2894 /* just a fail safe check, should not happen */
2895 if (!cqr->startdev)
2896 cqr->startdev = block->base;
2897
2898 /* make sure that the requests we submit find their way back */
2899 cqr->callback = dasd_return_cqr_cb;
2900
2901 dasd_add_request_tail(cqr);
2902 }
2903 }
2904
2905 /*
2906 * Central dasd_block layer routine. Takes requests from the generic
2907 * block layer request queue, creates ccw requests, enqueues them on
2908 * a dasd_device and processes ccw requests that have been returned.
2909 */
2910 static void dasd_block_tasklet(unsigned long data)
2911 {
2912 struct dasd_block *block = (struct dasd_block *) data;
2913 struct list_head final_queue;
2914 struct list_head *l, *n;
2915 struct dasd_ccw_req *cqr;
2916 struct dasd_queue *dq;
2917
2918 atomic_set(&block->tasklet_scheduled, 0);
2919 INIT_LIST_HEAD(&final_queue);
2920 spin_lock_irq(&block->queue_lock);
2921 /* Finish off requests on ccw queue */
2922 __dasd_process_block_ccw_queue(block, &final_queue);
2923 spin_unlock_irq(&block->queue_lock);
2924
2925 /* Now call the callback function of requests with final status */
2926 list_for_each_safe(l, n, &final_queue) {
2927 cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2928 dq = cqr->dq;
2929 spin_lock_irq(&dq->lock);
2930 list_del_init(&cqr->blocklist);
2931 __dasd_cleanup_cqr(cqr);
2932 spin_unlock_irq(&dq->lock);
2933 }
2934
2935 spin_lock_irq(&block->queue_lock);
2936 /* Now check if the head of the ccw queue needs to be started. */
2937 __dasd_block_start_head(block);
2938 spin_unlock_irq(&block->queue_lock);
2939
2940 if (waitqueue_active(&shutdown_waitq))
2941 wake_up(&shutdown_waitq);
2942 dasd_put_device(block->base);
2943 }
2944
2945 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2946 {
2947 wake_up(&dasd_flush_wq);
2948 }
2949
2950 /*
2951 * Requeue a request back to the block request queue;
2952 * this only works for block requests.
2953 */
2954 static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
2955 {
2956 struct request *req;
2957
2958 /*
2959 * If the request is an ERP request there is nothing to requeue.
2960 * This will be done with the remaining original request.
2961 */
2962 if (cqr->refers)
2963 return;
2964 spin_lock_irq(&cqr->dq->lock);
2965 req = (struct request *) cqr->callback_data;
2966 blk_mq_requeue_request(req, true);
2967 spin_unlock_irq(&cqr->dq->lock);
2968
2969 return;
2970 }
2971
2972 static int _dasd_requests_to_flushqueue(struct dasd_block *block,
2973 struct list_head *flush_queue)
2974 {
2975 struct dasd_ccw_req *cqr, *n;
2976 unsigned long flags;
2977 int rc, i;
2978
2979 spin_lock_irqsave(&block->queue_lock, flags);
2980 rc = 0;
2981 restart:
2982 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
2983 /* if this request is currently owned by a dasd_device, cancel it */
2984 if (cqr->status >= DASD_CQR_QUEUED)
2985 rc = dasd_cancel_req(cqr);
2986 if (rc < 0)
2987 break;
2988 /* Rechain request (including erp chain) so it won't be
2989 * touched by the dasd_block_tasklet anymore.
2990 * Replace the callback so we notice when the request
2991 * is returned from the dasd_device layer.
2992 */
2993 cqr->callback = _dasd_wake_block_flush_cb;
2994 for (i = 0; cqr; cqr = cqr->refers, i++)
2995 list_move_tail(&cqr->blocklist, flush_queue);
2996 if (i > 1)
2997 /* moved more than one request - need to restart */
2998 goto restart;
2999 }
3000 spin_unlock_irqrestore(&block->queue_lock, flags);
3001
3002 return rc;
3003 }
3004
3005 /*
3006 * Go through all request on the dasd_block request queue, cancel them
3007 * on the respective dasd_device, and return them to the generic
3008 * block layer.
3009 */
3010 static int dasd_flush_block_queue(struct dasd_block *block)
3011 {
3012 struct dasd_ccw_req *cqr, *n;
3013 struct list_head flush_queue;
3014 unsigned long flags;
3015 int rc;
3016
3017 INIT_LIST_HEAD(&flush_queue);
3018 rc = _dasd_requests_to_flushqueue(block, &flush_queue);
3019
3020 /* Now call the callback function of flushed requests */
3021 restart_cb:
3022 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
3023 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3024 /* Process finished ERP request. */
3025 if (cqr->refers) {
3026 spin_lock_bh(&block->queue_lock);
3027 __dasd_process_erp(block->base, cqr);
3028 spin_unlock_bh(&block->queue_lock);
3029 /* restart list_for_xx loop since dasd_process_erp
3030 * might remove multiple elements */
3031 goto restart_cb;
3032 }
3033 /* call the callback function */
3034 spin_lock_irqsave(&cqr->dq->lock, flags);
3035 cqr->endclk = get_tod_clock();
3036 list_del_init(&cqr->blocklist);
3037 __dasd_cleanup_cqr(cqr);
3038 spin_unlock_irqrestore(&cqr->dq->lock, flags);
3039 }
3040 return rc;
3041 }
3042
3043 /*
3044 * Schedules a run of the block tasklet (dasd_block_tasklet).
3045 */
3046 void dasd_schedule_block_bh(struct dasd_block *block)
3047 {
3048 /* Protect against rescheduling. */
3049 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
3050 return;
3051 /* life cycle of block is bound to its base device */
3052 dasd_get_device(block->base);
3053 tasklet_hi_schedule(&block->tasklet);
3054 }
3055 EXPORT_SYMBOL(dasd_schedule_block_bh);
3056
3057
3058 /*
3059 * SECTION: external block device operations
3060 * (request queue handling, open, release, etc.)
3061 */
3062
3063 /*
3064 * DASD request queue function. Called by the blk-mq layer.
3065 */
3066 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
3067 const struct blk_mq_queue_data *qd)
3068 {
3069 struct dasd_block *block = hctx->queue->queuedata;
3070 struct dasd_queue *dq = hctx->driver_data;
3071 struct request *req = qd->rq;
3072 struct dasd_device *basedev;
3073 struct dasd_ccw_req *cqr;
3074 blk_status_t rc = BLK_STS_OK;
3075
3076 basedev = block->base;
3077 spin_lock_irq(&dq->lock);
3078 if (basedev->state < DASD_STATE_READY ||
3079 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) {
3080 DBF_DEV_EVENT(DBF_ERR, basedev,
3081 "device not ready for request %p", req);
3082 rc = BLK_STS_IOERR;
3083 goto out;
3084 }
3085
3086 /*
3087 * if the device is stopped, do not fetch new requests,
3088 * unless failfast is active, which will let requests fail
3089 * immediately in __dasd_block_start_head()
3090 */
3091 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) {
3092 DBF_DEV_EVENT(DBF_ERR, basedev,
3093 "device stopped request %p", req);
3094 rc = BLK_STS_RESOURCE;
3095 goto out;
3096 }
3097
3098 if (basedev->features & DASD_FEATURE_READONLY &&
3099 rq_data_dir(req) == WRITE) {
3100 DBF_DEV_EVENT(DBF_ERR, basedev,
3101 "Rejecting write request %p", req);
3102 rc = BLK_STS_IOERR;
3103 goto out;
3104 }
3105
3106 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) &&
3107 (basedev->features & DASD_FEATURE_FAILFAST ||
3108 blk_noretry_request(req))) {
3109 DBF_DEV_EVENT(DBF_ERR, basedev,
3110 "Rejecting failfast request %p", req);
3111 rc = BLK_STS_IOERR;
3112 goto out;
3113 }
3114
3115 cqr = basedev->discipline->build_cp(basedev, block, req);
3116 if (IS_ERR(cqr)) {
3117 if (PTR_ERR(cqr) == -EBUSY ||
3118 PTR_ERR(cqr) == -ENOMEM ||
3119 PTR_ERR(cqr) == -EAGAIN) {
3120 rc = BLK_STS_RESOURCE;
3121 } else if (PTR_ERR(cqr) == -EINVAL) {
3122 rc = BLK_STS_INVAL;
3123 } else {
3124 DBF_DEV_EVENT(DBF_ERR, basedev,
3125 "CCW creation failed (rc=%ld) on request %p",
3126 PTR_ERR(cqr), req);
3127 rc = BLK_STS_IOERR;
3128 }
3129 goto out;
3130 }
3131 /*
3132 * Note: callback is set to dasd_return_cqr_cb in
3133 * __dasd_block_start_head to cover erp requests as well
3134 */
3135 cqr->callback_data = req;
3136 cqr->status = DASD_CQR_FILLED;
3137 cqr->dq = dq;
3138
3139 blk_mq_start_request(req);
3140 spin_lock(&block->queue_lock);
3141 list_add_tail(&cqr->blocklist, &block->ccw_queue);
3142 INIT_LIST_HEAD(&cqr->devlist);
3143 dasd_profile_start(block, cqr, req);
3144 dasd_schedule_block_bh(block);
3145 spin_unlock(&block->queue_lock);
3146
3147 out:
3148 spin_unlock_irq(&dq->lock);
3149 return rc;
3150 }
3151
3152 /*
3153 * Block timeout callback, called from the block layer
3154 *
3155 * Return values:
3156 * BLK_EH_RESET_TIMER if the request should be left running
3157 * BLK_EH_DONE if the request is handled or terminated
3158 * by the driver.
3159 */
3160 enum blk_eh_timer_return dasd_times_out(struct request *req)
3161 {
3162 struct dasd_block *block = req->q->queuedata;
3163 struct dasd_device *device;
3164 struct dasd_ccw_req *cqr;
3165 unsigned long flags;
3166 int rc = 0;
3167
3168 cqr = blk_mq_rq_to_pdu(req);
3169 if (!cqr)
3170 return BLK_EH_DONE;
3171
3172 spin_lock_irqsave(&cqr->dq->lock, flags);
3173 device = cqr->startdev ? cqr->startdev : block->base;
3174 if (!device->blk_timeout) {
3175 spin_unlock_irqrestore(&cqr->dq->lock, flags);
3176 return BLK_EH_RESET_TIMER;
3177 }
3178 DBF_DEV_EVENT(DBF_WARNING, device,
3179 " dasd_times_out cqr %p status %x",
3180 cqr, cqr->status);
3181
3182 spin_lock(&block->queue_lock);
3183 spin_lock(get_ccwdev_lock(device->cdev));
3184 cqr->retries = -1;
3185 cqr->intrc = -ETIMEDOUT;
3186 if (cqr->status >= DASD_CQR_QUEUED) {
3187 rc = __dasd_cancel_req(cqr);
3188 } else if (cqr->status == DASD_CQR_FILLED ||
3189 cqr->status == DASD_CQR_NEED_ERP) {
3190 cqr->status = DASD_CQR_TERMINATED;
3191 } else if (cqr->status == DASD_CQR_IN_ERP) {
3192 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
3193
3194 list_for_each_entry_safe(searchcqr, nextcqr,
3195 &block->ccw_queue, blocklist) {
3196 tmpcqr = searchcqr;
3197 while (tmpcqr->refers)
3198 tmpcqr = tmpcqr->refers;
3199 if (tmpcqr != cqr)
3200 continue;
3201 /* searchcqr is an ERP request for cqr */
3202 searchcqr->retries = -1;
3203 searchcqr->intrc = -ETIMEDOUT;
3204 if (searchcqr->status >= DASD_CQR_QUEUED) {
3205 rc = __dasd_cancel_req(searchcqr);
3206 } else if ((searchcqr->status == DASD_CQR_FILLED) ||
3207 (searchcqr->status == DASD_CQR_NEED_ERP)) {
3208 searchcqr->status = DASD_CQR_TERMINATED;
3209 rc = 0;
3210 } else if (searchcqr->status == DASD_CQR_IN_ERP) {
3211 /*
3212 * Shouldn't happen; most recent ERP
3213 * request is at the front of queue
3214 */
3215 continue;
3216 }
3217 break;
3218 }
3219 }
3220 spin_unlock(get_ccwdev_lock(device->cdev));
3221 dasd_schedule_block_bh(block);
3222 spin_unlock(&block->queue_lock);
3223 spin_unlock_irqrestore(&cqr->dq->lock, flags);
3224
3225 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;
3226 }
3227
3228 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
3229 unsigned int idx)
3230 {
3231 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL);
3232
3233 if (!dq)
3234 return -ENOMEM;
3235
3236 spin_lock_init(&dq->lock);
3237 hctx->driver_data = dq;
3238
3239 return 0;
3240 }
3241
3242 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
3243 {
3244 kfree(hctx->driver_data);
3245 hctx->driver_data = NULL;
3246 }
3247
3248 static void dasd_request_done(struct request *req)
3249 {
3250 blk_mq_end_request(req, 0);
3251 blk_mq_run_hw_queues(req->q, true);
3252 }
3253
3254 struct blk_mq_ops dasd_mq_ops = {
3255 .queue_rq = do_dasd_request,
3256 .complete = dasd_request_done,
3257 .timeout = dasd_times_out,
3258 .init_hctx = dasd_init_hctx,
3259 .exit_hctx = dasd_exit_hctx,
3260 };
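
/*
 * Illustrative sketch (not the actual setup code, which lives in the
 * DASD block/gendisk allocation path; the queue depth and hw queue
 * count below are made-up values): how a blk_mq_tag_set could be wired
 * to dasd_mq_ops. Note that dasd_times_out() above relies on the cqr
 * being the blk-mq request pdu, hence the cmd_size.
 */
static int __maybe_unused example_setup_tag_set(struct blk_mq_tag_set *set)
{
	memset(set, 0, sizeof(*set));
	set->ops = &dasd_mq_ops;
	set->cmd_size = sizeof(struct dasd_ccw_req);	/* cqr as request pdu */
	set->nr_hw_queues = 4;				/* hypothetical value */
	set->queue_depth = 32;				/* hypothetical value */
	set->numa_node = NUMA_NO_NODE;

	return blk_mq_alloc_tag_set(set);		/* 0 on success */
}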
3261
3262 static int dasd_open(struct gendisk *disk, blk_mode_t mode)
3263 {
3264 struct dasd_device *base;
3265 int rc;
3266
3267 base = dasd_device_from_gendisk(disk);
3268 if (!base)
3269 return -ENODEV;
3270
3271 atomic_inc(&base->block->open_count);
3272 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
3273 rc = -ENODEV;
3274 goto unlock;
3275 }
3276
3277 if (!try_module_get(base->discipline->owner)) {
3278 rc = -EINVAL;
3279 goto unlock;
3280 }
3281
3282 if (dasd_probeonly) {
3283 dev_info(&base->cdev->dev,
3284 "Accessing the DASD failed because it is in "
3285 "probeonly mode\n");
3286 rc = -EPERM;
3287 goto out;
3288 }
3289
3290 if (base->state <= DASD_STATE_BASIC) {
3291 DBF_DEV_EVENT(DBF_ERR, base, " %s",
3292 " Cannot open unrecognized device");
3293 rc = -ENODEV;
3294 goto out;
3295 }
3296 if ((mode & BLK_OPEN_WRITE) &&
3297 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
3298 (base->features & DASD_FEATURE_READONLY))) {
3299 rc = -EROFS;
3300 goto out;
3301 }
3302 dasd_put_device(base);
3303 return 0;
3304
3305 out:
3306 module_put(base->discipline->owner);
3307 unlock:
3308 atomic_dec(&base->block->open_count);
3309 dasd_put_device(base);
3310 return rc;
3311 }
3312
3313 static void dasd_release(struct gendisk *disk)
3314 {
3315 struct dasd_device *base = dasd_device_from_gendisk(disk);
3316 if (base) {
3317 atomic_dec(&base->block->open_count);
3318 module_put(base->discipline->owner);
3319 dasd_put_device(base);
3320 }
3321 }
3322
3323 /*
3324 * Return disk geometry.
3325 */
3326 static int dasd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
3327 {
3328 struct dasd_device *base;
3329
3330 base = dasd_device_from_gendisk(disk);
3331 if (!base)
3332 return -ENODEV;
3333
3334 if (!base->discipline ||
3335 !base->discipline->fill_geometry) {
3336 dasd_put_device(base);
3337 return -EINVAL;
3338 }
3339 base->discipline->fill_geometry(base->block, geo);
3340 /* geo->start is left unchanged by fill_geometry above */
3341 geo->start >>= base->block->s2b_shift;
3342 dasd_put_device(base);
3343 return 0;
3344 }
3345
3346 const struct block_device_operations
3347 dasd_device_operations = {
3348 .owner = THIS_MODULE,
3349 .open = dasd_open,
3350 .release = dasd_release,
3351 .ioctl = dasd_ioctl,
3352 .getgeo = dasd_getgeo,
3353 .set_read_only = dasd_set_read_only,
3354 };
3355
3356 /*******************************************************************************
3357 * end of block device operations
3358 */
3359
3360 static void
3361 dasd_exit(void)
3362 {
3363 #ifdef CONFIG_PROC_FS
3364 dasd_proc_exit();
3365 #endif
3366 dasd_eer_exit();
3367 kmem_cache_destroy(dasd_page_cache);
3368 dasd_page_cache = NULL;
3369 dasd_gendisk_exit();
3370 dasd_devmap_exit();
3371 if (dasd_debug_area != NULL) {
3372 debug_unregister(dasd_debug_area);
3373 dasd_debug_area = NULL;
3374 }
3375 dasd_statistics_removeroot();
3376 }
3377
3378 /*
3379 * SECTION: common functions for ccw_driver use
3380 */
3381
3382 /*
3383 * Is the device read-only?
3384 * Note that this function does not report the setting of the
3385 * readonly device attribute, but how it is configured in z/VM.
3386 */
3387 int dasd_device_is_ro(struct dasd_device *device)
3388 {
3389 struct ccw_dev_id dev_id;
3390 struct diag210 diag_data;
3391 int rc;
3392
3393 if (!machine_is_vm())
3394 return 0;
3395 ccw_device_get_id(device->cdev, &dev_id);
3396 memset(&diag_data, 0, sizeof(diag_data));
3397 diag_data.vrdcdvno = dev_id.devno;
3398 diag_data.vrdclen = sizeof(diag_data);
3399 rc = diag210(&diag_data);
3400 if (rc == 0 || rc == 2) {
3401 return diag_data.vrdcvfla & 0x80;
3402 } else {
3403 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
3404 dev_id.devno, rc);
3405 return 0;
3406 }
3407 }
3408 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
3409
3410 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3411 {
3412 struct ccw_device *cdev = data;
3413 int ret;
3414
3415 ret = ccw_device_set_online(cdev);
3416 if (ret)
3417 dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret);
3418 }
3419
3420 /*
3421 * Initial attempt at a probe function. This can be simplified once
3422 * the other detection code is gone.
3423 */
3424 int dasd_generic_probe(struct ccw_device *cdev)
3425 {
3426 cdev->handler = &dasd_int_handler;
3427
3428 /*
3429 * Automatically online either all dasd devices (dasd_autodetect)
3430 * or all devices specified with dasd= parameters during
3431 * initial probe.
3432 */
3433 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
3434 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
3435 async_schedule(dasd_generic_auto_online, cdev);
3436 return 0;
3437 }
3438 EXPORT_SYMBOL_GPL(dasd_generic_probe);
3439
3440 void dasd_generic_free_discipline(struct dasd_device *device)
3441 {
3442 /* Forget the discipline information. */
3443 if (device->discipline) {
3444 if (device->discipline->uncheck_device)
3445 device->discipline->uncheck_device(device);
3446 module_put(device->discipline->owner);
3447 device->discipline = NULL;
3448 }
3449 if (device->base_discipline) {
3450 module_put(device->base_discipline->owner);
3451 device->base_discipline = NULL;
3452 }
3453 }
3454 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline);
3455
3456 /*
3457 * This will one day be called from a global not_oper handler.
3458 * It is also used by driver_unregister during module unload.
3459 */
3460 void dasd_generic_remove(struct ccw_device *cdev)
3461 {
3462 struct dasd_device *device;
3463 struct dasd_block *block;
3464
3465 device = dasd_device_from_cdev(cdev);
3466 if (IS_ERR(device))
3467 return;
3468
3469 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
3470 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3471 /* Already doing offline processing */
3472 dasd_put_device(device);
3473 return;
3474 }
3475 /*
3476 * This device is removed unconditionally. Set the offline
3477 * flag to prevent dasd_open from opening it while it is
3478 * not quite down yet.
3479 */
3480 dasd_set_target_state(device, DASD_STATE_NEW);
3481 cdev->handler = NULL;
3482 /* dasd_delete_device destroys the device reference. */
3483 block = device->block;
3484 dasd_delete_device(device);
3485 /*
3486 * life cycle of block is bound to device, so delete it after
3487 * device was safely removed
3488 */
3489 if (block)
3490 dasd_free_block(block);
3491 }
3492 EXPORT_SYMBOL_GPL(dasd_generic_remove);
3493
3494 /*
3495 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
3496 * the device is detected for the first time and is supposed to be used
3497 * or the user has started activation through sysfs.
3498 */
3499 int dasd_generic_set_online(struct ccw_device *cdev,
3500 struct dasd_discipline *base_discipline)
3501 {
3502 struct dasd_discipline *discipline;
3503 struct dasd_device *device;
3504 struct device *dev;
3505 int rc;
3506
3507 dev = &cdev->dev;
3508
3509 /* first online clears initial online feature flag */
3510 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
3511 device = dasd_create_device(cdev);
3512 if (IS_ERR(device))
3513 return PTR_ERR(device);
3514
3515 discipline = base_discipline;
3516 if (device->features & DASD_FEATURE_USEDIAG) {
3517 if (!dasd_diag_discipline_pointer) {
3518 /* Try to load the required module. */
3519 rc = request_module(DASD_DIAG_MOD);
3520 if (rc) {
3521 dev_warn(dev, "Setting the DASD online failed "
3522 "because the required module %s "
3523 "could not be loaded (rc=%d)\n",
3524 DASD_DIAG_MOD, rc);
3525 dasd_delete_device(device);
3526 return -ENODEV;
3527 }
3528 }
3529 /* Module init could have failed, so check again here after
3530 * request_module(). */
3531 if (!dasd_diag_discipline_pointer) {
3532 dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n");
3533 dasd_delete_device(device);
3534 return -ENODEV;
3535 }
3536 discipline = dasd_diag_discipline_pointer;
3537 }
3538 if (!try_module_get(base_discipline->owner)) {
3539 dasd_delete_device(device);
3540 return -EINVAL;
3541 }
3542 device->base_discipline = base_discipline;
3543 if (!try_module_get(discipline->owner)) {
3544 dasd_delete_device(device);
3545 return -EINVAL;
3546 }
3547 device->discipline = discipline;
3548
3549 /* check_device will allocate block device if necessary */
3550 rc = discipline->check_device(device);
3551 if (rc) {
3552 dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n",
3553 discipline->name, rc);
3554 dasd_delete_device(device);
3555 return rc;
3556 }
3557
3558 dasd_set_target_state(device, DASD_STATE_ONLINE);
3559 if (device->state <= DASD_STATE_KNOWN) {
3560 dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n");
3561 rc = -ENODEV;
3562 dasd_set_target_state(device, DASD_STATE_NEW);
3563 if (device->block)
3564 dasd_free_block(device->block);
3565 dasd_delete_device(device);
3566 } else {
3567 dev_dbg(dev, "dasd_generic device found\n");
3568 }
3569
3570 wait_event(dasd_init_waitq, _wait_for_device(device));
3571
3572 dasd_put_device(device);
3573 return rc;
3574 }
3575 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
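/*
 * Usage note (illustrative sketch, not part of this file): a discipline
 * driver's ccw set_online callback typically just forwards to the generic
 * helper, passing its own discipline. The names below follow the ECKD
 * discipline and should be treated as assumptions for the example:
 *
 *	static int dasd_eckd_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
 *	}
 */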
3576
3577 int dasd_generic_set_offline(struct ccw_device *cdev)
3578 {
3579 int max_count, open_count, rc;
3580 struct dasd_device *device;
3581 struct dasd_block *block;
3582 unsigned long flags;
3583 struct device *dev;
3584
3585 dev = &cdev->dev;
3586
3587 rc = 0;
3588 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3589 device = dasd_device_from_cdev_locked(cdev);
3590 if (IS_ERR(device)) {
3591 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3592 return PTR_ERR(device);
3593 }
3594
3595 /*
3596 * We must make sure that this device is currently not in use.
3597 * The open_count is increased for every opener, which includes
3598 * the blkdev_get in dasd_scan_partitions. We are only interested
3599 * in the other openers.
3600 */
3601 if (device->block) {
3602 max_count = device->block->bdev_file ? 0 : -1;
3603 open_count = atomic_read(&device->block->open_count);
3604 if (open_count > max_count) {
3605 if (open_count > 0)
3606 dev_warn(dev, "The DASD cannot be set offline with open count %i\n",
3607 open_count);
3608 else
3609 dev_warn(dev, "The DASD cannot be set offline while it is in use\n");
3610 rc = -EBUSY;
3611 goto out_err;
3612 }
3613 }
3614
3615 /*
3616 * Test if offline processing is already running and exit if so.
3617 * If a safe offline is being processed, this can only be a normal
3618 * offline request that is allowed to overtake the safe offline and
3619 * cancel any I/O we no longer want to wait for.
3620 */
3621 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
3622 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3623 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING,
3624 &device->flags);
3625 } else {
3626 rc = -EBUSY;
3627 goto out_err;
3628 }
3629 }
3630 set_bit(DASD_FLAG_OFFLINE, &device->flags);
3631
3632 /*
3633 * If safe_offline was requested, set the safe_offline_running flag
3634 * and clear safe_offline so that a subsequent normal offline call
3635 * can overtake the safe_offline processing.
3636 */
3637 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
3638 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3639 /* need to unlock here to wait for outstanding I/O */
3640 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3641 /*
3642 * If we want to set the device safe offline, all I/O operations
3643 * should be finished before continuing the offline process,
3644 * so sync the bdev first and then wait for our queues to become
3645 * empty.
3646 */
3647 if (device->block && device->block->bdev_file)
3648 bdev_mark_dead(file_bdev(device->block->bdev_file), false);
3649 dasd_schedule_device_bh(device);
3650 rc = wait_event_interruptible(shutdown_waitq,
3651 _wait_for_empty_queues(device));
3652 if (rc != 0)
3653 goto interrupted;
3654
3655 /*
3656 * Check if a normal offline process overtook the safe offline
3657 * processing. In this case simply do nothing besides returning
3658 * that we got interrupted.
3659 * Otherwise mark safe offline as no longer running and
3660 * continue with normal offline.
3661 */
3662 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3663 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
3664 rc = -ERESTARTSYS;
3665 goto out_err;
3666 }
3667 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3668 }
3669 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3670
3671 dasd_set_target_state(device, DASD_STATE_NEW);
3672 /* dasd_delete_device destroys the device reference. */
3673 block = device->block;
3674 dasd_delete_device(device);
3675 /*
3676 * The life cycle of the block device is bound to the device, so
3677 * delete it only after the device has been safely removed.
3678 */
3679 if (block)
3680 dasd_free_block(block);
3681
3682 return 0;
3683
3684 interrupted:
3685 /* interrupted by signal */
3686 spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
3687 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
3688 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3689 out_err:
3690 dasd_put_device(device);
3691 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
3692 return rc;
3693 }
3694 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3695
3696 int dasd_generic_last_path_gone(struct dasd_device *device)
3697 {
3698 struct dasd_ccw_req *cqr;
3699
3700 dev_warn(&device->cdev->dev, "No operational channel path is left "
3701 "for the device\n");
3702 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
3703 /* First call extended error reporting and check for autoquiesce. */
3704 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
3705
3706 if (device->state < DASD_STATE_BASIC)
3707 return 0;
3708 /* Device is active. We want to keep it. */
3709 list_for_each_entry(cqr, &device->ccw_queue, devlist)
3710 if ((cqr->status == DASD_CQR_IN_IO) ||
3711 (cqr->status == DASD_CQR_CLEAR_PENDING)) {
3712 cqr->status = DASD_CQR_QUEUED;
3713 cqr->retries++;
3714 }
3715 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
3716 dasd_device_clear_timer(device);
3717 dasd_schedule_device_bh(device);
3718 return 1;
3719 }
3720 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);
3721
3722 int dasd_generic_path_operational(struct dasd_device *device)
3723 {
3724 dev_info(&device->cdev->dev, "A channel path to the device has become "
3725 "operational\n");
3726 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
3727 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
3728 dasd_schedule_device_bh(device);
3729 if (device->block) {
3730 dasd_schedule_block_bh(device->block);
3731 if (device->block->gdp)
3732 blk_mq_run_hw_queues(device->block->gdp->queue, true);
3733 }
3734
3735 if (!device->stopped)
3736 wake_up(&generic_waitq);
3737
3738 return 1;
3739 }
3740 EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
3741
3742 int dasd_generic_notify(struct ccw_device *cdev, int event)
3743 {
3744 struct dasd_device *device;
3745 int ret;
3746
3747 device = dasd_device_from_cdev_locked(cdev);
3748 if (IS_ERR(device))
3749 return 0;
3750 ret = 0;
3751 switch (event) {
3752 case CIO_GONE:
3753 case CIO_BOXED:
3754 case CIO_NO_PATH:
3755 dasd_path_no_path(device);
3756 ret = dasd_generic_last_path_gone(device);
3757 break;
3758 case CIO_OPER:
3759 ret = 1;
3760 if (dasd_path_get_opm(device))
3761 ret = dasd_generic_path_operational(device);
3762 break;
3763 }
3764 dasd_put_device(device);
3765 return ret;
3766 }
3767 EXPORT_SYMBOL_GPL(dasd_generic_notify);
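/*
 * Wiring sketch (illustrative): the generic callbacks above are normally
 * plugged into a discipline's struct ccw_driver so that CIO events reach
 * the DASD core. The field names follow struct ccw_driver; the "xxx"
 * identifiers are placeholders, not definitions from this file:
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		.driver = {
 *			.name  = "dasd-xxx",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids         = dasd_xxx_ids,
 *		.probe       = dasd_xxx_probe,
 *		.remove      = dasd_generic_remove,
 *		.set_online  = dasd_xxx_set_online,
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *		.path_event  = dasd_generic_path_event,
 *		.shutdown    = dasd_generic_shutdown,
 *	};
 */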
3768
3769 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3770 {
3771 struct dasd_device *device;
3772 int chp, oldopm, hpfpm, ifccpm;
3773
3774 device = dasd_device_from_cdev_locked(cdev);
3775 if (IS_ERR(device))
3776 return;
3777
3778 oldopm = dasd_path_get_opm(device);
3779 for (chp = 0; chp < 8; chp++) {
3780 if (path_event[chp] & PE_PATH_GONE) {
3781 dasd_path_notoper(device, chp);
3782 }
3783 if (path_event[chp] & PE_PATH_AVAILABLE) {
3784 dasd_path_available(device, chp);
3785 dasd_schedule_device_bh(device);
3786 }
3787 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
3788 if (!dasd_path_is_operational(device, chp) &&
3789 !dasd_path_need_verify(device, chp)) {
3790 /*
3791 * we cannot establish a pathgroup on an
3792 * unavailable path, so trigger a path
3793 * verification first
3794 */
3795 dasd_path_available(device, chp);
3796 dasd_schedule_device_bh(device);
3797 }
3798 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3799 "Pathgroup re-established\n");
3800 if (device->discipline->kick_validate)
3801 device->discipline->kick_validate(device);
3802 }
3803 if (path_event[chp] & PE_PATH_FCES_EVENT) {
3804 dasd_path_fcsec_update(device, chp);
3805 dasd_schedule_device_bh(device);
3806 }
3807 }
3808 hpfpm = dasd_path_get_hpfpm(device);
3809 ifccpm = dasd_path_get_ifccpm(device);
3810 if (!dasd_path_get_opm(device) && hpfpm) {
3811 /*
3812 * The device has no operational paths, but at least one path is
3813 * disabled due to HPF errors.
3814 * Disable HPF entirely and use the path(s) again.
3815 */
3816 if (device->discipline->disable_hpf)
3817 device->discipline->disable_hpf(device);
3818 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
3819 dasd_path_set_tbvpm(device, hpfpm);
3820 dasd_schedule_device_bh(device);
3821 dasd_schedule_requeue(device);
3822 } else if (!dasd_path_get_opm(device) && ifccpm) {
3823 /*
3824 * The device has no operational paths, but at least one path is
3825 * disabled due to IFCC errors.
3826 * Trigger path verification on the paths with IFCC errors.
3827 */
3828 dasd_path_set_tbvpm(device, ifccpm);
3829 dasd_schedule_device_bh(device);
3830 }
3831 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
3832 dev_warn(&device->cdev->dev,
3833 "No verified channel paths remain for the device\n");
3834 DBF_DEV_EVENT(DBF_WARNING, device,
3835 "%s", "last verified path gone");
3836 /* First call extended error reporting and check for autoquiesce. */
3837 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH);
3838 dasd_device_set_stop_bits(device,
3839 DASD_STOPPED_DC_WAIT);
3840 }
3841 dasd_put_device(device);
3842 }
3843 EXPORT_SYMBOL_GPL(dasd_generic_path_event);
3844
3845 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
3846 {
3847 if (!dasd_path_get_opm(device) && lpm) {
3848 dasd_path_set_opm(device, lpm);
3849 dasd_generic_path_operational(device);
3850 } else
3851 dasd_path_add_opm(device, lpm);
3852 return 0;
3853 }
3854 EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
3855
3856 void dasd_generic_space_exhaust(struct dasd_device *device,
3857 struct dasd_ccw_req *cqr)
3858 {
3859 /* First call extended error reporting and check for autoquiesce. */
3860 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC);
3861
3862 if (device->state < DASD_STATE_BASIC)
3863 return;
3864
3865 if (cqr->status == DASD_CQR_IN_IO ||
3866 cqr->status == DASD_CQR_CLEAR_PENDING) {
3867 cqr->status = DASD_CQR_QUEUED;
3868 cqr->retries++;
3869 }
3870 dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
3871 dasd_device_clear_timer(device);
3872 dasd_schedule_device_bh(device);
3873 }
3874 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);
3875
3876 void dasd_generic_space_avail(struct dasd_device *device)
3877 {
3878 dev_info(&device->cdev->dev, "Extent pool space is available\n");
3879 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");
3880
3881 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
3882 dasd_schedule_device_bh(device);
3883
3884 if (device->block) {
3885 dasd_schedule_block_bh(device->block);
3886 if (device->block->gdp)
3887 blk_mq_run_hw_queues(device->block->gdp->queue, true);
3888 }
3889 if (!device->stopped)
3890 wake_up(&generic_waitq);
3891 }
3892 EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
3893
3894 /*
3895 * Clear active requests and requeue them to the block layer if possible.
3896 */
3897 int dasd_generic_requeue_all_requests(struct dasd_device *device)
3898 {
3899 struct dasd_block *block = device->block;
3900 struct list_head requeue_queue;
3901 struct dasd_ccw_req *cqr, *n;
3902 int rc;
3903
3904 if (!block)
3905 return 0;
3906
3907 INIT_LIST_HEAD(&requeue_queue);
3908 rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
3909
3910 /* Now call the callback function of flushed requests */
3911 restart_cb:
3912 list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
3913 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
3914 /* Process finished ERP request. */
3915 if (cqr->refers) {
3916 spin_lock_bh(&block->queue_lock);
3917 __dasd_process_erp(block->base, cqr);
3918 spin_unlock_bh(&block->queue_lock);
3919 /* Restart the list_for_each_entry_safe loop since
3920 * __dasd_process_erp might remove multiple elements
3921 */
3922 goto restart_cb;
3923 }
3924 _dasd_requeue_request(cqr);
3925 list_del_init(&cqr->blocklist);
3926 cqr->block->base->discipline->free_cp(
3927 cqr, (struct request *) cqr->callback_data);
3928 }
3929 dasd_schedule_device_bh(device);
3930 return rc;
3931 }
3932 EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);
3933
3934 static void do_requeue_requests(struct work_struct *work)
3935 {
3936 struct dasd_device *device = container_of(work, struct dasd_device,
3937 requeue_requests);
3938 dasd_generic_requeue_all_requests(device);
3939 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
3940 if (device->block)
3941 dasd_schedule_block_bh(device->block);
3942 dasd_put_device(device);
3943 }
3944
3945 void dasd_schedule_requeue(struct dasd_device *device)
3946 {
3947 dasd_get_device(device);
3948 /* Queue the requeue_requests work to the kernel event daemon. */
3949 if (!schedule_work(&device->requeue_requests))
3950 dasd_put_device(device);
3951 }
3952 EXPORT_SYMBOL(dasd_schedule_requeue);
3953
3954 static int dasd_handle_autoquiesce(struct dasd_device *device,
3955 struct dasd_ccw_req *cqr,
3956 unsigned int reason)
3957 {
3958 /* In any case, write an EER message with the given reason. */
3959 if (dasd_eer_enabled(device))
3960 dasd_eer_write(device, cqr, reason);
3961
3962 if (!test_bit(reason, &device->aq_mask))
3963 return 0;
3964
3965 /* notify eer about autoquiesce */
3966 if (dasd_eer_enabled(device))
3967 dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);
3968
3969 dev_info(&device->cdev->dev,
3970 "The DASD has been put in the quiesce state\n");
3971 dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);
3972
3973 if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
3974 dasd_schedule_requeue(device);
3975
3976 return 1;
3977 }
3978
3979 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
3980 int rdc_buffer_size,
3981 int magic)
3982 {
3983 struct dasd_ccw_req *cqr;
3984 struct ccw1 *ccw;
3985
3986 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
3987 NULL);
3988
3989 if (IS_ERR(cqr)) {
3990 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
3991 "Could not allocate RDC request");
3992 return cqr;
3993 }
3994
3995 ccw = cqr->cpaddr;
3996 ccw->cmd_code = CCW_CMD_RDC;
3997 ccw->cda = virt_to_dma32(cqr->data);
3998 ccw->flags = 0;
3999 ccw->count = rdc_buffer_size;
4000 cqr->startdev = device;
4001 cqr->memdev = device;
4002 cqr->expires = 10*HZ;
4003 cqr->retries = 256;
4004 cqr->buildclk = get_tod_clock();
4005 cqr->status = DASD_CQR_FILLED;
4006 return cqr;
4007 }
4008
4009
4010 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
4011 void *rdc_buffer, int rdc_buffer_size)
4012 {
4013 int ret;
4014 struct dasd_ccw_req *cqr;
4015
4016 cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
4017 if (IS_ERR(cqr))
4018 return PTR_ERR(cqr);
4019
4020 ret = dasd_sleep_on(cqr);
4021 if (ret == 0)
4022 memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
4023 dasd_sfree_request(cqr, cqr->memdev);
4024 return ret;
4025 }
4026 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
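/*
 * Usage sketch (illustrative): disciplines call this during device checking
 * to read the device characteristics into a private buffer. The magic value
 * and buffer layout below are modelled on the ECKD discipline and are
 * assumptions for the sake of the example:
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 &private->rdc_data, 64);
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Reading device characteristics failed, rc=%d\n", rc);
 */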
4027
4028 /*
4029 * In command mode and transport mode we need to look for sense
4030 * data in different places. The sense data itself is always
4031 * an array of 32 bytes, so we can unify the sense data access
4032 * for both modes.
4033 */
4034 char *dasd_get_sense(struct irb *irb)
4035 {
4036 struct tsb *tsb = NULL;
4037 char *sense = NULL;
4038
4039 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
4040 if (irb->scsw.tm.tcw)
4041 tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
4042 if (tsb && tsb->length == 64 && tsb->flags)
4043 switch (tsb->flags & 0x07) {
4044 case 1: /* tsa_iostat */
4045 sense = tsb->tsa.iostat.sense;
4046 break;
4047 case 2: /* tsa_ddpc */
4048 sense = tsb->tsa.ddpc.sense;
4049 break;
4050 default:
4051 /* currently we don't use interrogate data */
4052 break;
4053 }
4054 } else if (irb->esw.esw0.erw.cons) {
4055 sense = irb->ecw;
4056 }
4057 return sense;
4058 }
4059 EXPORT_SYMBOL_GPL(dasd_get_sense);
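/*
 * Usage sketch (illustrative): interrupt handling and ERP code can use the
 * unified accessor and then inspect the 32 sense bytes. The byte index,
 * flag, and handle_unit_check() helper below are assumptions chosen for
 * illustration, not definitions from this file:
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense && (sense[27] & DASD_SENSE_BIT_0))
 *		handle_unit_check(device, sense);
 */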
4060
4061 void dasd_generic_shutdown(struct ccw_device *cdev)
4062 {
4063 struct dasd_device *device;
4064
4065 device = dasd_device_from_cdev(cdev);
4066 if (IS_ERR(device))
4067 return;
4068
4069 if (device->block)
4070 dasd_schedule_block_bh(device->block);
4071
4072 dasd_schedule_device_bh(device);
4073
4074 wait_event(shutdown_waitq, _wait_for_empty_queues(device));
4075 }
4076 EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
4077
4078 static int __init dasd_init(void)
4079 {
4080 int rc;
4081
4082 init_waitqueue_head(&dasd_init_waitq);
4083 init_waitqueue_head(&dasd_flush_wq);
4084 init_waitqueue_head(&generic_waitq);
4085 init_waitqueue_head(&shutdown_waitq);
4086
4087 /* register 'common' DASD debug area, used for all DBF_XXX calls */
4088 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
4089 if (dasd_debug_area == NULL) {
4090 rc = -ENOMEM;
4091 goto failed;
4092 }
4093 debug_register_view(dasd_debug_area, &debug_sprintf_view);
4094 debug_set_level(dasd_debug_area, DBF_WARNING);
4095
4096 DBF_EVENT(DBF_EMERG, "%s", "debug area created");
4097
4098 dasd_diag_discipline_pointer = NULL;
4099
4100 dasd_statistics_createroot();
4101
4102 rc = dasd_devmap_init();
4103 if (rc)
4104 goto failed;
4105 rc = dasd_gendisk_init();
4106 if (rc)
4107 goto failed;
4108 rc = dasd_parse();
4109 if (rc)
4110 goto failed;
4111 rc = dasd_eer_init();
4112 if (rc)
4113 goto failed;
4114 #ifdef CONFIG_PROC_FS
4115 rc = dasd_proc_init();
4116 if (rc)
4117 goto failed;
4118 #endif
4119
4120 return 0;
4121 failed:
4122 pr_info("The DASD device driver could not be initialized\n");
4123 dasd_exit();
4124 return rc;
4125 }
4126
4127 module_init(dasd_init);
4128 module_exit(dasd_exit);
4129