xref: /linux/drivers/s390/block/dasd_alias.c (revision d39d0ed196aa1685bb24771e92f78633c66ac9cb)
1 /*
2  * PAV alias management for the DASD ECKD discipline
3  *
4  * Copyright IBM Corporation, 2007
5  * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6  */
7 
8 #define KMSG_COMPONENT "dasd-eckd"
9 
10 #include <linux/list.h>
11 #include <linux/slab.h>
12 #include <asm/ebcdic.h>
13 #include "dasd_int.h"
14 #include "dasd_eckd.h"
15 
16 #ifdef PRINTK_HEADER
17 #undef PRINTK_HEADER
18 #endif				/* PRINTK_HEADER */
19 #define PRINTK_HEADER "dasd(eckd):"
20 
21 
22 /*
23  * General concept of alias management:
24  * - PAV and DASD alias management is specific to the eckd discipline.
25  * - A device is connected to an lcu as long as the device exists.
26  *   dasd_alias_make_device_known_to_lcu will be called when the
27  *   device is checked by the eckd discipline and
28  *   dasd_alias_disconnect_device_from_lcu will be called
29  *   before the device is deleted.
30  * - The dasd_alias_add_device / dasd_alias_remove_device
31  *   functions mark the point when a device is 'ready for service'.
32  * - A summary unit check is a rare occasion, but it is mandatory to
33  *   support it. It requires some complex recovery actions before the
34  *   devices can be used again (see dasd_alias_handle_summary_unit_check).
35  * - dasd_alias_get_start_dev will find an alias device that can be used
36  *   instead of the base device and does some (very simple) load balancing.
37  *   This is the function that gets called for each I/O, so any effort to
38  *   improve things should make this function faster or smarter; the rest
39  *   only has to be correct.
40  */
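
/*
 * Illustrative sketch of the call order described above, as the eckd
 * discipline is expected to drive it (see dasd_eckd.c). The function name
 * and the exact error handling are made up for illustration only, which is
 * why the block is not compiled.
 */
#if 0
static void dasd_alias_lifecycle_sketch(struct dasd_device *device)
{
	int is_lcu_known;

	/* first step for a new device: connect it to its lcu */
	is_lcu_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_lcu_known < 0)
		return;	/* e.g. -ENOMEM */

	if (is_lcu_known == 0) {
		/* new lcu: this device performs the one-time lcu setup */
		/* ... configure the lcu on the storage server ... */
		dasd_alias_lcu_setup_complete(device);
	} else {
		/* lcu already known: wait until the first device is done */
		dasd_alias_wait_for_lcu_setup(device);
	}

	/* mark the device 'ready for service' */
	dasd_alias_add_device(device);

	/* per I/O: dasd_alias_get_start_dev() may return an alias to use */

	/* teardown, in reverse order */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);
}
#endif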
41 
42 
43 static void summary_unit_check_handling_work(struct work_struct *);
44 static void lcu_update_work(struct work_struct *);
45 static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
46 
47 static struct alias_root aliastree = {
48 	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
49 	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
50 };
51 
52 static struct alias_server *_find_server(struct dasd_uid *uid)
53 {
54 	struct alias_server *pos;
55 	list_for_each_entry(pos, &aliastree.serverlist, server) {
56 		if (!strncmp(pos->uid.vendor, uid->vendor,
57 			     sizeof(uid->vendor))
58 		    && !strncmp(pos->uid.serial, uid->serial,
59 				sizeof(uid->serial)))
60 			return pos;
61 	}
62 	return NULL;
63 }
64 
65 static struct alias_lcu *_find_lcu(struct alias_server *server,
66 				   struct dasd_uid *uid)
67 {
68 	struct alias_lcu *pos;
69 	list_for_each_entry(pos, &server->lculist, lcu) {
70 		if (pos->uid.ssid == uid->ssid)
71 			return pos;
72 	}
73 	return NULL;
74 }
75 
76 static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
77 					   struct dasd_uid *uid)
78 {
79 	struct alias_pav_group *pos;
80 	__u8 search_unit_addr;
81 
82 	/* for hyper pav there is only one group */
83 	if (lcu->pav == HYPER_PAV) {
84 		if (list_empty(&lcu->grouplist))
85 			return NULL;
86 		else
87 			return list_first_entry(&lcu->grouplist,
88 						struct alias_pav_group, group);
89 	}
90 
91 	/* for base pav we have to find the group that matches the base */
92 	if (uid->type == UA_BASE_DEVICE)
93 		search_unit_addr = uid->real_unit_addr;
94 	else
95 		search_unit_addr = uid->base_unit_addr;
96 	list_for_each_entry(pos, &lcu->grouplist, group) {
97 		if (pos->uid.base_unit_addr == search_unit_addr &&
98 		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
99 			return pos;
100 	}
101 	return NULL;
102 }
103 
104 static struct alias_server *_allocate_server(struct dasd_uid *uid)
105 {
106 	struct alias_server *server;
107 
108 	server = kzalloc(sizeof(*server), GFP_KERNEL);
109 	if (!server)
110 		return ERR_PTR(-ENOMEM);
111 	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
112 	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
113 	INIT_LIST_HEAD(&server->server);
114 	INIT_LIST_HEAD(&server->lculist);
115 	return server;
116 }
117 
118 static void _free_server(struct alias_server *server)
119 {
120 	kfree(server);
121 }
122 
123 static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
124 {
125 	struct alias_lcu *lcu;
126 
127 	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
128 	if (!lcu)
129 		return ERR_PTR(-ENOMEM);
130 	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
131 	if (!lcu->uac)
132 		goto out_err1;
133 	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
134 	if (!lcu->rsu_cqr)
135 		goto out_err2;
136 	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
137 				       GFP_KERNEL | GFP_DMA);
138 	if (!lcu->rsu_cqr->cpaddr)
139 		goto out_err3;
140 	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
141 	if (!lcu->rsu_cqr->data)
142 		goto out_err4;
143 
144 	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
145 	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
146 	lcu->uid.ssid = uid->ssid;
147 	lcu->pav = NO_PAV;
148 	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
149 	INIT_LIST_HEAD(&lcu->lcu);
150 	INIT_LIST_HEAD(&lcu->inactive_devices);
151 	INIT_LIST_HEAD(&lcu->active_devices);
152 	INIT_LIST_HEAD(&lcu->grouplist);
153 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
154 	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
155 	spin_lock_init(&lcu->lock);
156 	init_completion(&lcu->lcu_setup);
157 	return lcu;
158 
159 out_err4:
160 	kfree(lcu->rsu_cqr->cpaddr);
161 out_err3:
162 	kfree(lcu->rsu_cqr);
163 out_err2:
164 	kfree(lcu->uac);
165 out_err1:
166 	kfree(lcu);
167 	return ERR_PTR(-ENOMEM);
168 }
169 
170 static void _free_lcu(struct alias_lcu *lcu)
171 {
172 	kfree(lcu->rsu_cqr->data);
173 	kfree(lcu->rsu_cqr->cpaddr);
174 	kfree(lcu->rsu_cqr);
175 	kfree(lcu->uac);
176 	kfree(lcu);
177 }
178 
179 /*
180  * This is the function that will allocate all the server and lcu data,
181  * so this function must be called first for a new device.
182  * If the return value is 1, the lcu was already known before; if it
183  * is 0, this is a new lcu.
184  * A negative return code indicates that something went wrong (e.g. -ENOMEM).
185  */
186 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
187 {
188 	struct dasd_eckd_private *private;
189 	unsigned long flags;
190 	struct alias_server *server, *newserver;
191 	struct alias_lcu *lcu, *newlcu;
192 	int is_lcu_known;
193 	struct dasd_uid uid;
194 
195 	private = (struct dasd_eckd_private *) device->private;
196 
197 	device->discipline->get_uid(device, &uid);
198 	spin_lock_irqsave(&aliastree.lock, flags);
199 	is_lcu_known = 1;
200 	server = _find_server(&uid);
201 	if (!server) {
202 		spin_unlock_irqrestore(&aliastree.lock, flags);
203 		newserver = _allocate_server(&uid);
204 		if (IS_ERR(newserver))
205 			return PTR_ERR(newserver);
206 		spin_lock_irqsave(&aliastree.lock, flags);
207 		server = _find_server(&uid);
208 		if (!server) {
209 			list_add(&newserver->server, &aliastree.serverlist);
210 			server = newserver;
211 			is_lcu_known = 0;
212 		} else {
213 			/* someone was faster */
214 			_free_server(newserver);
215 		}
216 	}
217 
218 	lcu = _find_lcu(server, &uid);
219 	if (!lcu) {
220 		spin_unlock_irqrestore(&aliastree.lock, flags);
221 		newlcu = _allocate_lcu(&uid);
222 		if (IS_ERR(newlcu))
223 			return PTR_ERR(newlcu);
224 		spin_lock_irqsave(&aliastree.lock, flags);
225 		lcu = _find_lcu(server, &uid);
226 		if (!lcu) {
227 			list_add(&newlcu->lcu, &server->lculist);
228 			lcu = newlcu;
229 			is_lcu_known = 0;
230 		} else {
231 			/* someone was faster */
232 			_free_lcu(newlcu);
233 		}
234 		is_lcu_known = 0;
235 	}
236 	spin_lock(&lcu->lock);
237 	list_add(&device->alias_list, &lcu->inactive_devices);
238 	private->lcu = lcu;
239 	spin_unlock(&lcu->lock);
240 	spin_unlock_irqrestore(&aliastree.lock, flags);
241 
242 	return is_lcu_known;
243 }
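
/*
 * The allocation pattern used above, reduced to a sketch (hypothetical
 * obj/tree names, not compiled): since the allocation may sleep
 * (GFP_KERNEL), the spinlock is dropped around it and the lookup is
 * repeated under the lock; if a concurrent caller was faster, the freshly
 * allocated object is simply discarded again.
 */
#if 0
	obj = _find_obj(key);				/* under tree_lock */
	if (!obj) {
		spin_unlock_irqrestore(&tree_lock, flags);
		newobj = _allocate_obj(key);		/* may sleep */
		if (IS_ERR(newobj))
			return PTR_ERR(newobj);
		spin_lock_irqsave(&tree_lock, flags);
		obj = _find_obj(key);			/* did we race? */
		if (!obj) {
			list_add(&newobj->node, &tree_list);
			obj = newobj;
		} else {
			/* someone was faster */
			_free_obj(newobj);
		}
	}
#endif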
244 
245 /*
246  * The first device to be registered on an LCU will have to do
247  * some additional setup steps to configure that LCU on the
248  * storage server. All further devices should wait with their
249  * initialization until the first device is done.
250  * To synchronize this work, the first device will call
251  * dasd_alias_lcu_setup_complete when it is done, and all
252  * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
253  */
254 void dasd_alias_lcu_setup_complete(struct dasd_device *device)
255 {
256 	struct dasd_eckd_private *private;
257 	unsigned long flags;
258 	struct alias_server *server;
259 	struct alias_lcu *lcu;
260 	struct dasd_uid uid;
261 
262 	private = (struct dasd_eckd_private *) device->private;
263 	device->discipline->get_uid(device, &uid);
264 	lcu = NULL;
265 	spin_lock_irqsave(&aliastree.lock, flags);
266 	server = _find_server(&uid);
267 	if (server)
268 		lcu = _find_lcu(server, &uid);
269 	spin_unlock_irqrestore(&aliastree.lock, flags);
270 	if (!lcu) {
271 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
272 				"could not find lcu for %04x %02x",
273 				uid.ssid, uid.real_unit_addr);
274 		WARN_ON(1);
275 		return;
276 	}
277 	complete_all(&lcu->lcu_setup);
278 }
279 
280 void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
281 {
282 	struct dasd_eckd_private *private;
283 	unsigned long flags;
284 	struct alias_server *server;
285 	struct alias_lcu *lcu;
286 	struct dasd_uid uid;
287 
288 	private = (struct dasd_eckd_private *) device->private;
289 	device->discipline->get_uid(device, &uid);
290 	lcu = NULL;
291 	spin_lock_irqsave(&aliastree.lock, flags);
292 	server = _find_server(&uid);
293 	if (server)
294 		lcu = _find_lcu(server, &uid);
295 	spin_unlock_irqrestore(&aliastree.lock, flags);
296 	if (!lcu) {
297 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
298 				"could not find lcu for %04x %02x",
299 				uid.ssid, uid.real_unit_addr);
300 		WARN_ON(1);
301 		return;
302 	}
303 	wait_for_completion(&lcu->lcu_setup);
304 }
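
/*
 * Minimal sketch of the completion handshake used by the two functions
 * above (not compiled): complete_all() wakes every current waiter and lets
 * any later wait_for_completion() return immediately, so devices that are
 * registered after the setup has finished do not block.
 */
#if 0
	/* first device, once the lcu setup is done: */
	complete_all(&lcu->lcu_setup);

	/* every other device, before it starts using the lcu: */
	wait_for_completion(&lcu->lcu_setup);
#endif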
305 
306 /*
307  * This function removes a device from the scope of alias management.
308  * The complicated part is to make sure that it is not in use by
309  * any of the workers. If necessary, cancel the work.
310  */
311 void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
312 {
313 	struct dasd_eckd_private *private;
314 	unsigned long flags;
315 	struct alias_lcu *lcu;
316 	struct alias_server *server;
317 	int was_pending;
318 	struct dasd_uid uid;
319 
320 	private = (struct dasd_eckd_private *) device->private;
321 	lcu = private->lcu;
322 	device->discipline->get_uid(device, &uid);
323 	spin_lock_irqsave(&lcu->lock, flags);
324 	list_del_init(&device->alias_list);
325 	/* make sure that the workers don't use this device */
326 	if (device == lcu->suc_data.device) {
327 		spin_unlock_irqrestore(&lcu->lock, flags);
328 		cancel_work_sync(&lcu->suc_data.worker);
329 		spin_lock_irqsave(&lcu->lock, flags);
330 		if (device == lcu->suc_data.device)
331 			lcu->suc_data.device = NULL;
332 	}
333 	was_pending = 0;
334 	if (device == lcu->ruac_data.device) {
335 		spin_unlock_irqrestore(&lcu->lock, flags);
336 		was_pending = 1;
337 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
338 		spin_lock_irqsave(&lcu->lock, flags);
339 		if (device == lcu->ruac_data.device)
340 			lcu->ruac_data.device = NULL;
341 	}
342 	private->lcu = NULL;
343 	spin_unlock_irqrestore(&lcu->lock, flags);
344 
345 	spin_lock_irqsave(&aliastree.lock, flags);
346 	spin_lock(&lcu->lock);
347 	if (list_empty(&lcu->grouplist) &&
348 	    list_empty(&lcu->active_devices) &&
349 	    list_empty(&lcu->inactive_devices)) {
350 		list_del(&lcu->lcu);
351 		spin_unlock(&lcu->lock);
352 		_free_lcu(lcu);
353 		lcu = NULL;
354 	} else {
355 		if (was_pending)
356 			_schedule_lcu_update(lcu, NULL);
357 		spin_unlock(&lcu->lock);
358 	}
359 	server = _find_server(&uid);
360 	if (server && list_empty(&server->lculist)) {
361 		list_del(&server->server);
362 		_free_server(server);
363 	}
364 	spin_unlock_irqrestore(&aliastree.lock, flags);
365 }
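
/*
 * Sketch of the worker teardown used above (generic names, not compiled):
 * cancel_work_sync() may sleep and the workers themselves take lcu->lock,
 * so the lock has to be dropped for the cancel; afterwards the back-pointer
 * is checked again before it is cleared, because while the lock was dropped
 * the worker may have finished and the field may already refer to a
 * different device.
 */
#if 0
	if (device == owner->data.device) {
		spin_unlock_irqrestore(&owner->lock, flags);
		cancel_work_sync(&owner->data.worker);
		spin_lock_irqsave(&owner->lock, flags);
		if (device == owner->data.device)
			owner->data.device = NULL;
	}
#endif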
366 
367 /*
368  * This function assumes that the unit address configuration stored
369  * in the lcu is up to date and will update the device uid before
370  * adding it to a pav group.
371  */
372 
373 static int _add_device_to_lcu(struct alias_lcu *lcu,
374 			      struct dasd_device *device,
375 			      struct dasd_device *pos)
376 {
377 
378 	struct dasd_eckd_private *private;
379 	struct alias_pav_group *group;
380 	struct dasd_uid uid;
381 	unsigned long flags;
382 
383 	private = (struct dasd_eckd_private *) device->private;
384 
385 	/* only lock if not already locked */
386 	if (device != pos)
387 		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
388 					 CDEV_NESTED_SECOND);
389 	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
390 	private->uid.base_unit_addr =
391 		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
392 	uid = private->uid;
393 
394 	if (device != pos)
395 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
396 
397 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
398 	if (lcu->pav == NO_PAV) {
399 		list_move(&device->alias_list, &lcu->active_devices);
400 		return 0;
401 	}
402 
403 	group = _find_group(lcu, &uid);
404 	if (!group) {
405 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
406 		if (!group)
407 			return -ENOMEM;
408 		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
409 		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
410 		group->uid.ssid = uid.ssid;
411 		if (uid.type == UA_BASE_DEVICE)
412 			group->uid.base_unit_addr = uid.real_unit_addr;
413 		else
414 			group->uid.base_unit_addr = uid.base_unit_addr;
415 		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
416 		INIT_LIST_HEAD(&group->group);
417 		INIT_LIST_HEAD(&group->baselist);
418 		INIT_LIST_HEAD(&group->aliaslist);
419 		list_add(&group->group, &lcu->grouplist);
420 	}
421 	if (uid.type == UA_BASE_DEVICE)
422 		list_move(&device->alias_list, &group->baselist);
423 	else
424 		list_move(&device->alias_list, &group->aliaslist);
425 	private->pavgroup = group;
426 	return 0;
427 }
428 
429 static void _remove_device_from_lcu(struct alias_lcu *lcu,
430 				    struct dasd_device *device)
431 {
432 	struct dasd_eckd_private *private;
433 	struct alias_pav_group *group;
434 
435 	private = (struct dasd_eckd_private *) device->private;
436 	list_move(&device->alias_list, &lcu->inactive_devices);
437 	group = private->pavgroup;
438 	if (!group)
439 		return;
440 	private->pavgroup = NULL;
441 	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
442 		list_del(&group->group);
443 		kfree(group);
444 		return;
445 	}
446 	if (group->next == device)
447 		group->next = NULL;
448 }
449 
450 static int read_unit_address_configuration(struct dasd_device *device,
451 					   struct alias_lcu *lcu)
452 {
453 	struct dasd_psf_prssd_data *prssdp;
454 	struct dasd_ccw_req *cqr;
455 	struct ccw1 *ccw;
456 	int rc;
457 	unsigned long flags;
458 
459 	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
460 				   (sizeof(struct dasd_psf_prssd_data)),
461 				   device);
462 	if (IS_ERR(cqr))
463 		return PTR_ERR(cqr);
464 	cqr->startdev = device;
465 	cqr->memdev = device;
466 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
467 	cqr->retries = 10;
468 	cqr->expires = 20 * HZ;
469 
470 	/* Prepare for Read Subsystem Data */
471 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
472 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
473 	prssdp->order = PSF_ORDER_PRSSD;
474 	prssdp->suborder = 0x0e;	/* Read unit address configuration */
475 	/* all other bytes of prssdp must be zero */
476 
477 	ccw = cqr->cpaddr;
478 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
479 	ccw->count = sizeof(struct dasd_psf_prssd_data);
480 	ccw->flags |= CCW_FLAG_CC;
481 	ccw->cda = (__u32)(addr_t) prssdp;
482 
483 	/* Read Subsystem Data - unit address configuration */
484 	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
485 
486 	ccw++;
487 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
488 	ccw->count = sizeof(*(lcu->uac));
489 	ccw->cda = (__u32)(addr_t) lcu->uac;
490 
491 	cqr->buildclk = get_clock();
492 	cqr->status = DASD_CQR_FILLED;
493 
494 	/* need to unset flag here to detect race with summary unit check */
495 	spin_lock_irqsave(&lcu->lock, flags);
496 	lcu->flags &= ~NEED_UAC_UPDATE;
497 	spin_unlock_irqrestore(&lcu->lock, flags);
498 
499 	do {
500 		rc = dasd_sleep_on(cqr);
501 	} while (rc && (cqr->retries > 0));
502 	if (rc) {
503 		spin_lock_irqsave(&lcu->lock, flags);
504 		lcu->flags |= NEED_UAC_UPDATE;
505 		spin_unlock_irqrestore(&lcu->lock, flags);
506 	}
507 	dasd_kfree_request(cqr, cqr->memdev);
508 	return rc;
509 }
510 
511 static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
512 {
513 	unsigned long flags;
514 	struct alias_pav_group *pavgroup, *tempgroup;
515 	struct dasd_device *device, *tempdev;
516 	int i, rc;
517 	struct dasd_eckd_private *private;
518 
519 	spin_lock_irqsave(&lcu->lock, flags);
520 	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
521 		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
522 					 alias_list) {
523 			list_move(&device->alias_list, &lcu->active_devices);
524 			private = (struct dasd_eckd_private *) device->private;
525 			private->pavgroup = NULL;
526 		}
527 		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
528 					 alias_list) {
529 			list_move(&device->alias_list, &lcu->active_devices);
530 			private = (struct dasd_eckd_private *) device->private;
531 			private->pavgroup = NULL;
532 		}
533 		list_del(&pavgroup->group);
534 		kfree(pavgroup);
535 	}
536 	spin_unlock_irqrestore(&lcu->lock, flags);
537 
538 	rc = read_unit_address_configuration(refdev, lcu);
539 	if (rc)
540 		return rc;
541 
542 	/* need to take cdev lock before lcu lock */
543 	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
544 				 CDEV_NESTED_FIRST);
545 	spin_lock(&lcu->lock);
546 	lcu->pav = NO_PAV;
547 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
548 		switch (lcu->uac->unit[i].ua_type) {
549 		case UA_BASE_PAV_ALIAS:
550 			lcu->pav = BASE_PAV;
551 			break;
552 		case UA_HYPER_PAV_ALIAS:
553 			lcu->pav = HYPER_PAV;
554 			break;
555 		}
556 		if (lcu->pav != NO_PAV)
557 			break;
558 	}
559 
560 	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
561 				 alias_list) {
562 		_add_device_to_lcu(lcu, device, refdev);
563 	}
564 	spin_unlock(&lcu->lock);
565 	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
566 	return 0;
567 }
568 
569 static void lcu_update_work(struct work_struct *work)
570 {
571 	struct alias_lcu *lcu;
572 	struct read_uac_work_data *ruac_data;
573 	struct dasd_device *device;
574 	unsigned long flags;
575 	int rc;
576 
577 	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
578 	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
579 	device = ruac_data->device;
580 	rc = _lcu_update(device, lcu);
581 	/*
582 	 * Need to check the flags again, as there could have been another
583 	 * prepare_update or a new device could have been added while we were
584 	 * still processing the data.
585 	 */
586 	spin_lock_irqsave(&lcu->lock, flags);
587 	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
588 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
589 			    " alias data in lcu (rc = %d), retry later", rc);
590 		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
591 	} else {
592 		lcu->ruac_data.device = NULL;
593 		lcu->flags &= ~UPDATE_PENDING;
594 	}
595 	spin_unlock_irqrestore(&lcu->lock, flags);
596 }
597 
598 static int _schedule_lcu_update(struct alias_lcu *lcu,
599 				struct dasd_device *device)
600 {
601 	struct dasd_device *usedev = NULL;
602 	struct alias_pav_group *group;
603 
604 	lcu->flags |= NEED_UAC_UPDATE;
605 	if (lcu->ruac_data.device) {
606 		/* already scheduled or running */
607 		return 0;
608 	}
609 	if (device && !list_empty(&device->alias_list))
610 		usedev = device;
611 
612 	if (!usedev && !list_empty(&lcu->grouplist)) {
613 		group = list_first_entry(&lcu->grouplist,
614 					 struct alias_pav_group, group);
615 		if (!list_empty(&group->baselist))
616 			usedev = list_first_entry(&group->baselist,
617 						  struct dasd_device,
618 						  alias_list);
619 		else if (!list_empty(&group->aliaslist))
620 			usedev = list_first_entry(&group->aliaslist,
621 						  struct dasd_device,
622 						  alias_list);
623 	}
624 	if (!usedev && !list_empty(&lcu->active_devices)) {
625 		usedev = list_first_entry(&lcu->active_devices,
626 					  struct dasd_device, alias_list);
627 	}
628 	/*
629 	 * If we haven't found a proper device yet, give up for now; the next
630 	 * device that is set active will trigger an lcu update.
631 	 */
632 	if (!usedev)
633 		return -EINVAL;
634 	lcu->ruac_data.device = usedev;
635 	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
636 	return 0;
637 }
638 
639 int dasd_alias_add_device(struct dasd_device *device)
640 {
641 	struct dasd_eckd_private *private;
642 	struct alias_lcu *lcu;
643 	unsigned long flags;
644 	int rc;
645 
646 	private = (struct dasd_eckd_private *) device->private;
647 	lcu = private->lcu;
648 	rc = 0;
649 
650 	/* need to take cdev lock before lcu lock */
651 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
652 	spin_lock(&lcu->lock);
653 	if (!(lcu->flags & UPDATE_PENDING)) {
654 		rc = _add_device_to_lcu(lcu, device, device);
655 		if (rc)
656 			lcu->flags |= UPDATE_PENDING;
657 	}
658 	if (lcu->flags & UPDATE_PENDING) {
659 		list_move(&device->alias_list, &lcu->active_devices);
660 		_schedule_lcu_update(lcu, device);
661 	}
662 	spin_unlock(&lcu->lock);
663 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
664 	return rc;
665 }
666 
667 int dasd_alias_update_add_device(struct dasd_device *device)
668 {
669 	struct dasd_eckd_private *private;
670 	private = (struct dasd_eckd_private *) device->private;
671 	private->lcu->flags |= UPDATE_PENDING;
672 	return dasd_alias_add_device(device);
673 }
674 
675 int dasd_alias_remove_device(struct dasd_device *device)
676 {
677 	struct dasd_eckd_private *private;
678 	struct alias_lcu *lcu;
679 	unsigned long flags;
680 
681 	private = (struct dasd_eckd_private *) device->private;
682 	lcu = private->lcu;
683 	spin_lock_irqsave(&lcu->lock, flags);
684 	_remove_device_from_lcu(lcu, device);
685 	spin_unlock_irqrestore(&lcu->lock, flags);
686 	return 0;
687 }
688 
689 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
690 {
691 
692 	struct dasd_device *alias_device;
693 	struct alias_pav_group *group;
694 	struct alias_lcu *lcu;
695 	struct dasd_eckd_private *private, *alias_priv;
696 	unsigned long flags;
697 
698 	private = (struct dasd_eckd_private *) base_device->private;
699 	group = private->pavgroup;
700 	lcu = private->lcu;
701 	if (!group || !lcu)
702 		return NULL;
703 	if (lcu->pav == NO_PAV ||
704 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
705 		return NULL;
706 
707 	spin_lock_irqsave(&lcu->lock, flags);
708 	alias_device = group->next;
709 	if (!alias_device) {
710 		if (list_empty(&group->aliaslist)) {
711 			spin_unlock_irqrestore(&lcu->lock, flags);
712 			return NULL;
713 		} else {
714 			alias_device = list_first_entry(&group->aliaslist,
715 							struct dasd_device,
716 							alias_list);
717 		}
718 	}
719 	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
720 		group->next = list_first_entry(&group->aliaslist,
721 					       struct dasd_device, alias_list);
722 	else
723 		group->next = list_first_entry(&alias_device->alias_list,
724 					       struct dasd_device, alias_list);
725 	spin_unlock_irqrestore(&lcu->lock, flags);
726 	alias_priv = (struct dasd_eckd_private *) alias_device->private;
727 	if ((alias_priv->count < private->count) && !alias_device->stopped)
728 		return alias_device;
729 	else
730 		return NULL;
731 }
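
/*
 * Illustrative per-I/O usage (not compiled): when building a request, the
 * discipline may ask for an alias and falls back to the base device if
 * none is currently usable.
 */
#if 0
	startdev = dasd_alias_get_start_dev(base_device);
	if (!startdev)
		startdev = base_device;
	/* ... build the ccw request with cqr->startdev = startdev ... */
#endif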
732 
733 /*
734  * Summary unit check handling depends on the way alias devices
735  * are handled, so it is done here rather than in dasd_eckd.c
736  */
737 static int reset_summary_unit_check(struct alias_lcu *lcu,
738 				    struct dasd_device *device,
739 				    char reason)
740 {
741 	struct dasd_ccw_req *cqr;
742 	int rc = 0;
743 	struct ccw1 *ccw;
744 
745 	cqr = lcu->rsu_cqr;
746 	strncpy((char *) &cqr->magic, "ECKD", 4);
747 	ASCEBC((char *) &cqr->magic, 4);
748 	ccw = cqr->cpaddr;
749 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
750 	ccw->flags = 0;
751 	ccw->count = 16;
752 	ccw->cda = (__u32)(addr_t) cqr->data;
753 	((char *)cqr->data)[0] = reason;
754 
755 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
756 	cqr->retries = 255;	/* set retry counter to enable basic ERP */
757 	cqr->startdev = device;
758 	cqr->memdev = device;
759 	cqr->block = NULL;
760 	cqr->expires = 5 * HZ;
761 	cqr->buildclk = get_clock();
762 	cqr->status = DASD_CQR_FILLED;
763 
764 	rc = dasd_sleep_on_immediatly(cqr);
765 	return rc;
766 }
767 
768 static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
769 {
770 	struct alias_pav_group *pavgroup;
771 	struct dasd_device *device;
772 	struct dasd_eckd_private *private;
773 	unsigned long flags;
774 
775 	/* active and inactive lists can contain alias as well as base devices */
776 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
777 		private = (struct dasd_eckd_private *) device->private;
778 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
779 		if (private->uid.type != UA_BASE_DEVICE) {
780 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
781 					       flags);
782 			continue;
783 		}
784 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
785 		dasd_schedule_block_bh(device->block);
786 		dasd_schedule_device_bh(device);
787 	}
788 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
789 		private = (struct dasd_eckd_private *) device->private;
790 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
791 		if (private->uid.type != UA_BASE_DEVICE) {
792 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
793 					       flags);
794 			continue;
795 		}
796 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
797 		dasd_schedule_block_bh(device->block);
798 		dasd_schedule_device_bh(device);
799 	}
800 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
801 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
802 			dasd_schedule_block_bh(device->block);
803 			dasd_schedule_device_bh(device);
804 		}
805 	}
806 }
807 
808 static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
809 {
810 	struct alias_pav_group *pavgroup;
811 	struct dasd_device *device, *temp;
812 	struct dasd_eckd_private *private;
813 	int rc;
814 	unsigned long flags;
815 	LIST_HEAD(active);
816 
817 	/*
818 	 * The problem here is that dasd_flush_device_queue may wait
819 	 * for a request to terminate. We can't keep
820 	 * the lcu lock during that time, so we must assume that
821 	 * the lists may have changed.
822 	 * Idea: first gather all active alias devices in a separate list,
823 	 * then flush the first element of this list unlocked, and afterwards
824 	 * check if it is still on the list before moving it to the
825 	 * active_devices list.
826 	 */
827 
828 	spin_lock_irqsave(&lcu->lock, flags);
829 	list_for_each_entry_safe(device, temp, &lcu->active_devices,
830 				 alias_list) {
831 		private = (struct dasd_eckd_private *) device->private;
832 		if (private->uid.type == UA_BASE_DEVICE)
833 			continue;
834 		list_move(&device->alias_list, &active);
835 	}
836 
837 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
838 		list_splice_init(&pavgroup->aliaslist, &active);
839 	}
840 	while (!list_empty(&active)) {
841 		device = list_first_entry(&active, struct dasd_device,
842 					  alias_list);
843 		spin_unlock_irqrestore(&lcu->lock, flags);
844 		rc = dasd_flush_device_queue(device);
845 		spin_lock_irqsave(&lcu->lock, flags);
846 		/*
847 		 * only move device around if it wasn't moved away while we
848 		 * were waiting for the flush
849 		 */
850 		if (device == list_first_entry(&active,
851 					       struct dasd_device, alias_list))
852 			list_move(&device->alias_list, &lcu->active_devices);
853 	}
854 	spin_unlock_irqrestore(&lcu->lock, flags);
855 }
856 
857 static void __stop_device_on_lcu(struct dasd_device *device,
858 				 struct dasd_device *pos)
859 {
860 	/* If pos == device then device is already locked! */
861 	if (pos == device) {
862 		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
863 		return;
864 	}
865 	spin_lock(get_ccwdev_lock(pos->cdev));
866 	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
867 	spin_unlock(get_ccwdev_lock(pos->cdev));
868 }
869 
870 /*
871  * This function is called in interrupt context, so the
872  * cdev lock for the device is already held!
873  */
874 static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
875 				     struct dasd_device *device)
876 {
877 	struct alias_pav_group *pavgroup;
878 	struct dasd_device *pos;
879 
880 	list_for_each_entry(pos, &lcu->active_devices, alias_list)
881 		__stop_device_on_lcu(device, pos);
882 	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
883 		__stop_device_on_lcu(device, pos);
884 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
885 		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
886 			__stop_device_on_lcu(device, pos);
887 		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
888 			__stop_device_on_lcu(device, pos);
889 	}
890 }
891 
892 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
893 {
894 	struct alias_pav_group *pavgroup;
895 	struct dasd_device *device;
896 	unsigned long flags;
897 
898 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
899 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
900 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
901 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
902 	}
903 
904 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
905 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
906 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
907 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
908 	}
909 
910 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
911 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
912 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
913 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
914 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
915 					       flags);
916 		}
917 		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
918 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
919 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
920 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
921 					       flags);
922 		}
923 	}
924 }
925 
926 static void summary_unit_check_handling_work(struct work_struct *work)
927 {
928 	struct alias_lcu *lcu;
929 	struct summary_unit_check_work_data *suc_data;
930 	unsigned long flags;
931 	struct dasd_device *device;
932 
933 	suc_data = container_of(work, struct summary_unit_check_work_data,
934 				worker);
935 	lcu = container_of(suc_data, struct alias_lcu, suc_data);
936 	device = suc_data->device;
937 
938 	/* 1. flush alias devices */
939 	flush_all_alias_devices_on_lcu(lcu);
940 
941 	/* 2. reset summary unit check */
942 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
943 	dasd_device_remove_stop_bits(device,
944 				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
945 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
946 	reset_summary_unit_check(lcu, device, suc_data->reason);
947 
948 	spin_lock_irqsave(&lcu->lock, flags);
949 	_unstop_all_devices_on_lcu(lcu);
950 	_restart_all_base_devices_on_lcu(lcu);
951 	/* 3. read new alias configuration */
952 	_schedule_lcu_update(lcu, device);
953 	lcu->suc_data.device = NULL;
954 	spin_unlock_irqrestore(&lcu->lock, flags);
955 }
956 
957 /*
958  * note: this will be called from int handler context (cdev locked)
959  */
960 void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
961 					  struct irb *irb)
962 {
963 	struct alias_lcu *lcu;
964 	char reason;
965 	struct dasd_eckd_private *private;
966 	char *sense;
967 
968 	private = (struct dasd_eckd_private *) device->private;
969 
970 	sense = dasd_get_sense(irb);
971 	if (sense) {
972 		reason = sense[8];
973 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
974 			    "eckd handle summary unit check: reason", reason);
975 	} else {
976 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
977 			    "eckd handle summary unit check:"
978 			    " no reason code available");
979 		return;
980 	}
981 
982 	lcu = private->lcu;
983 	if (!lcu) {
984 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
985 			    "device not ready to handle summary"
986 			    " unit check (no lcu structure)");
987 		return;
988 	}
989 	spin_lock(&lcu->lock);
990 	_stop_all_devices_on_lcu(lcu, device);
991 	/* prepare for lcu_update */
992 	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
993 	/* If this device is about to be removed, just return and wait for
994 	 * the next interrupt on a different device.
995 	 */
996 	if (list_empty(&device->alias_list)) {
997 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
998 			    "device is in offline processing,"
999 			    " don't do summary unit check handling");
1000 		spin_unlock(&lcu->lock);
1001 		return;
1002 	}
1003 	if (lcu->suc_data.device) {
1004 		/* already scheduled or running */
1005 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1006 			    "previous instance of summary unit check worker"
1007 			    " still pending");
1008 		spin_unlock(&lcu->lock);
1009 		return;
1010 	}
1011 	lcu->suc_data.reason = reason;
1012 	lcu->suc_data.device = device;
1013 	spin_unlock(&lcu->lock);
1014 	schedule_work(&lcu->suc_data.worker);
1015 }
1016