xref: /linux/drivers/s390/block/dasd_alias.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*
2  * PAV alias management for the DASD ECKD discipline
3  *
4  * Copyright IBM Corporation, 2007
5  * Author(s): Stefan Weinhuber <wein@de.ibm.com>
6  */
7 
8 #define KMSG_COMPONENT "dasd-eckd"
9 
10 #include <linux/list.h>
11 #include <linux/slab.h>
12 #include <asm/ebcdic.h>
13 #include "dasd_int.h"
14 #include "dasd_eckd.h"
15 
16 #ifdef PRINTK_HEADER
17 #undef PRINTK_HEADER
18 #endif				/* PRINTK_HEADER */
19 #define PRINTK_HEADER "dasd(eckd):"
20 
21 
22 /*
23  * General concept of alias management:
24  * - PAV and DASD alias management is specific to the eckd discipline.
25  * - A device is connected to an lcu as long as the device exists.
26  *   dasd_alias_make_device_known_to_lcu will be called when the
27  *   device is checked by the eckd discipline and
28  *   dasd_alias_disconnect_device_from_lcu will be called
29  *   before the device is deleted.
30  * - The dasd_alias_add_device / dasd_alias_remove_device
31  *   functions mark the point when a device is 'ready for service'.
32  * - A summary unit check is a rare occasion, but it is mandatory to
33  *   support it. It requires some complex recovery actions before the
34  *   devices can be used again (see dasd_alias_handle_summary_unit_check).
35  * - dasd_alias_get_start_dev will find an alias device that can be used
36  *   instead of the base device and does some (very simple) load balancing.
37  *   This is the function that gets called for each I/O, so when improving
38  *   something, this function should get faster or better, the rest has just
39  *   to be correct.
40  */
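/*
 * Rough lifecycle of a device as seen by this file (a sketch derived from
 * the description above; the actual callers live in the eckd discipline
 * code, e.g. dasd_eckd.c):
 *
 *   dasd_alias_make_device_known_to_lcu(dev);    attach device to its lcu
 *   dasd_alias_add_device(dev);                  device is ready for service
 *   ...
 *   start = dasd_alias_get_start_dev(base);      per I/O, may return NULL
 *   ...
 *   dasd_alias_remove_device(dev);               device leaves service
 *   dasd_alias_disconnect_device_from_lcu(dev);  called before deletion
 */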
41 
42 
43 static void summary_unit_check_handling_work(struct work_struct *);
44 static void lcu_update_work(struct work_struct *);
45 static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
46 
47 static struct alias_root aliastree = {
48 	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
49 	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
50 };
51 
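/*
 * Find the alias_server that matches vendor and serial of the given uid,
 * or return NULL if no such server exists. Callers hold aliastree.lock.
 */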
52 static struct alias_server *_find_server(struct dasd_uid *uid)
53 {
54 	struct alias_server *pos;
55 	list_for_each_entry(pos, &aliastree.serverlist, server) {
56 		if (!strncmp(pos->uid.vendor, uid->vendor,
57 			     sizeof(uid->vendor))
58 		    && !strncmp(pos->uid.serial, uid->serial,
59 				sizeof(uid->serial)))
60 			return pos;
61 	}
62 	return NULL;
63 }
64 
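/*
 * Find the LCU with the given subsystem id (ssid) on a server, or return
 * NULL if it is not known yet. Callers hold aliastree.lock.
 */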
65 static struct alias_lcu *_find_lcu(struct alias_server *server,
66 				   struct dasd_uid *uid)
67 {
68 	struct alias_lcu *pos;
69 	list_for_each_entry(pos, &server->lculist, lcu) {
70 		if (pos->uid.ssid == uid->ssid)
71 			return pos;
72 	}
73 	return NULL;
74 }
75 
76 static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
77 					   struct dasd_uid *uid)
78 {
79 	struct alias_pav_group *pos;
80 	__u8 search_unit_addr;
81 
82 	/* for hyper pav there is only one group */
83 	if (lcu->pav == HYPER_PAV) {
84 		if (list_empty(&lcu->grouplist))
85 			return NULL;
86 		else
87 			return list_first_entry(&lcu->grouplist,
88 						struct alias_pav_group, group);
89 	}
90 
91 	/* for base pav we have to find the group that matches the base */
92 	if (uid->type == UA_BASE_DEVICE)
93 		search_unit_addr = uid->real_unit_addr;
94 	else
95 		search_unit_addr = uid->base_unit_addr;
96 	list_for_each_entry(pos, &lcu->grouplist, group) {
97 		if (pos->uid.base_unit_addr == search_unit_addr &&
98 		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
99 			return pos;
100 	}
101 	return NULL;
102 }
103 
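/*
 * Allocate and initialize a new alias_server for the given uid.
 * Returns ERR_PTR(-ENOMEM) if the allocation fails.
 */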
104 static struct alias_server *_allocate_server(struct dasd_uid *uid)
105 {
106 	struct alias_server *server;
107 
108 	server = kzalloc(sizeof(*server), GFP_KERNEL);
109 	if (!server)
110 		return ERR_PTR(-ENOMEM);
111 	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
112 	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
113 	INIT_LIST_HEAD(&server->server);
114 	INIT_LIST_HEAD(&server->lculist);
115 	return server;
116 }
117 
118 static void _free_server(struct alias_server *server)
119 {
120 	kfree(server);
121 }
122 
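/*
 * Allocate a new LCU structure, including the unit address configuration
 * buffer and a preallocated reset summary unit check request (the data
 * areas are GFP_DMA because they are used in channel programs).
 * Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */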
123 static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
124 {
125 	struct alias_lcu *lcu;
126 
127 	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
128 	if (!lcu)
129 		return ERR_PTR(-ENOMEM);
130 	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
131 	if (!lcu->uac)
132 		goto out_err1;
133 	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
134 	if (!lcu->rsu_cqr)
135 		goto out_err2;
136 	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
137 				       GFP_KERNEL | GFP_DMA);
138 	if (!lcu->rsu_cqr->cpaddr)
139 		goto out_err3;
140 	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
141 	if (!lcu->rsu_cqr->data)
142 		goto out_err4;
143 
144 	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
145 	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
146 	lcu->uid.ssid = uid->ssid;
147 	lcu->pav = NO_PAV;
148 	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
149 	INIT_LIST_HEAD(&lcu->lcu);
150 	INIT_LIST_HEAD(&lcu->inactive_devices);
151 	INIT_LIST_HEAD(&lcu->active_devices);
152 	INIT_LIST_HEAD(&lcu->grouplist);
153 	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
154 	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
155 	spin_lock_init(&lcu->lock);
156 	init_completion(&lcu->lcu_setup);
157 	return lcu;
158 
159 out_err4:
160 	kfree(lcu->rsu_cqr->cpaddr);
161 out_err3:
162 	kfree(lcu->rsu_cqr);
163 out_err2:
164 	kfree(lcu->uac);
165 out_err1:
166 	kfree(lcu);
167 	return ERR_PTR(-ENOMEM);
168 }
169 
170 static void _free_lcu(struct alias_lcu *lcu)
171 {
172 	kfree(lcu->rsu_cqr->data);
173 	kfree(lcu->rsu_cqr->cpaddr);
174 	kfree(lcu->rsu_cqr);
175 	kfree(lcu->uac);
176 	kfree(lcu);
177 }
178 
179 /*
180  * This is the function that will allocate all the server and lcu data,
181  * so this function must be called first for a new device.
182  * If the return value is 1, the lcu was already known before; if it
183  * is 0, this is a new lcu.
184  * Negative return code indicates that something went wrong (e.g. -ENOMEM)
185  */
186 int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
187 {
188 	struct dasd_eckd_private *private;
189 	unsigned long flags;
190 	struct alias_server *server, *newserver;
191 	struct alias_lcu *lcu, *newlcu;
192 	int is_lcu_known;
193 	struct dasd_uid uid;
194 
195 	private = (struct dasd_eckd_private *) device->private;
196 
197 	device->discipline->get_uid(device, &uid);
198 	spin_lock_irqsave(&aliastree.lock, flags);
199 	is_lcu_known = 1;
200 	server = _find_server(&uid);
201 	if (!server) {
202 		spin_unlock_irqrestore(&aliastree.lock, flags);
203 		newserver = _allocate_server(&uid);
204 		if (IS_ERR(newserver))
205 			return PTR_ERR(newserver);
206 		spin_lock_irqsave(&aliastree.lock, flags);
207 		server = _find_server(&uid);
208 		if (!server) {
209 			list_add(&newserver->server, &aliastree.serverlist);
210 			server = newserver;
211 			is_lcu_known = 0;
212 		} else {
213 			/* someone was faster */
214 			_free_server(newserver);
215 		}
216 	}
217 
218 	lcu = _find_lcu(server, &uid);
219 	if (!lcu) {
220 		spin_unlock_irqrestore(&aliastree.lock, flags);
221 		newlcu = _allocate_lcu(&uid);
222 		if (IS_ERR(newlcu))
223 			return PTR_ERR(newlcu);
224 		spin_lock_irqsave(&aliastree.lock, flags);
225 		lcu = _find_lcu(server, &uid);
226 		if (!lcu) {
227 			list_add(&newlcu->lcu, &server->lculist);
228 			lcu = newlcu;
229 			is_lcu_known = 0;
230 		} else {
231 			/* someone was faster */
232 			_free_lcu(newlcu);
233 		}
234 		is_lcu_known = 0;
235 	}
236 	spin_lock(&lcu->lock);
237 	list_add(&device->alias_list, &lcu->inactive_devices);
238 	private->lcu = lcu;
239 	spin_unlock(&lcu->lock);
240 	spin_unlock_irqrestore(&aliastree.lock, flags);
241 
242 	return is_lcu_known;
243 }
244 
245 /*
246  * The first device to be registered on an LCU will have to do
247  * some additional setup steps to configure that LCU on the
248  * storage server. All further devices should wait with their
249  * initialization until the first device is done.
250  * To synchronize this work, the first device will call
251  * dasd_alias_lcu_setup_complete when it is done, and all
252  * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
253  */
254 void dasd_alias_lcu_setup_complete(struct dasd_device *device)
255 {
256 	unsigned long flags;
257 	struct alias_server *server;
258 	struct alias_lcu *lcu;
259 	struct dasd_uid uid;
260 
261 	device->discipline->get_uid(device, &uid);
262 	lcu = NULL;
263 	spin_lock_irqsave(&aliastree.lock, flags);
264 	server = _find_server(&uid);
265 	if (server)
266 		lcu = _find_lcu(server, &uid);
267 	spin_unlock_irqrestore(&aliastree.lock, flags);
268 	if (!lcu) {
269 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
270 				"could not find lcu for %04x %02x",
271 				uid.ssid, uid.real_unit_addr);
272 		WARN_ON(1);
273 		return;
274 	}
275 	complete_all(&lcu->lcu_setup);
276 }
277 
278 void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
279 {
280 	unsigned long flags;
281 	struct alias_server *server;
282 	struct alias_lcu *lcu;
283 	struct dasd_uid uid;
284 
285 	device->discipline->get_uid(device, &uid);
286 	lcu = NULL;
287 	spin_lock_irqsave(&aliastree.lock, flags);
288 	server = _find_server(&uid);
289 	if (server)
290 		lcu = _find_lcu(server, &uid);
291 	spin_unlock_irqrestore(&aliastree.lock, flags);
292 	if (!lcu) {
293 		DBF_EVENT_DEVID(DBF_ERR, device->cdev,
294 				"could not find lcu for %04x %02x",
295 				uid.ssid, uid.real_unit_addr);
296 		WARN_ON(1);
297 		return;
298 	}
299 	wait_for_completion(&lcu->lcu_setup);
300 }
301 
302 /*
303  * This function removes a device from the scope of alias management.
304  * The complicated part is to make sure that it is not in use by
305  * any of the workers. If necessary cancel the work.
306  */
307 void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
308 {
309 	struct dasd_eckd_private *private;
310 	unsigned long flags;
311 	struct alias_lcu *lcu;
312 	struct alias_server *server;
313 	int was_pending;
314 	struct dasd_uid uid;
315 
316 	private = (struct dasd_eckd_private *) device->private;
317 	lcu = private->lcu;
318 	/* nothing to do if already disconnected */
319 	if (!lcu)
320 		return;
321 	device->discipline->get_uid(device, &uid);
322 	spin_lock_irqsave(&lcu->lock, flags);
323 	list_del_init(&device->alias_list);
324 	/* make sure that the workers don't use this device */
325 	if (device == lcu->suc_data.device) {
326 		spin_unlock_irqrestore(&lcu->lock, flags);
327 		cancel_work_sync(&lcu->suc_data.worker);
328 		spin_lock_irqsave(&lcu->lock, flags);
329 		if (device == lcu->suc_data.device)
330 			lcu->suc_data.device = NULL;
331 	}
332 	was_pending = 0;
333 	if (device == lcu->ruac_data.device) {
334 		spin_unlock_irqrestore(&lcu->lock, flags);
335 		was_pending = 1;
336 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
337 		spin_lock_irqsave(&lcu->lock, flags);
338 		if (device == lcu->ruac_data.device)
339 			lcu->ruac_data.device = NULL;
340 	}
341 	private->lcu = NULL;
342 	spin_unlock_irqrestore(&lcu->lock, flags);
343 
344 	spin_lock_irqsave(&aliastree.lock, flags);
345 	spin_lock(&lcu->lock);
346 	if (list_empty(&lcu->grouplist) &&
347 	    list_empty(&lcu->active_devices) &&
348 	    list_empty(&lcu->inactive_devices)) {
349 		list_del(&lcu->lcu);
350 		spin_unlock(&lcu->lock);
351 		_free_lcu(lcu);
352 		lcu = NULL;
353 	} else {
354 		if (was_pending)
355 			_schedule_lcu_update(lcu, NULL);
356 		spin_unlock(&lcu->lock);
357 	}
358 	server = _find_server(&uid);
359 	if (server && list_empty(&server->lculist)) {
360 		list_del(&server->server);
361 		_free_server(server);
362 	}
363 	spin_unlock_irqrestore(&aliastree.lock, flags);
364 }
365 
366 /*
367  * This function assumes that the unit address configuration stored
368  * in the lcu is up to date and will update the device uid before
369  * adding it to a pav group.
370  */
371 
372 static int _add_device_to_lcu(struct alias_lcu *lcu,
373 			      struct dasd_device *device,
374 			      struct dasd_device *pos)
375 {
376 
377 	struct dasd_eckd_private *private;
378 	struct alias_pav_group *group;
379 	struct dasd_uid uid;
380 	unsigned long flags;
381 
382 	private = (struct dasd_eckd_private *) device->private;
383 
384 	/* only lock if not already locked */
385 	if (device != pos)
386 		spin_lock_irqsave_nested(get_ccwdev_lock(device->cdev), flags,
387 					 CDEV_NESTED_SECOND);
388 	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
389 	private->uid.base_unit_addr =
390 		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
391 	uid = private->uid;
392 
393 	if (device != pos)
394 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
395 
396 	/* if we have no PAV anyway, we don't need to bother with PAV groups */
397 	if (lcu->pav == NO_PAV) {
398 		list_move(&device->alias_list, &lcu->active_devices);
399 		return 0;
400 	}
401 
402 	group = _find_group(lcu, &uid);
403 	if (!group) {
404 		group = kzalloc(sizeof(*group), GFP_ATOMIC);
405 		if (!group)
406 			return -ENOMEM;
407 		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
408 		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
409 		group->uid.ssid = uid.ssid;
410 		if (uid.type == UA_BASE_DEVICE)
411 			group->uid.base_unit_addr = uid.real_unit_addr;
412 		else
413 			group->uid.base_unit_addr = uid.base_unit_addr;
414 		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
415 		INIT_LIST_HEAD(&group->group);
416 		INIT_LIST_HEAD(&group->baselist);
417 		INIT_LIST_HEAD(&group->aliaslist);
418 		list_add(&group->group, &lcu->grouplist);
419 	}
420 	if (uid.type == UA_BASE_DEVICE)
421 		list_move(&device->alias_list, &group->baselist);
422 	else
423 		list_move(&device->alias_list, &group->aliaslist);
424 	private->pavgroup = group;
425 	return 0;
426 }
427 
428 static void _remove_device_from_lcu(struct alias_lcu *lcu,
429 				    struct dasd_device *device)
430 {
431 	struct dasd_eckd_private *private;
432 	struct alias_pav_group *group;
433 
434 	private = (struct dasd_eckd_private *) device->private;
435 	list_move(&device->alias_list, &lcu->inactive_devices);
436 	group = private->pavgroup;
437 	if (!group)
438 		return;
439 	private->pavgroup = NULL;
440 	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
441 		list_del(&group->group);
442 		kfree(group);
443 		return;
444 	}
445 	if (group->next == device)
446 		group->next = NULL;
447 }
448 
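/*
 * Build and run a PSF/RSSD channel program (suborder 0x0e) on the given
 * device to read the unit address configuration of the LCU into lcu->uac.
 * The NEED_UAC_UPDATE flag is cleared before the request is started and
 * set again if the request fails.
 */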
449 static int read_unit_address_configuration(struct dasd_device *device,
450 					   struct alias_lcu *lcu)
451 {
452 	struct dasd_psf_prssd_data *prssdp;
453 	struct dasd_ccw_req *cqr;
454 	struct ccw1 *ccw;
455 	int rc;
456 	unsigned long flags;
457 
458 	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
459 				   (sizeof(struct dasd_psf_prssd_data)),
460 				   device);
461 	if (IS_ERR(cqr))
462 		return PTR_ERR(cqr);
463 	cqr->startdev = device;
464 	cqr->memdev = device;
465 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
466 	cqr->retries = 10;
467 	cqr->expires = 20 * HZ;
468 
469 	/* Prepare for Read Subsystem Data */
470 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
471 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
472 	prssdp->order = PSF_ORDER_PRSSD;
473 	prssdp->suborder = 0x0e;	/* Read unit address configuration */
474 	/* all other bytes of prssdp must be zero */
475 
476 	ccw = cqr->cpaddr;
477 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
478 	ccw->count = sizeof(struct dasd_psf_prssd_data);
479 	ccw->flags |= CCW_FLAG_CC;
480 	ccw->cda = (__u32)(addr_t) prssdp;
481 
482 	/* Read Subsystem Data - feature codes */
483 	memset(lcu->uac, 0, sizeof(*(lcu->uac)));
484 
485 	ccw++;
486 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
487 	ccw->count = sizeof(*(lcu->uac));
488 	ccw->cda = (__u32)(addr_t) lcu->uac;
489 
490 	cqr->buildclk = get_clock();
491 	cqr->status = DASD_CQR_FILLED;
492 
493 	/* need to unset flag here to detect race with summary unit check */
494 	spin_lock_irqsave(&lcu->lock, flags);
495 	lcu->flags &= ~NEED_UAC_UPDATE;
496 	spin_unlock_irqrestore(&lcu->lock, flags);
497 
498 	do {
499 		rc = dasd_sleep_on(cqr);
500 	} while (rc && (cqr->retries > 0));
501 	if (rc) {
502 		spin_lock_irqsave(&lcu->lock, flags);
503 		lcu->flags |= NEED_UAC_UPDATE;
504 		spin_unlock_irqrestore(&lcu->lock, flags);
505 	}
506 	dasd_kfree_request(cqr, cqr->memdev);
507 	return rc;
508 }
509 
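/*
 * Re-read the unit address configuration via refdev and rebuild the PAV
 * groups of the LCU: existing groups are dissolved, the PAV mode is
 * derived from the new configuration, and all active devices are sorted
 * back into groups.
 */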
510 static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
511 {
512 	unsigned long flags;
513 	struct alias_pav_group *pavgroup, *tempgroup;
514 	struct dasd_device *device, *tempdev;
515 	int i, rc;
516 	struct dasd_eckd_private *private;
517 
518 	spin_lock_irqsave(&lcu->lock, flags);
519 	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
520 		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
521 					 alias_list) {
522 			list_move(&device->alias_list, &lcu->active_devices);
523 			private = (struct dasd_eckd_private *) device->private;
524 			private->pavgroup = NULL;
525 		}
526 		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
527 					 alias_list) {
528 			list_move(&device->alias_list, &lcu->active_devices);
529 			private = (struct dasd_eckd_private *) device->private;
530 			private->pavgroup = NULL;
531 		}
532 		list_del(&pavgroup->group);
533 		kfree(pavgroup);
534 	}
535 	spin_unlock_irqrestore(&lcu->lock, flags);
536 
537 	rc = read_unit_address_configuration(refdev, lcu);
538 	if (rc)
539 		return rc;
540 
541 	/* need to take cdev lock before lcu lock */
542 	spin_lock_irqsave_nested(get_ccwdev_lock(refdev->cdev), flags,
543 				 CDEV_NESTED_FIRST);
544 	spin_lock(&lcu->lock);
545 	lcu->pav = NO_PAV;
546 	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
547 		switch (lcu->uac->unit[i].ua_type) {
548 		case UA_BASE_PAV_ALIAS:
549 			lcu->pav = BASE_PAV;
550 			break;
551 		case UA_HYPER_PAV_ALIAS:
552 			lcu->pav = HYPER_PAV;
553 			break;
554 		}
555 		if (lcu->pav != NO_PAV)
556 			break;
557 	}
558 
559 	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
560 				 alias_list) {
561 		_add_device_to_lcu(lcu, device, refdev);
562 	}
563 	spin_unlock(&lcu->lock);
564 	spin_unlock_irqrestore(get_ccwdev_lock(refdev->cdev), flags);
565 	return 0;
566 }
567 
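/*
 * Delayed work wrapper around _lcu_update. If the update fails, or another
 * update became necessary in the meantime, the work is rescheduled after
 * 30 seconds.
 */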
568 static void lcu_update_work(struct work_struct *work)
569 {
570 	struct alias_lcu *lcu;
571 	struct read_uac_work_data *ruac_data;
572 	struct dasd_device *device;
573 	unsigned long flags;
574 	int rc;
575 
576 	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
577 	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
578 	device = ruac_data->device;
579 	rc = _lcu_update(device, lcu);
580 	/*
581 	 * Need to check flags again, as there could have been another
582 	 * call to _schedule_lcu_update or a new device while we were still
583 	 * processing the data
584 	 */
585 	spin_lock_irqsave(&lcu->lock, flags);
586 	if (rc || (lcu->flags & NEED_UAC_UPDATE)) {
587 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
588 			    " alias data in lcu (rc = %d), retry later", rc);
589 		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
590 	} else {
591 		lcu->ruac_data.device = NULL;
592 		lcu->flags &= ~UPDATE_PENDING;
593 	}
594 	spin_unlock_irqrestore(&lcu->lock, flags);
595 }
596 
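/*
 * Mark the LCU as needing an update and, unless an update is already
 * scheduled or running, pick a device to run it on and schedule
 * lcu_update_work. Returns -EINVAL if no usable device is available.
 * Called with lcu->lock held.
 */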
597 static int _schedule_lcu_update(struct alias_lcu *lcu,
598 				struct dasd_device *device)
599 {
600 	struct dasd_device *usedev = NULL;
601 	struct alias_pav_group *group;
602 
603 	lcu->flags |= NEED_UAC_UPDATE;
604 	if (lcu->ruac_data.device) {
605 		/* already scheduled or running */
606 		return 0;
607 	}
608 	if (device && !list_empty(&device->alias_list))
609 		usedev = device;
610 
611 	if (!usedev && !list_empty(&lcu->grouplist)) {
612 		group = list_first_entry(&lcu->grouplist,
613 					 struct alias_pav_group, group);
614 		if (!list_empty(&group->baselist))
615 			usedev = list_first_entry(&group->baselist,
616 						  struct dasd_device,
617 						  alias_list);
618 		else if (!list_empty(&group->aliaslist))
619 			usedev = list_first_entry(&group->aliaslist,
620 						  struct dasd_device,
621 						  alias_list);
622 	}
623 	if (!usedev && !list_empty(&lcu->active_devices)) {
624 		usedev = list_first_entry(&lcu->active_devices,
625 					  struct dasd_device, alias_list);
626 	}
627 	/*
628 	 * if we haven't found a proper device yet, give up for now; the next
629 	 * device to be set active will trigger an lcu update
630 	 */
631 	if (!usedev)
632 		return -EINVAL;
633 	lcu->ruac_data.device = usedev;
634 	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
635 	return 0;
636 }
637 
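/*
 * Mark a device as 'ready for service': sort it into its PAV group, or
 * move it to the active list and trigger an LCU update if the unit
 * address configuration is not current.
 */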
638 int dasd_alias_add_device(struct dasd_device *device)
639 {
640 	struct dasd_eckd_private *private;
641 	struct alias_lcu *lcu;
642 	unsigned long flags;
643 	int rc;
644 
645 	private = (struct dasd_eckd_private *) device->private;
646 	lcu = private->lcu;
647 	rc = 0;
648 
649 	/* need to take cdev lock before lcu lock */
650 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
651 	spin_lock(&lcu->lock);
652 	if (!(lcu->flags & UPDATE_PENDING)) {
653 		rc = _add_device_to_lcu(lcu, device, device);
654 		if (rc)
655 			lcu->flags |= UPDATE_PENDING;
656 	}
657 	if (lcu->flags & UPDATE_PENDING) {
658 		list_move(&device->alias_list, &lcu->active_devices);
659 		_schedule_lcu_update(lcu, device);
660 	}
661 	spin_unlock(&lcu->lock);
662 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
663 	return rc;
664 }
665 
666 int dasd_alias_update_add_device(struct dasd_device *device)
667 {
668 	struct dasd_eckd_private *private;
669 	private = (struct dasd_eckd_private *) device->private;
670 	private->lcu->flags |= UPDATE_PENDING;
671 	return dasd_alias_add_device(device);
672 }
673 
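/*
 * Counterpart of dasd_alias_add_device: move the device back to the
 * inactive list and dissolve its PAV group if it has become empty.
 */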
674 int dasd_alias_remove_device(struct dasd_device *device)
675 {
676 	struct dasd_eckd_private *private;
677 	struct alias_lcu *lcu;
678 	unsigned long flags;
679 
680 	private = (struct dasd_eckd_private *) device->private;
681 	lcu = private->lcu;
682 	/* nothing to do if already removed */
683 	if (!lcu)
684 		return 0;
685 	spin_lock_irqsave(&lcu->lock, flags);
686 	_remove_device_from_lcu(lcu, device);
687 	spin_unlock_irqrestore(&lcu->lock, flags);
688 	return 0;
689 }
690 
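/*
 * Simple round-robin selection of an alias device that can be used to
 * start I/O on behalf of base_device. Returns NULL if PAV cannot be used
 * right now, or if the chosen alias is stopped or busier than the base
 * device.
 */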
691 struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
692 {
693 
694 	struct dasd_device *alias_device;
695 	struct alias_pav_group *group;
696 	struct alias_lcu *lcu;
697 	struct dasd_eckd_private *private, *alias_priv;
698 	unsigned long flags;
699 
700 	private = (struct dasd_eckd_private *) base_device->private;
701 	group = private->pavgroup;
702 	lcu = private->lcu;
703 	if (!group || !lcu)
704 		return NULL;
705 	if (lcu->pav == NO_PAV ||
706 	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
707 		return NULL;
708 	if (unlikely(!(private->features.feature[8] & 0x01))) {
709 		/*
710 		 * PAV is enabled but the prefix command is not, which is very
711 		 * unlikely and looks like a lost path group, so use the
712 		 * base device to do the I/O
713 		 */
714 		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
715 			      "Prefix not enabled with PAV enabled\n");
716 		return NULL;
717 	}
718 
719 	spin_lock_irqsave(&lcu->lock, flags);
720 	alias_device = group->next;
721 	if (!alias_device) {
722 		if (list_empty(&group->aliaslist)) {
723 			spin_unlock_irqrestore(&lcu->lock, flags);
724 			return NULL;
725 		} else {
726 			alias_device = list_first_entry(&group->aliaslist,
727 							struct dasd_device,
728 							alias_list);
729 		}
730 	}
731 	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
732 		group->next = list_first_entry(&group->aliaslist,
733 					       struct dasd_device, alias_list);
734 	else
735 		group->next = list_first_entry(&alias_device->alias_list,
736 					       struct dasd_device, alias_list);
737 	spin_unlock_irqrestore(&lcu->lock, flags);
738 	alias_priv = (struct dasd_eckd_private *) alias_device->private;
739 	if ((alias_priv->count < private->count) && !alias_device->stopped)
740 		return alias_device;
741 	else
742 		return NULL;
743 }
744 
745 /*
746  * Summary unit check handling depends on the way alias devices
747  * are handled, so it is done here rather than in dasd_eckd.c
748  */
749 static int reset_summary_unit_check(struct alias_lcu *lcu,
750 				    struct dasd_device *device,
751 				    char reason)
752 {
753 	struct dasd_ccw_req *cqr;
754 	int rc = 0;
755 	struct ccw1 *ccw;
756 
757 	cqr = lcu->rsu_cqr;
758 	strncpy((char *) &cqr->magic, "ECKD", 4);
759 	ASCEBC((char *) &cqr->magic, 4);
760 	ccw = cqr->cpaddr;
761 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
762 	ccw->flags = 0;
763 	ccw->count = 16;
764 	ccw->cda = (__u32)(addr_t) cqr->data;
765 	((char *)cqr->data)[0] = reason;
766 
767 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
768 	cqr->retries = 255;	/* set retry counter to enable basic ERP */
769 	cqr->startdev = device;
770 	cqr->memdev = device;
771 	cqr->block = NULL;
772 	cqr->expires = 5 * HZ;
773 	cqr->buildclk = get_clock();
774 	cqr->status = DASD_CQR_FILLED;
775 
776 	rc = dasd_sleep_on_immediatly(cqr);
777 	return rc;
778 }
779 
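/*
 * Kick the device and block layer bottom halves of all base devices on
 * the LCU so that requests held back during summary unit check handling
 * are started again. Called with lcu->lock held.
 */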
780 static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
781 {
782 	struct alias_pav_group *pavgroup;
783 	struct dasd_device *device;
784 	struct dasd_eckd_private *private;
785 	unsigned long flags;
786 
787 	/* active and inactive lists can contain alias as well as base devices */
788 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
789 		private = (struct dasd_eckd_private *) device->private;
790 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
791 		if (private->uid.type != UA_BASE_DEVICE) {
792 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
793 					       flags);
794 			continue;
795 		}
796 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
797 		dasd_schedule_block_bh(device->block);
798 		dasd_schedule_device_bh(device);
799 	}
800 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
801 		private = (struct dasd_eckd_private *) device->private;
802 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
803 		if (private->uid.type != UA_BASE_DEVICE) {
804 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
805 					       flags);
806 			continue;
807 		}
808 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
809 		dasd_schedule_block_bh(device->block);
810 		dasd_schedule_device_bh(device);
811 	}
812 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
813 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
814 			dasd_schedule_block_bh(device->block);
815 			dasd_schedule_device_bh(device);
816 		}
817 	}
818 }
819 
820 static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
821 {
822 	struct alias_pav_group *pavgroup;
823 	struct dasd_device *device, *temp;
824 	struct dasd_eckd_private *private;
825 	int rc;
826 	unsigned long flags;
827 	LIST_HEAD(active);
828 
829 	/*
830 	 * The problem here is that dasd_flush_device_queue may wait
831 	 * for the termination of a request to complete. We can't keep
832 	 * the lcu lock during that time, so we must assume that
833 	 * the lists may have changed.
834 	 * Idea: first gather all active alias devices in a separate list,
835 	 * then flush the first element of this list unlocked, and afterwards
836 	 * check if it is still on the list before moving it to the
837 	 * active_devices list.
838 	 */
839 
840 	spin_lock_irqsave(&lcu->lock, flags);
841 	list_for_each_entry_safe(device, temp, &lcu->active_devices,
842 				 alias_list) {
843 		private = (struct dasd_eckd_private *) device->private;
844 		if (private->uid.type == UA_BASE_DEVICE)
845 			continue;
846 		list_move(&device->alias_list, &active);
847 	}
848 
849 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
850 		list_splice_init(&pavgroup->aliaslist, &active);
851 	}
852 	while (!list_empty(&active)) {
853 		device = list_first_entry(&active, struct dasd_device,
854 					  alias_list);
855 		spin_unlock_irqrestore(&lcu->lock, flags);
856 		rc = dasd_flush_device_queue(device);
857 		spin_lock_irqsave(&lcu->lock, flags);
858 		/*
859 		 * only move device around if it wasn't moved away while we
860 		 * were waiting for the flush
861 		 */
862 		if (device == list_first_entry(&active,
863 					       struct dasd_device, alias_list))
864 			list_move(&device->alias_list, &lcu->active_devices);
865 	}
866 	spin_unlock_irqrestore(&lcu->lock, flags);
867 }
868 
869 static void __stop_device_on_lcu(struct dasd_device *device,
870 				 struct dasd_device *pos)
871 {
872 	/* If pos == device then device is already locked! */
873 	if (pos == device) {
874 		dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
875 		return;
876 	}
877 	spin_lock(get_ccwdev_lock(pos->cdev));
878 	dasd_device_set_stop_bits(pos, DASD_STOPPED_SU);
879 	spin_unlock(get_ccwdev_lock(pos->cdev));
880 }
881 
882 /*
883  * This function is called in interrupt context, so the
884  * cdev lock for device is already locked!
885  */
886 static void _stop_all_devices_on_lcu(struct alias_lcu *lcu,
887 				     struct dasd_device *device)
888 {
889 	struct alias_pav_group *pavgroup;
890 	struct dasd_device *pos;
891 
892 	list_for_each_entry(pos, &lcu->active_devices, alias_list)
893 		__stop_device_on_lcu(device, pos);
894 	list_for_each_entry(pos, &lcu->inactive_devices, alias_list)
895 		__stop_device_on_lcu(device, pos);
896 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
897 		list_for_each_entry(pos, &pavgroup->baselist, alias_list)
898 			__stop_device_on_lcu(device, pos);
899 		list_for_each_entry(pos, &pavgroup->aliaslist, alias_list)
900 			__stop_device_on_lcu(device, pos);
901 	}
902 }
903 
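/*
 * Clear the DASD_STOPPED_SU bit on all devices of the LCU.
 * Called with lcu->lock held.
 */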
904 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
905 {
906 	struct alias_pav_group *pavgroup;
907 	struct dasd_device *device;
908 	unsigned long flags;
909 
910 	list_for_each_entry(device, &lcu->active_devices, alias_list) {
911 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
912 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
913 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
914 	}
915 
916 	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
917 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
918 		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
919 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
920 	}
921 
922 	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
923 		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
924 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
925 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
926 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
927 					       flags);
928 		}
929 		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
930 			spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
931 			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
932 			spin_unlock_irqrestore(get_ccwdev_lock(device->cdev),
933 					       flags);
934 		}
935 	}
936 }
937 
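/*
 * Worker for summary unit check handling: flush all alias devices, reset
 * the summary unit check on the device that reported it, unstop and
 * restart all devices of the LCU, and schedule a fresh LCU update.
 */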
938 static void summary_unit_check_handling_work(struct work_struct *work)
939 {
940 	struct alias_lcu *lcu;
941 	struct summary_unit_check_work_data *suc_data;
942 	unsigned long flags;
943 	struct dasd_device *device;
944 
945 	suc_data = container_of(work, struct summary_unit_check_work_data,
946 				worker);
947 	lcu = container_of(suc_data, struct alias_lcu, suc_data);
948 	device = suc_data->device;
949 
950 	/* 1. flush alias devices */
951 	flush_all_alias_devices_on_lcu(lcu);
952 
953 	/* 2. reset summary unit check */
954 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
955 	dasd_device_remove_stop_bits(device,
956 				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
957 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
958 	reset_summary_unit_check(lcu, device, suc_data->reason);
959 
960 	spin_lock_irqsave(&lcu->lock, flags);
961 	_unstop_all_devices_on_lcu(lcu);
962 	_restart_all_base_devices_on_lcu(lcu);
963 	/* 3. read new alias configuration */
964 	_schedule_lcu_update(lcu, device);
965 	lcu->suc_data.device = NULL;
966 	spin_unlock_irqrestore(&lcu->lock, flags);
967 }
968 
969 /*
970  * note: this will be called from int handler context (cdev locked)
971  */
972 void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
973 					  struct irb *irb)
974 {
975 	struct alias_lcu *lcu;
976 	char reason;
977 	struct dasd_eckd_private *private;
978 	char *sense;
979 
980 	private = (struct dasd_eckd_private *) device->private;
981 
982 	sense = dasd_get_sense(irb);
983 	if (sense) {
984 		reason = sense[8];
985 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
986 			    "eckd handle summary unit check: reason", reason);
987 	} else {
988 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
989 			    "eckd handle summary unit check:"
990 			    " no reason code available");
991 		return;
992 	}
993 
994 	lcu = private->lcu;
995 	if (!lcu) {
996 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
997 			    "device not ready to handle summary"
998 			    " unit check (no lcu structure)");
999 		return;
1000 	}
1001 	spin_lock(&lcu->lock);
1002 	_stop_all_devices_on_lcu(lcu, device);
1003 	/* prepare for lcu_update */
1004 	private->lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
1005 	/* If this device is about to be removed, just return and wait for
1006 	 * the next interrupt on a different device
1007 	 */
1008 	if (list_empty(&device->alias_list)) {
1009 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1010 			    "device is in offline processing,"
1011 			    " don't do summary unit check handling");
1012 		spin_unlock(&lcu->lock);
1013 		return;
1014 	}
1015 	if (lcu->suc_data.device) {
1016 		/* already scheduled or running */
1017 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1018 			    "previous instance of summary unit check worker"
1019 			    " still pending");
1020 		spin_unlock(&lcu->lock);
1021 		return;
1022 	}
1023 	lcu->suc_data.reason = reason;
1024 	lcu->suc_data.device = device;
1025 	spin_unlock(&lcu->lock);
1026 	schedule_work(&lcu->suc_data.worker);
1027 }
1028