1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * PAV alias management for the DASD ECKD discipline
4 *
5 * Copyright IBM Corp. 2007
6 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
7 */
8
9 #include <linux/list.h>
10 #include <linux/slab.h>
11 #include <asm/ebcdic.h>
12 #include "dasd_int.h"
13 #include "dasd_eckd.h"
14
15 /*
16 * General concept of alias management:
17 * - PAV and DASD alias management is specific to the eckd discipline.
18 * - A device is connected to an lcu as long as the device exists.
 * dasd_alias_make_device_known_to_lcu will be called when the
20 * device is checked by the eckd discipline and
21 * dasd_alias_disconnect_device_from_lcu will be called
22 * before the device is deleted.
23 * - The dasd_alias_add_device / dasd_alias_remove_device
24 * functions mark the point when a device is 'ready for service'.
25 * - A summary unit check is a rare occasion, but it is mandatory to
26 * support it. It requires some complex recovery actions before the
27 * devices can be used again (see dasd_alias_handle_summary_unit_check).
28 * - dasd_alias_get_start_dev will find an alias device that can be used
29 * instead of the base device and does some (very simple) load balancing.
30 * This is the function that gets called for each I/O, so when improving
31 * something, this function should get faster or better, the rest has just
32 * to be correct.
33 */
34
35
36 static void summary_unit_check_handling_work(struct work_struct *);
37 static void lcu_update_work(struct work_struct *);
38 static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);
39
/*
 * Root of the alias tree: one entry per storage server, each server
 * holding its lcus.  List membership is protected by aliastree.lock.
 */
static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};
44
_find_server(struct dasd_uid * uid)45 static struct alias_server *_find_server(struct dasd_uid *uid)
46 {
47 struct alias_server *pos;
48 list_for_each_entry(pos, &aliastree.serverlist, server) {
49 if (!strncmp(pos->uid.vendor, uid->vendor,
50 sizeof(uid->vendor))
51 && !strncmp(pos->uid.serial, uid->serial,
52 sizeof(uid->serial)))
53 return pos;
54 }
55 return NULL;
56 }
57
_find_lcu(struct alias_server * server,struct dasd_uid * uid)58 static struct alias_lcu *_find_lcu(struct alias_server *server,
59 struct dasd_uid *uid)
60 {
61 struct alias_lcu *pos;
62 list_for_each_entry(pos, &server->lculist, lcu) {
63 if (pos->uid.ssid == uid->ssid)
64 return pos;
65 }
66 return NULL;
67 }
68
/*
 * Find the pav group that a device with the given @uid belongs to.
 * Returns NULL if no matching group exists.
 * Called with lcu->lock held by the callers in this file.
 */
static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr &&
		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
			return pos;
	}
	return NULL;
}
96
/*
 * Allocate and initialize a new alias_server entry for @uid.
 * The entry is not yet linked into the alias tree.
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc_obj(*server);
	if (!server)
		return ERR_PTR(-ENOMEM);
	/* a server is identified by vendor and serial number only */
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}
110
/* Counterpart to _allocate_server: release a server entry. */
static void _free_server(struct alias_server *server)
{
	kfree(server);
}
115
/*
 * Allocate an alias_lcu together with the buffers that are needed
 * later on: the unit address configuration record (uac) and a
 * pre-built 'reset summary unit check' request (rsu_cqr).  These are
 * allocated up front with GFP_DMA so the summary unit check handler
 * does not have to allocate memory.
 * The lcu starts with NEED_UAC_UPDATE | UPDATE_PENDING set, i.e. it
 * must be updated before devices can be grouped.
 * Returns ERR_PTR(-ENOMEM) if any allocation fails.
 */
static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc_obj(*lcu);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc_obj(*(lcu->uac), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc_obj(*lcu->rsu_cqr, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc_obj(struct ccw1, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	/* 16 bytes of parameter data, see reset_summary_unit_check */
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	/* an lcu is identified by vendor, serial and ssid */
	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}
161
/* Counterpart to _allocate_lcu: release the lcu and all its buffers. */
static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}
170
/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * Allocations may sleep, so they are done with aliastree.lock dropped;
 * the lookup is then repeated under the lock and the loser of a race
 * frees its freshly allocated copy.
 * Returns 0 on success; a negative return code indicates that
 * something went wrong (e.g. -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		/* drop the lock for the (possibly sleeping) allocation */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		/* same unlock/allocate/re-check dance as for the server */
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	/* a new device starts out on the lcu's inactive list */
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}
229
/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary cancel the work.
 * Also frees the lcu (and the server) once the last device is gone.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		/* cancel_work_sync may sleep, so drop the lock for it */
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		/* re-check: the worker may have run and cleared it meanwhile */
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	/* lock order: aliastree.lock before lcu->lock */
	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	list_del_init(&device->alias_list);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		/* last device gone - tear down the lcu */
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		/* a cancelled update must be rescheduled on another device */
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}
296
297 /*
298 * This function assumes that the unit address configuration stored
299 * in the lcu is up to date and will update the device uid before
300 * adding it to a pav group.
301 */
302
_add_device_to_lcu(struct alias_lcu * lcu,struct dasd_device * device,struct dasd_device * pos)303 static int _add_device_to_lcu(struct alias_lcu *lcu,
304 struct dasd_device *device,
305 struct dasd_device *pos)
306 {
307
308 struct dasd_eckd_private *private = device->private;
309 struct alias_pav_group *group;
310 struct dasd_uid uid;
311
312 spin_lock(get_ccwdev_lock(device->cdev));
313 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
314 private->uid.base_unit_addr =
315 lcu->uac->unit[private->uid.real_unit_addr].base_ua;
316 uid = private->uid;
317 spin_unlock(get_ccwdev_lock(device->cdev));
318 /* if we have no PAV anyway, we don't need to bother with PAV groups */
319 if (lcu->pav == NO_PAV) {
320 list_move(&device->alias_list, &lcu->active_devices);
321 return 0;
322 }
323 group = _find_group(lcu, &uid);
324 if (!group) {
325 group = kzalloc_obj(*group, GFP_ATOMIC);
326 if (!group)
327 return -ENOMEM;
328 memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
329 memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
330 group->uid.ssid = uid.ssid;
331 if (uid.type == UA_BASE_DEVICE)
332 group->uid.base_unit_addr = uid.real_unit_addr;
333 else
334 group->uid.base_unit_addr = uid.base_unit_addr;
335 memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
336 INIT_LIST_HEAD(&group->group);
337 INIT_LIST_HEAD(&group->baselist);
338 INIT_LIST_HEAD(&group->aliaslist);
339 list_add(&group->group, &lcu->grouplist);
340 }
341 if (uid.type == UA_BASE_DEVICE)
342 list_move(&device->alias_list, &group->baselist);
343 else
344 list_move(&device->alias_list, &group->aliaslist);
345 private->pavgroup = group;
346 return 0;
347 };
348
_remove_device_from_lcu(struct alias_lcu * lcu,struct dasd_device * device)349 static void _remove_device_from_lcu(struct alias_lcu *lcu,
350 struct dasd_device *device)
351 {
352 struct dasd_eckd_private *private = device->private;
353 struct alias_pav_group *group;
354
355 list_move(&device->alias_list, &lcu->inactive_devices);
356 group = private->pavgroup;
357 if (!group)
358 return;
359 private->pavgroup = NULL;
360 if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
361 list_del(&group->group);
362 kfree(group);
363 return;
364 }
365 if (group->next == device)
366 group->next = NULL;
367 };
368
369 static int
suborder_not_supported(struct dasd_ccw_req * cqr)370 suborder_not_supported(struct dasd_ccw_req *cqr)
371 {
372 char *sense;
373 char reason;
374 char msg_format;
375 char msg_no;
376
377 /*
378 * intrc values ENODEV, ENOLINK and EPERM
379 * will be optained from sleep_on to indicate that no
380 * IO operation can be started
381 */
382 if (cqr->intrc == -ENODEV)
383 return 1;
384
385 if (cqr->intrc == -ENOLINK)
386 return 1;
387
388 if (cqr->intrc == -EPERM)
389 return 1;
390
391 sense = dasd_get_sense(&cqr->irb);
392 if (!sense)
393 return 0;
394
395 reason = sense[0];
396 msg_format = (sense[7] & 0xF0);
397 msg_no = (sense[7] & 0x0F);
398
399 /* command reject, Format 0 MSG 4 - invalid parameter */
400 if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
401 return 1;
402
403 return 0;
404 }
405
/*
 * Read the lcu's unit address configuration into lcu->uac via a
 * PSF (Perform Subsystem Function) / RSSD (Read Subsystem Data)
 * channel program, suborder 0x0e, started on @device.
 * NEED_UAC_UPDATE is cleared just before the I/O starts so that a
 * summary unit check arriving during the I/O sets it again and is
 * noticed by the caller; on a retryable failure the flag is restored.
 * Returns 0 on success, -EOPNOTSUPP if the suborder is not supported
 * or the device is unusable, otherwise the rc of dasd_sleep_on.
 */
static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	/* first CCW: the PSF command carrying the prssd parameter block */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - feature codes */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	/* second CCW: read the result directly into lcu->uac */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = virt_to_dma32(lcu->uac);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		goto out;

	if (suborder_not_supported(cqr)) {
		/* suborder not supported or device unusable for IO */
		rc = -EOPNOTSUPP;
	} else {
		/* IO failed but should be retried */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
out:
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
472
/*
 * Dissolve all pav groups of the lcu, re-read the unit address
 * configuration via @refdev and re-group the lcu's devices according
 * to the result.  While no valid configuration exists, devices are
 * parked on the active_devices list.
 * Returns 0 on success (including the case where yet another update
 * became necessary meanwhile), otherwise the error from
 * read_unit_address_configuration.
 */
static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	/* move all devices out of the (soon to be stale) pav groups */
	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * there is another update needed skip the remaining handling
	 * the data might already be outdated
	 * but especially do not add the device to an LCU with pending
	 * update
	 */
	if (lcu->flags & NEED_UAC_UPDATE)
		goto out;
	/* derive the lcu's pav mode from the first alias entry found */
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		/*
		 * NOTE(review): the return value is ignored here; on -ENOMEM
		 * the device stays on active_devices - confirm this is the
		 * intended fallback.
		 */
		_add_device_to_lcu(lcu, device, refdev);
	}
out:
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}
535
/*
 * Delayed-work handler for lcu->ruac_data.dwork: run _lcu_update on
 * the device stored by _schedule_lcu_update and either finish the
 * update or re-arm itself for a retry in 30 seconds.
 * The device reference taken in _schedule_lcu_update is dropped when
 * the work is not re-armed.
 */
static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device could have arrived while we were
	 * still processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			      " alias data in lcu (rc = %d), retry later", rc);
		/* re-arm; keep the device reference unless scheduling failed */
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
566
/*
 * Schedule lcu_update_work for this lcu, picking a device to run the
 * configuration read on: preferably @device (if it is still connected
 * to the lcu), otherwise any device of the first pav group, otherwise
 * any device on the active list.
 * Takes a reference on the chosen device, which lcu_update_work drops.
 * Called with lcu->lock held by the callers in this file.
 * Returns 0 on success or if an update is already scheduled/running,
 * -EINVAL if no usable device could be found.
 */
static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	/* drop the reference again if the work was already scheduled */
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
609
/*
 * Mark @device 'ready for service': sort it into its pav group if the
 * lcu's unit address configuration is current, otherwise park it on
 * the active list and schedule a configuration update.
 * Returns 0 on success or the error of _add_device_to_lcu.
 */
int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	__u8 uaddr = private->uid.real_unit_addr;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Check if device and lcu type differ. If so, the uac data may be
	 * outdated and needs to be updated.
	 */
	if (private->uid.type !=  lcu->uac->unit[uaddr].ua_type) {
		lcu->flags |= UPDATE_PENDING;
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "uid type mismatch - trigger rescan");
	}
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		/* park the device until the update has sorted it in */
		list_move(&device->alias_list, &lcu->active_devices);
		private->pavgroup = NULL;
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}
642
/*
 * Same as dasd_alias_add_device, but forces a fresh read of the unit
 * address configuration by setting UPDATE_PENDING first.
 */
int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}
650
dasd_alias_remove_device(struct dasd_device * device)651 int dasd_alias_remove_device(struct dasd_device *device)
652 {
653 struct dasd_eckd_private *private = device->private;
654 struct alias_lcu *lcu = private->lcu;
655 unsigned long flags;
656
657 /* nothing to do if already removed */
658 if (!lcu)
659 return 0;
660 spin_lock_irqsave(&lcu->lock, flags);
661 _remove_device_from_lcu(lcu, device);
662 spin_unlock_irqrestore(&lcu->lock, flags);
663 return 0;
664 }
665
/*
 * Pick an alias device from @base_device's pav group to start the
 * next I/O on, doing a simple round-robin over the group's alias
 * list.  This is the per-I/O hot path (see the comment at the top of
 * this file), so lcu->lock is held only briefly.
 * Returns NULL when the base device itself should be used: no lcu or
 * group, PAV disabled or currently being updated, prefix command not
 * available, or the chosen alias is busier than the base / stopped /
 * going offline.
 */
struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	unsigned long flags;

	if (!lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely
		 * seems to be a lost pathgroup
		 * use base device to do IO
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	group = private->pavgroup;
	if (!group) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		return NULL;
	}
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	/* advance the round-robin pointer, wrapping to the list head */
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		/* list_first_entry on a member node yields its successor */
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	/* simple load balancing: only use an alias that is less busy */
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
721
/*
 * Summary unit check handling depends on the way alias devices
 * are handled so it is done here rather than in dasd_eckd.c
 */

/*
 * Build and synchronously run a 'Reset Summary Unit Check' (RSCK)
 * channel program on @device, reusing the cqr that was pre-allocated
 * with the lcu so that no memory allocation is needed here.
 * @reason is the summary unit check reason code taken from the sense
 * data; it is placed in the first byte of the 16-byte parameter block.
 * Returns the rc of dasd_sleep_on_immediatly.
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	memcpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 16;
	ccw->cda = virt_to_dma32(cqr->data);
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
756
/*
 * Kick the device and block bottom halves of every base device on the
 * lcu so that queued requests are (re)started after summary unit
 * check handling.  Alias devices are skipped.
 * Called with lcu->lock held by the caller.
 */
static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	/* grouped base devices are all on the baselist - no type check needed */
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
785
/*
 * Flush the request queues of all alias devices on the lcu and move
 * the flushed devices to the lcu's active_devices list (out of their
 * pav groups) so they get re-sorted by the next configuration update.
 */
static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		/* flushing may sleep - drop the lock for it */
		spin_unlock_irqrestore(&lcu->lock, flags);
		dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
836
_stop_all_devices_on_lcu(struct alias_lcu * lcu)837 static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
838 {
839 struct alias_pav_group *pavgroup;
840 struct dasd_device *device;
841
842 list_for_each_entry(device, &lcu->active_devices, alias_list) {
843 spin_lock(get_ccwdev_lock(device->cdev));
844 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
845 spin_unlock(get_ccwdev_lock(device->cdev));
846 }
847 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
848 spin_lock(get_ccwdev_lock(device->cdev));
849 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
850 spin_unlock(get_ccwdev_lock(device->cdev));
851 }
852 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
853 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
854 spin_lock(get_ccwdev_lock(device->cdev));
855 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
856 spin_unlock(get_ccwdev_lock(device->cdev));
857 }
858 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
859 spin_lock(get_ccwdev_lock(device->cdev));
860 dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
861 spin_unlock(get_ccwdev_lock(device->cdev));
862 }
863 }
864 }
865
_unstop_all_devices_on_lcu(struct alias_lcu * lcu)866 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
867 {
868 struct alias_pav_group *pavgroup;
869 struct dasd_device *device;
870
871 list_for_each_entry(device, &lcu->active_devices, alias_list) {
872 spin_lock(get_ccwdev_lock(device->cdev));
873 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
874 spin_unlock(get_ccwdev_lock(device->cdev));
875 }
876 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
877 spin_lock(get_ccwdev_lock(device->cdev));
878 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
879 spin_unlock(get_ccwdev_lock(device->cdev));
880 }
881 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
882 list_for_each_entry(device, &pavgroup->baselist, alias_list) {
883 spin_lock(get_ccwdev_lock(device->cdev));
884 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
885 spin_unlock(get_ccwdev_lock(device->cdev));
886 }
887 list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
888 spin_lock(get_ccwdev_lock(device->cdev));
889 dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
890 spin_unlock(get_ccwdev_lock(device->cdev));
891 }
892 }
893 }
894
/*
 * Work handler for lcu->suc_data.worker: perform the recovery steps
 * after a summary unit check - flush alias queues, reset the summary
 * unit check on the storage server, restart the base devices and
 * schedule a fresh read of the alias configuration.
 * Drops the device reference taken when the work was scheduled.
 */
static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* unstop this device first so the RSCK request can be started on it */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}
926
dasd_alias_handle_summary_unit_check(struct work_struct * work)927 void dasd_alias_handle_summary_unit_check(struct work_struct *work)
928 {
929 struct dasd_device *device = container_of(work, struct dasd_device,
930 suc_work);
931 struct dasd_eckd_private *private = device->private;
932 struct alias_lcu *lcu;
933 unsigned long flags;
934
935 lcu = private->lcu;
936 if (!lcu) {
937 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
938 "device not ready to handle summary"
939 " unit check (no lcu structure)");
940 goto out;
941 }
942 spin_lock_irqsave(&lcu->lock, flags);
943 /* If this device is about to be removed just return and wait for
944 * the next interrupt on a different device
945 */
946 if (list_empty(&device->alias_list)) {
947 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
948 "device is in offline processing,"
949 " don't do summary unit check handling");
950 goto out_unlock;
951 }
952 if (lcu->suc_data.device) {
953 /* already scheduled or running */
954 DBF_DEV_EVENT(DBF_WARNING, device, "%s",
955 "previous instance of summary unit check worker"
956 " still pending");
957 goto out_unlock;
958 }
959 _stop_all_devices_on_lcu(lcu);
960 /* prepare for lcu_update */
961 lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
962 lcu->suc_data.reason = private->suc_reason;
963 lcu->suc_data.device = device;
964 dasd_get_device(device);
965 if (!schedule_work(&lcu->suc_data.worker))
966 dasd_put_device(device);
967 out_unlock:
968 spin_unlock_irqrestore(&lcu->lock, flags);
969 out:
970 clear_bit(DASD_FLAG_SUC, &device->flags);
971 dasd_put_device(device);
972 };
973