// SPDX-License-Identifier: GPL-2.0
/*
 * PAV alias management for the DASD ECKD discipline
 *
 * Copyright IBM Corp. 2007
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"

/*
 * General concept of alias management:
 * - PAV and DASD alias management is specific to the eckd discipline.
 * - A device is connected to an lcu as long as the device exists.
 *   dasd_alias_make_device_known_to_lcu will be called when the
 *   device is checked by the eckd discipline and
 *   dasd_alias_disconnect_device_from_lcu will be called
 *   before the device is deleted.
 * - The dasd_alias_add_device / dasd_alias_remove_device
 *   functions mark the point when a device is 'ready for service'.
 * - A summary unit check is a rare occasion, but it is mandatory to
 *   support it. It requires some complex recovery actions before the
 *   devices can be used again (see dasd_alias_handle_summary_unit_check).
 * - dasd_alias_get_start_dev will find an alias device that can be used
 *   instead of the base device and does some (very simple) load balancing.
 *   This is the function that gets called for each I/O, so when improving
 *   something, this function should get faster or better; the rest just
 *   has to be correct.
 */
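
/*
 * Rough device lifecycle from the alias-management point of view
 * (a sketch derived from the notes above, not an exact call trace of
 * the ECKD discipline code):
 *
 *	dasd_alias_make_device_known_to_lcu(dev)     device is recognized
 *	dasd_alias_add_device(dev)                   device ready for service
 *	  ...
 *	  startdev = dasd_alias_get_start_dev(dev)   once per I/O, may be NULL
 *	  ...
 *	dasd_alias_remove_device(dev)                device leaves service
 *	dasd_alias_disconnect_device_from_lcu(dev)   before the device is deleted
 */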

static void summary_unit_check_handling_work(struct work_struct *);
static void lcu_update_work(struct work_struct *);
static int _schedule_lcu_update(struct alias_lcu *, struct dasd_device *);

static struct alias_root aliastree = {
	.serverlist = LIST_HEAD_INIT(aliastree.serverlist),
	.lock = __SPIN_LOCK_UNLOCKED(aliastree.lock),
};

static struct alias_server *_find_server(struct dasd_uid *uid)
{
	struct alias_server *pos;
	list_for_each_entry(pos, &aliastree.serverlist, server) {
		if (!strncmp(pos->uid.vendor, uid->vendor,
			     sizeof(uid->vendor))
		    && !strncmp(pos->uid.serial, uid->serial,
				sizeof(uid->serial)))
			return pos;
	}
	return NULL;
}

static struct alias_lcu *_find_lcu(struct alias_server *server,
				   struct dasd_uid *uid)
{
	struct alias_lcu *pos;
	list_for_each_entry(pos, &server->lculist, lcu) {
		if (pos->uid.ssid == uid->ssid)
			return pos;
	}
	return NULL;
}

static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
					   struct dasd_uid *uid)
{
	struct alias_pav_group *pos;
	__u8 search_unit_addr;

	/* for hyper pav there is only one group */
	if (lcu->pav == HYPER_PAV) {
		if (list_empty(&lcu->grouplist))
			return NULL;
		else
			return list_first_entry(&lcu->grouplist,
						struct alias_pav_group, group);
	}

	/* for base pav we have to find the group that matches the base */
	if (uid->type == UA_BASE_DEVICE)
		search_unit_addr = uid->real_unit_addr;
	else
		search_unit_addr = uid->base_unit_addr;
	list_for_each_entry(pos, &lcu->grouplist, group) {
		if (pos->uid.base_unit_addr == search_unit_addr &&
		    !strncmp(pos->uid.vduit, uid->vduit, sizeof(uid->vduit)))
			return pos;
	}
	return NULL;
}

static struct alias_server *_allocate_server(struct dasd_uid *uid)
{
	struct alias_server *server;

	server = kzalloc(sizeof(*server), GFP_KERNEL);
	if (!server)
		return ERR_PTR(-ENOMEM);
	memcpy(server->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(server->uid.serial, uid->serial, sizeof(uid->serial));
	INIT_LIST_HEAD(&server->server);
	INIT_LIST_HEAD(&server->lculist);
	return server;
}

static void _free_server(struct alias_server *server)
{
	kfree(server);
}
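
/*
 * Allocate an lcu structure. The unit address configuration buffer and
 * the 'reset summary unit check' request (including its CCW and data
 * area) are allocated up front with GFP_DMA, so the summary unit check
 * recovery path does not have to allocate them later.
 */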

static struct alias_lcu *_allocate_lcu(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		return ERR_PTR(-ENOMEM);
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err1;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err2;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err3;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err4;

	memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
	memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
	lcu->uid.ssid = uid->ssid;
	lcu->pav = NO_PAV;
	lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
	INIT_LIST_HEAD(&lcu->lcu);
	INIT_LIST_HEAD(&lcu->inactive_devices);
	INIT_LIST_HEAD(&lcu->active_devices);
	INIT_LIST_HEAD(&lcu->grouplist);
	INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
	INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
	spin_lock_init(&lcu->lock);
	init_completion(&lcu->lcu_setup);
	return lcu;

out_err4:
	kfree(lcu->rsu_cqr->cpaddr);
out_err3:
	kfree(lcu->rsu_cqr);
out_err2:
	kfree(lcu->uac);
out_err1:
	kfree(lcu);
	return ERR_PTR(-ENOMEM);
}

static void _free_lcu(struct alias_lcu *lcu)
{
	kfree(lcu->rsu_cqr->data);
	kfree(lcu->rsu_cqr->cpaddr);
	kfree(lcu->rsu_cqr);
	kfree(lcu->uac);
	kfree(lcu);
}

/*
 * This is the function that will allocate all the server and lcu data,
 * so this function must be called first for a new device.
 * Returns 0 on success; a negative return code indicates that something
 * went wrong (e.g. -ENOMEM).
 */
int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_server *server, *newserver;
	struct alias_lcu *lcu, *newlcu;
	struct dasd_uid uid;

	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&aliastree.lock, flags);
	server = _find_server(&uid);
	if (!server) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newserver = _allocate_server(&uid);
		if (IS_ERR(newserver))
			return PTR_ERR(newserver);
		spin_lock_irqsave(&aliastree.lock, flags);
		server = _find_server(&uid);
		if (!server) {
			list_add(&newserver->server, &aliastree.serverlist);
			server = newserver;
		} else {
			/* someone was faster */
			_free_server(newserver);
		}
	}

	lcu = _find_lcu(server, &uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree.lock, flags);
		newlcu = _allocate_lcu(&uid);
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree.lock, flags);
		lcu = _find_lcu(server, &uid);
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
		} else {
			/* someone was faster */
			_free_lcu(newlcu);
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	private->lcu = lcu;
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree.lock, flags);

	return 0;
}

/*
 * This function removes a device from the scope of alias management.
 * The complicated part is to make sure that it is not in use by
 * any of the workers. If necessary, cancel the work.
 */
void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;
	struct alias_lcu *lcu;
	struct alias_server *server;
	int was_pending;
	struct dasd_uid uid;

	lcu = private->lcu;
	/* nothing to do if already disconnected */
	if (!lcu)
		return;
	device->discipline->get_uid(device, &uid);
	spin_lock_irqsave(&lcu->lock, flags);
	/* make sure that the workers don't use this device */
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);
			lcu->suc_data.device = NULL;
		}
	}
	was_pending = 0;
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		was_pending = 1;
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);
			lcu->ruac_data.device = NULL;
		}
	}
	private->lcu = NULL;
	spin_unlock_irqrestore(&lcu->lock, flags);

	spin_lock_irqsave(&aliastree.lock, flags);
	spin_lock(&lcu->lock);
	list_del_init(&device->alias_list);
	if (list_empty(&lcu->grouplist) &&
	    list_empty(&lcu->active_devices) &&
	    list_empty(&lcu->inactive_devices)) {
		list_del(&lcu->lcu);
		spin_unlock(&lcu->lock);
		_free_lcu(lcu);
		lcu = NULL;
	} else {
		if (was_pending)
			_schedule_lcu_update(lcu, NULL);
		spin_unlock(&lcu->lock);
	}
	server = _find_server(&uid);
	if (server && list_empty(&server->lculist)) {
		list_del(&server->server);
		_free_server(server);
	}
	spin_unlock_irqrestore(&aliastree.lock, flags);
}

/*
 * This function assumes that the unit address configuration stored
 * in the lcu is up to date and will update the device uid before
 * adding it to a pav group.
 */
static int _add_device_to_lcu(struct alias_lcu *lcu,
			      struct dasd_device *device,
			      struct dasd_device *pos)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;
	struct dasd_uid uid;

	spin_lock(get_ccwdev_lock(device->cdev));
	private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
	private->uid.base_unit_addr =
		lcu->uac->unit[private->uid.real_unit_addr].base_ua;
	uid = private->uid;
	spin_unlock(get_ccwdev_lock(device->cdev));
	/* if we have no PAV anyway, we don't need to bother with PAV groups */
	if (lcu->pav == NO_PAV) {
		list_move(&device->alias_list, &lcu->active_devices);
		return 0;
	}
	group = _find_group(lcu, &uid);
	if (!group) {
		group = kzalloc(sizeof(*group), GFP_ATOMIC);
		if (!group)
			return -ENOMEM;
		memcpy(group->uid.vendor, uid.vendor, sizeof(uid.vendor));
		memcpy(group->uid.serial, uid.serial, sizeof(uid.serial));
		group->uid.ssid = uid.ssid;
		if (uid.type == UA_BASE_DEVICE)
			group->uid.base_unit_addr = uid.real_unit_addr;
		else
			group->uid.base_unit_addr = uid.base_unit_addr;
		memcpy(group->uid.vduit, uid.vduit, sizeof(uid.vduit));
		INIT_LIST_HEAD(&group->group);
		INIT_LIST_HEAD(&group->baselist);
		INIT_LIST_HEAD(&group->aliaslist);
		list_add(&group->group, &lcu->grouplist);
	}
	if (uid.type == UA_BASE_DEVICE)
		list_move(&device->alias_list, &group->baselist);
	else
		list_move(&device->alias_list, &group->aliaslist);
	private->pavgroup = group;
	return 0;
}

static void _remove_device_from_lcu(struct alias_lcu *lcu,
				    struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *group;

	list_move(&device->alias_list, &lcu->inactive_devices);
	group = private->pavgroup;
	if (!group)
		return;
	private->pavgroup = NULL;
	if (list_empty(&group->baselist) && list_empty(&group->aliaslist)) {
		list_del(&group->group);
		kfree(group);
		return;
	}
	if (group->next == device)
		group->next = NULL;
}
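
/*
 * Decide whether a failed 'read unit address configuration' request is
 * worth retrying: returns 1 if the interrupt return code shows that no
 * I/O could be started at all (-ENODEV, -ENOLINK, -EPERM), or if the
 * sense data indicates a command reject (format 0, message 4 - invalid
 * parameter), i.e. the suborder is not supported.
 */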

static int
suborder_not_supported(struct dasd_ccw_req *cqr)
{
	char *sense;
	char reason;
	char msg_format;
	char msg_no;

	/*
	 * intrc values ENODEV, ENOLINK and EPERM
	 * will be obtained from sleep_on to indicate that no
	 * IO operation can be started
	 */
	if (cqr->intrc == -ENODEV)
		return 1;

	if (cqr->intrc == -ENOLINK)
		return 1;

	if (cqr->intrc == -EPERM)
		return 1;

	sense = dasd_get_sense(&cqr->irb);
	if (!sense)
		return 0;

	reason = sense[0];
	msg_format = (sense[7] & 0xF0);
	msg_no = (sense[7] & 0x0F);

	/* command reject, Format 0 MSG 4 - invalid parameter */
	if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04))
		return 1;

	return 0;
}

static int read_unit_address_configuration(struct dasd_device *device,
					   struct alias_lcu *lcu)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	unsigned long flags;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data)),
				   device, NULL);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 10;
	cqr->expires = 20 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x0e;	/* Read unit address configuration */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = virt_to_dma32(prssdp);

	/* Read Subsystem Data - unit address configuration */
	memset(lcu->uac, 0, sizeof(*(lcu->uac)));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*(lcu->uac));
	ccw->cda = virt_to_dma32(lcu->uac);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* need to unset flag here to detect race with summary unit check */
	spin_lock_irqsave(&lcu->lock, flags);
	lcu->flags &= ~NEED_UAC_UPDATE;
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		goto out;

	if (suborder_not_supported(cqr)) {
		/* suborder not supported or device unusable for IO */
		rc = -EOPNOTSUPP;
	} else {
		/* IO failed but should be retried */
		spin_lock_irqsave(&lcu->lock, flags);
		lcu->flags |= NEED_UAC_UPDATE;
		spin_unlock_irqrestore(&lcu->lock, flags);
	}
out:
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
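
/*
 * Re-read the unit address configuration of the whole LCU through refdev:
 * dissolve all existing PAV groups, read the new configuration, derive
 * the PAV mode (NO_PAV, BASE_PAV or HYPER_PAV) and sort the devices on
 * the active list back into PAV groups.
 */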

static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
{
	unsigned long flags;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *device, *tempdev;
	int i, rc;
	struct dasd_eckd_private *private;

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
		list_for_each_entry_safe(device, tempdev, &pavgroup->baselist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_for_each_entry_safe(device, tempdev, &pavgroup->aliaslist,
					 alias_list) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
		list_del(&pavgroup->group);
		kfree(pavgroup);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);

	rc = read_unit_address_configuration(refdev, lcu);
	if (rc)
		return rc;

	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * If there is another update needed, skip the remaining handling;
	 * the data might already be outdated. But especially do not add
	 * the device to an LCU with a pending update.
	 */
	if (lcu->flags & NEED_UAC_UPDATE)
		goto out;
	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}

	list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
				 alias_list) {
		_add_device_to_lcu(lcu, device, refdev);
	}
out:
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

static void lcu_update_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct read_uac_work_data *ruac_data;
	struct dasd_device *device;
	unsigned long flags;
	int rc;

	ruac_data = container_of(work, struct read_uac_work_data, dwork.work);
	lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
	device = ruac_data->device;
	rc = _lcu_update(device, lcu);
	/*
	 * Need to check flags again, as there could have been another
	 * prepare_update or a new device while we were still
	 * processing the data
	 */
	spin_lock_irqsave(&lcu->lock, flags);
	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
			      " alias data in lcu (rc = %d), retry later", rc);
		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
			dasd_put_device(device);
	} else {
		dasd_put_device(device);
		lcu->ruac_data.device = NULL;
		lcu->flags &= ~UPDATE_PENDING;
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static int _schedule_lcu_update(struct alias_lcu *lcu,
				struct dasd_device *device)
{
	struct dasd_device *usedev = NULL;
	struct alias_pav_group *group;

	lcu->flags |= NEED_UAC_UPDATE;
	if (lcu->ruac_data.device) {
		/* already scheduled or running */
		return 0;
	}
	if (device && !list_empty(&device->alias_list))
		usedev = device;

	if (!usedev && !list_empty(&lcu->grouplist)) {
		group = list_first_entry(&lcu->grouplist,
					 struct alias_pav_group, group);
		if (!list_empty(&group->baselist))
			usedev = list_first_entry(&group->baselist,
						  struct dasd_device,
						  alias_list);
		else if (!list_empty(&group->aliaslist))
			usedev = list_first_entry(&group->aliaslist,
						  struct dasd_device,
						  alias_list);
	}
	if (!usedev && !list_empty(&lcu->active_devices)) {
		usedev = list_first_entry(&lcu->active_devices,
					  struct dasd_device, alias_list);
	}
	/*
	 * if we haven't found a proper device yet, give up for now, the next
	 * device that will be set active will trigger an lcu update
	 */
	if (!usedev)
		return -EINVAL;
	dasd_get_device(usedev);
	lcu->ruac_data.device = usedev;
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);
	return 0;
}
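
/*
 * Add a device that has become ready for service to its PAV group.
 * If the unit address configuration looks outdated, or adding the
 * device fails, keep the device on the active list and schedule an
 * LCU update instead.
 */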

int dasd_alias_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	__u8 uaddr = private->uid.real_unit_addr;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * Check if device and lcu type differ. If so, the uac data may be
	 * outdated and needs to be updated.
	 */
	if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
		lcu->flags |= UPDATE_PENDING;
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "uid type mismatch - trigger rescan");
	}
	if (!(lcu->flags & UPDATE_PENDING)) {
		rc = _add_device_to_lcu(lcu, device, device);
		if (rc)
			lcu->flags |= UPDATE_PENDING;
	}
	if (lcu->flags & UPDATE_PENDING) {
		list_move(&device->alias_list, &lcu->active_devices);
		private->pavgroup = NULL;
		_schedule_lcu_update(lcu, device);
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
	return rc;
}

int dasd_alias_update_add_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	private->lcu->flags |= UPDATE_PENDING;
	return dasd_alias_add_device(device);
}

int dasd_alias_remove_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu = private->lcu;
	unsigned long flags;

	/* nothing to do if already removed */
	if (!lcu)
		return 0;
	spin_lock_irqsave(&lcu->lock, flags);
	_remove_device_from_lcu(lcu, device);
	spin_unlock_irqrestore(&lcu->lock, flags);
	return 0;
}

struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
{
	struct dasd_eckd_private *alias_priv, *private = base_device->private;
	struct alias_lcu *lcu = private->lcu;
	struct dasd_device *alias_device;
	struct alias_pav_group *group;
	unsigned long flags;

	if (!lcu)
		return NULL;
	if (lcu->pav == NO_PAV ||
	    lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
		return NULL;
	if (unlikely(!(private->features.feature[8] & 0x01))) {
		/*
		 * PAV enabled but prefix not, very unlikely;
		 * seems to be a lost pathgroup.
		 * Use the base device to do IO.
		 */
		DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
			      "Prefix not enabled with PAV enabled\n");
		return NULL;
	}

	spin_lock_irqsave(&lcu->lock, flags);
	group = private->pavgroup;
	if (!group) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		return NULL;
	}
	alias_device = group->next;
	if (!alias_device) {
		if (list_empty(&group->aliaslist)) {
			spin_unlock_irqrestore(&lcu->lock, flags);
			return NULL;
		} else {
			alias_device = list_first_entry(&group->aliaslist,
							struct dasd_device,
							alias_list);
		}
	}
	if (list_is_last(&alias_device->alias_list, &group->aliaslist))
		group->next = list_first_entry(&group->aliaslist,
					       struct dasd_device, alias_list);
	else
		group->next = list_first_entry(&alias_device->alias_list,
					       struct dasd_device, alias_list);
	spin_unlock_irqrestore(&lcu->lock, flags);
	alias_priv = alias_device->private;
	if ((alias_priv->count < private->count) && !alias_device->stopped &&
	    !test_bit(DASD_FLAG_OFFLINE, &alias_device->flags))
		return alias_device;
	else
		return NULL;
}
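
/*
 * A minimal sketch of the intended calling convention (the actual call
 * sites live in the ECKD discipline code, not here): callers fall back
 * to the base device whenever no suitable alias is available.
 *
 *	startdev = dasd_alias_get_start_dev(base_device);
 *	if (!startdev)
 *		startdev = base_device;
 */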

/*
 * Summary unit check handling depends on the way alias devices
 * are handled, so it is done here rather than in dasd_eckd.c
 */
static int reset_summary_unit_check(struct alias_lcu *lcu,
				    struct dasd_device *device,
				    char reason)
{
	struct dasd_ccw_req *cqr;
	int rc = 0;
	struct ccw1 *ccw;

	cqr = lcu->rsu_cqr;
	memcpy((char *) &cqr->magic, "ECKD", 4);
	ASCEBC((char *) &cqr->magic, 4);
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = 16;
	ccw->cda = virt_to_dma32(cqr->data);
	((char *)cqr->data)[0] = reason;

	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 255;	/* set retry counter to enable basic ERP */
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 5 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;
	struct dasd_eckd_private *private;

	/* active and inactive list can contain alias as well as base devices */
	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		private = device->private;
		if (private->uid.type != UA_BASE_DEVICE)
			continue;
		dasd_schedule_block_bh(device->block);
		dasd_schedule_device_bh(device);
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			dasd_schedule_block_bh(device->block);
			dasd_schedule_device_bh(device);
		}
	}
}
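
/*
 * Remove all alias devices of the LCU from their PAV groups and flush
 * their request queues; used as the first step of summary unit check
 * handling before the alias configuration is read again.
 */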

static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device, *temp;
	struct dasd_eckd_private *private;
	unsigned long flags;
	LIST_HEAD(active);

	/*
	 * Problem here is that dasd_flush_device_queue may wait
	 * for termination of a request to complete. We can't keep
	 * the lcu lock during that time, so we must assume that
	 * the lists may have changed.
	 * Idea: first gather all active alias devices in a separate list,
	 * then flush the first element of this list unlocked, and afterwards
	 * check if it is still on the list before moving it to the
	 * active_devices list.
	 */

	spin_lock_irqsave(&lcu->lock, flags);
	list_for_each_entry_safe(device, temp, &lcu->active_devices,
				 alias_list) {
		private = device->private;
		if (private->uid.type == UA_BASE_DEVICE)
			continue;
		list_move(&device->alias_list, &active);
	}

	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_splice_init(&pavgroup->aliaslist, &active);
	}
	while (!list_empty(&active)) {
		device = list_first_entry(&active, struct dasd_device,
					  alias_list);
		spin_unlock_irqrestore(&lcu->lock, flags);
		dasd_flush_device_queue(device);
		spin_lock_irqsave(&lcu->lock, flags);
		/*
		 * only move device around if it wasn't moved away while we
		 * were waiting for the flush
		 */
		if (device == list_first_entry(&active,
					       struct dasd_device, alias_list)) {
			list_move(&device->alias_list, &lcu->active_devices);
			private = device->private;
			private->pavgroup = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}

static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_set_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
	}
}

static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
{
	struct alias_pav_group *pavgroup;
	struct dasd_device *device;

	list_for_each_entry(device, &lcu->active_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
		spin_lock(get_ccwdev_lock(device->cdev));
		dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
		spin_unlock(get_ccwdev_lock(device->cdev));
	}
	list_for_each_entry(pavgroup, &lcu->grouplist, group) {
		list_for_each_entry(device, &pavgroup->baselist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
		list_for_each_entry(device, &pavgroup->aliaslist, alias_list) {
			spin_lock(get_ccwdev_lock(device->cdev));
			dasd_device_remove_stop_bits(device, DASD_STOPPED_SU);
			spin_unlock(get_ccwdev_lock(device->cdev));
		}
	}
}
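
/*
 * Worker function for summary unit check handling: flush the alias
 * devices, reset the summary unit check on the reporting device, unstop
 * and restart all devices of the LCU and finally schedule an LCU update
 * to read the new alias configuration.
 */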

static void summary_unit_check_handling_work(struct work_struct *work)
{
	struct alias_lcu *lcu;
	struct summary_unit_check_work_data *suc_data;
	unsigned long flags;
	struct dasd_device *device;

	suc_data = container_of(work, struct summary_unit_check_work_data,
				worker);
	lcu = container_of(suc_data, struct alias_lcu, suc_data);
	device = suc_data->device;

	/* 1. flush alias devices */
	flush_all_alias_devices_on_lcu(lcu);

	/* 2. reset summary unit check */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_SU | DASD_STOPPED_PENDING));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	reset_summary_unit_check(lcu, device, suc_data->reason);

	spin_lock_irqsave(&lcu->lock, flags);
	_unstop_all_devices_on_lcu(lcu);
	_restart_all_base_devices_on_lcu(lcu);
	/* 3. read new alias configuration */
	_schedule_lcu_update(lcu, device);
	lcu->suc_data.device = NULL;
	dasd_put_device(device);
	spin_unlock_irqrestore(&lcu->lock, flags);
}

void dasd_alias_handle_summary_unit_check(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  suc_work);
	struct dasd_eckd_private *private = device->private;
	struct alias_lcu *lcu;
	unsigned long flags;

	lcu = private->lcu;
	if (!lcu) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "device not ready to handle summary"
			      " unit check (no lcu structure)");
		goto out;
	}
	spin_lock_irqsave(&lcu->lock, flags);
	/*
	 * If this device is about to be removed, just return and wait for
	 * the next interrupt on a different device
	 */
	if (list_empty(&device->alias_list)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "device is in offline processing,"
			      " don't do summary unit check handling");
		goto out_unlock;
	}
	if (lcu->suc_data.device) {
		/* already scheduled or running */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "previous instance of summary unit check worker"
			      " still pending");
		goto out_unlock;
	}
	_stop_all_devices_on_lcu(lcu);
	/* prepare for lcu_update */
	lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	lcu->suc_data.reason = private->suc_reason;
	lcu->suc_data.device = device;
	dasd_get_device(device);
	if (!schedule_work(&lcu->suc_data.worker))
		dasd_put_device(device);
out_unlock:
	spin_unlock_irqrestore(&lcu->lock, flags);
out:
	clear_bit(DASD_FLAG_SUC, &device->flags);
	dasd_put_device(device);
}