/*-
 * Copyright (c) 2011, 2012, 2013, 2014, 2016 Spectra Logic Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * Authors: Justin T. Gibbs     (Spectra Logic Corporation)
 */

/**
 * \file case_file.cc
 *
 * We keep case files for any leaf vdev that is not in the optimal state.
 * However, we only serialize to disk those events that need to be preserved
 * across reboots.  For now, this is just a log of soft errors which we
 * accumulate in order to mark a device as degraded.
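 *
 * Case data that must survive a restart is serialized under s_caseFilePath
 * ("/var/db/zfsd/cases") as one "pool_<poolGUID>_vdev_<vdevGUID>.case" file
 * per case; see Serialize() and DeSerializeFile().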
 */
#include <sys/cdefs.h>
#include <sys/byteorder.h>
#include <sys/time.h>

#include <sys/fs/zfs.h>

#include <dirent.h>
#include <fcntl.h>
#include <iomanip>
#include <fstream>
#include <functional>
#include <sstream>
#include <syslog.h>
#include <unistd.h>

#include <libzfs.h>

#include <list>
#include <map>
#include <string>

#include <devdctl/guid.h>
#include <devdctl/event.h>
#include <devdctl/event_factory.h>
#include <devdctl/exception.h>
#include <devdctl/consumer.h>

#include "callout.h"
#include "vdev_iterator.h"
#include "zfsd_event.h"
#include "case_file.h"
#include "vdev.h"
#include "zfsd.h"
#include "zfsd_exception.h"
#include "zpool_list.h"

/*============================ Namespace Control =============================*/
using std::hex;
using std::ifstream;
using std::stringstream;
using std::setfill;
using std::setw;

using DevdCtl::Event;
using DevdCtl::EventFactory;
using DevdCtl::EventList;
using DevdCtl::Guid;
using DevdCtl::ParseException;

/*--------------------------------- CaseFile ---------------------------------*/
//- CaseFile Static Data -------------------------------------------------------

CaseFileList  CaseFile::s_activeCases;
const string  CaseFile::s_caseFilePath = "/var/db/zfsd/cases";
const timeval CaseFile::s_removeGracePeriod = { 60 /*sec*/, 0 /*usec*/};

//- CaseFile Static Public Methods ---------------------------------------------
CaseFile *
CaseFile::Find(Guid poolGUID, Guid vdevGUID)
{
	for (CaseFileList::iterator curCase = s_activeCases.begin();
	     curCase != s_activeCases.end(); curCase++) {

		if (((*curCase)->PoolGUID() != poolGUID
		  && Guid::InvalidGuid() != poolGUID)
		 || (*curCase)->VdevGUID() != vdevGUID)
			continue;

		/*
		 * We only carry one active case per-vdev.
		 */
		return (*curCase);
	}
	return (NULL);
}

void
CaseFile::Find(Guid poolGUID, Guid vdevGUID, CaseFileList &cases)
{
	for (CaseFileList::iterator curCase = s_activeCases.begin();
	     curCase != s_activeCases.end(); curCase++) {
		if (((*curCase)->PoolGUID() != poolGUID &&
		    Guid::InvalidGuid() != poolGUID) ||
		    (*curCase)->VdevGUID() != vdevGUID)
			continue;

		/*
		 * We can have multiple cases for spare vdevs
		 */
		cases.push_back(*curCase);
		if (!(*curCase)->IsSpare()) {
			return;
		}
	}
}

CaseFile *
CaseFile::Find(const string &physPath)
{
	CaseFile *result = NULL;

	for (CaseFileList::iterator curCase = s_activeCases.begin();
	     curCase != s_activeCases.end(); curCase++) {

		if ((*curCase)->PhysicalPath() != physPath)
			continue;

		if (result != NULL) {
			syslog(LOG_WARNING, "Multiple casefiles found for "
			    "physical path %s. "
			    "This is most likely a bug in zfsd",
			    physPath.c_str());
		}
		result = *curCase;
	}
	return (result);
}

void
CaseFile::ReEvaluateByGuid(Guid poolGUID, const ZfsEvent &event)
{
	CaseFileList::iterator casefile;
	for (casefile = s_activeCases.begin(); casefile != s_activeCases.end();){
		CaseFileList::iterator next = casefile;
		next++;
		if (poolGUID == (*casefile)->PoolGUID())
			(*casefile)->ReEvaluate(event);
		casefile = next;
	}
}

CaseFile &
CaseFile::Create(Vdev &vdev)
{
	CaseFile *activeCase;

	activeCase = Find(vdev.PoolGUID(), vdev.GUID());
	if (activeCase == NULL)
		activeCase = new CaseFile(vdev);

	return (*activeCase);
}

void
CaseFile::DeSerialize()
{
	struct dirent **caseFiles;

	int numCaseFiles(scandir(s_caseFilePath.c_str(), &caseFiles,
			 DeSerializeSelector, /*compar*/NULL));

	if (numCaseFiles == -1)
		return;
	if (numCaseFiles == 0) {
		free(caseFiles);
		return;
	}

	for (int i = 0; i < numCaseFiles; i++) {
		DeSerializeFile(caseFiles[i]->d_name);
		free(caseFiles[i]);
	}
	free(caseFiles);
}

bool
CaseFile::Empty()
{
	return (s_activeCases.empty());
}

void
CaseFile::LogAll()
{
	for (CaseFileList::iterator curCase = s_activeCases.begin();
	     curCase != s_activeCases.end(); curCase++)
		(*curCase)->Log();
}

void
CaseFile::PurgeAll()
{
	/*
	 * Serialize casefiles before deleting them so that they can be reread
	 * and revalidated during BuildCaseFiles.
	 * CaseFiles remove themselves from this list on destruction.
	 */
	while (s_activeCases.size() != 0) {
		CaseFile *casefile = s_activeCases.front();
		casefile->Serialize();
		delete casefile;
	}
}

int
CaseFile::IsSpare()
{
	return (m_is_spare);
}

//- CaseFile Public Methods ----------------------------------------------------
bool
CaseFile::RefreshVdevState()
{
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &m_poolGUID);
	zpool_handle_t *casePool(zpl.empty() ? NULL : zpl.front());
	if (casePool == NULL)
		return (false);

	Vdev vd(casePool, CaseVdev(casePool));
	if (vd.DoesNotExist())
		return (false);

	m_vdevState    = vd.State();
	m_vdevPhysPath = vd.PhysicalPath();
	return (true);
}

bool
CaseFile::ReEvaluate(const string &devPath, const string &physPath, Vdev *vdev)
{
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &m_poolGUID);
	zpool_handle_t *pool(zpl.empty() ? NULL : zpl.front());
	int flags = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;

	if (pool == NULL || !RefreshVdevState()) {
		/*
		 * The pool or vdev for this case file is no longer
		 * part of the configuration.  This can happen
		 * if we process a device arrival notification
		 * before seeing the ZFS configuration change
		 * event.
		 */
		syslog(LOG_INFO,
		    "CaseFile::ReEvaluate(%s,%s) Pool/Vdev unconfigured. "
		    "Closing\n",
		    PoolGUIDString().c_str(),
		    VdevGUIDString().c_str());
		Close();

		/*
		 * Since this event was not used to close this
		 * case, do not report it as consumed.
		 */
		return (/*consumed*/false);
	}

	if (VdevState() > VDEV_STATE_CANT_OPEN) {
		/*
		 * For now, newly discovered devices only help for
		 * devices that are missing.  In the future, we might
		 * use a newly inserted spare to replace a degraded
		 * or faulted device.
		 */
		syslog(LOG_INFO, "CaseFile::ReEvaluate(%s,%s): Pool/Vdev ignored",
		    PoolGUIDString().c_str(), VdevGUIDString().c_str());
		return (/*consumed*/false);
	}

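	/*
	 * The arriving device corresponds to the vdev tracked by this case.
	 * Try to online it in place rather than replacing it.
	 */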
	if (vdev != NULL
	 && ( vdev->PoolGUID() == m_poolGUID
	   || vdev->PoolGUID() == Guid::InvalidGuid())
	 && vdev->GUID() == m_vdevGUID) {

		if (IsSpare())
			flags |= ZFS_ONLINE_SPARE;
		if (zpool_vdev_online(pool, vdev->GUIDString().c_str(),
		    flags, &m_vdevState) != 0) {
			syslog(LOG_ERR,
			    "Failed to online vdev(%s/%s:%s): %s: %s\n",
			    zpool_get_name(pool), vdev->GUIDString().c_str(),
			    devPath.c_str(), libzfs_error_action(g_zfsHandle),
			    libzfs_error_description(g_zfsHandle));
			return (/*consumed*/false);
		}

		syslog(LOG_INFO, "Onlined vdev(%s/%s:%s). State now %s.\n",
		    zpool_get_name(pool), vdev->GUIDString().c_str(),
		    devPath.c_str(),
		    zpool_state_to_name(VdevState(), VDEV_AUX_NONE));

		/*
		 * Check the vdev state post the online action to see
		 * if we can retire this case.
		 */
		CloseIfSolved();

		return (/*consumed*/true);
	}

	/*
	 * If the auto-replace policy is enabled, and we have physical
	 * path information, try a physical path replacement.
	 */
	if (zpool_get_prop_int(pool, ZPOOL_PROP_AUTOREPLACE, NULL) == 0) {
		syslog(LOG_INFO,
		    "CaseFile(%s:%s:%s): AutoReplace not set. "
		    "Ignoring device insertion.\n",
		    PoolGUIDString().c_str(),
		    VdevGUIDString().c_str(),
		    zpool_state_to_name(VdevState(), VDEV_AUX_NONE));
		return (/*consumed*/false);
	}

	if (PhysicalPath().empty()) {
		syslog(LOG_INFO,
		    "CaseFile(%s:%s:%s): No physical path information. "
		    "Ignoring device insertion.\n",
		    PoolGUIDString().c_str(),
		    VdevGUIDString().c_str(),
		    zpool_state_to_name(VdevState(), VDEV_AUX_NONE));
		return (/*consumed*/false);
	}

	if (physPath != PhysicalPath()) {
		syslog(LOG_INFO,
		    "CaseFile(%s:%s:%s): Physical path mismatch. "
		    "Ignoring device insertion.\n",
		    PoolGUIDString().c_str(),
		    VdevGUIDString().c_str(),
		    zpool_state_to_name(VdevState(), VDEV_AUX_NONE));
		return (/*consumed*/false);
	}

	/* Write a label on the newly inserted disk. */
	if (zpool_label_disk(g_zfsHandle, pool, devPath.c_str()) != 0) {
		syslog(LOG_ERR,
		    "Replace vdev(%s/%s) by physical path (label): %s: %s\n",
		    zpool_get_name(pool), VdevGUIDString().c_str(),
		    libzfs_error_action(g_zfsHandle),
		    libzfs_error_description(g_zfsHandle));
		return (/*consumed*/false);
	}

	syslog(LOG_INFO, "CaseFile::ReEvaluate(%s/%s): Replacing with %s",
	    PoolGUIDString().c_str(), VdevGUIDString().c_str(),
	    devPath.c_str());
	return (Replace(VDEV_TYPE_DISK, devPath.c_str(), /*isspare*/false));
}

bool
CaseFile::ReEvaluate(const ZfsEvent &event)
{
	bool consumed(false);

	if (event.Value("type") == "sysevent.fs.zfs.vdev_remove") {
		/*
		 * The Vdev we represent has been removed from the
		 * configuration.  This case is no longer of value.
		 */
		Close();

		return (/*consumed*/true);
	} else if (event.Value("type") == "sysevent.fs.zfs.pool_destroy") {
		/* This Pool has been destroyed.  Discard the case */
		Close();

		return (/*consumed*/true);
	} else if (event.Value("type") == "sysevent.fs.zfs.config_sync") {
		RefreshVdevState();
		if (VdevState() < VDEV_STATE_HEALTHY)
			consumed = ActivateSpare();
	}

	if (event.Value("class") == "resource.fs.zfs.removed") {
		bool spare_activated;

		if (!RefreshVdevState()) {
			/*
			 * The pool or vdev for this case file is no longer
			 * part of the configuration.  This can happen
			 * if we process a device arrival notification
			 * before seeing the ZFS configuration change
			 * event.
			 */
			syslog(LOG_INFO,
			    "CaseFile::ReEvaluate(%s,%s) Pool/Vdev "
			    "unconfigured. Closing\n",
			    PoolGUIDString().c_str(),
			    VdevGUIDString().c_str());
			/*
			 * Close the case now so we won't waste cycles in the
			 * system rescan
			 */
			Close();

			/*
			 * Since this event was not used to close this
			 * case, do not report it as consumed.
			 */
			return (/*consumed*/false);
		}

		/*
		 * Discard any tentative I/O error events for
		 * this case.  They were most likely caused by the
		 * hot-unplug of this device.
		 */
		PurgeTentativeEvents();

		/* Try to activate spares if they are available */
		spare_activated = ActivateSpare();

		/*
		 * Rescan the drives in the system to see if a recent
		 * drive arrival can be used to solve this case.
		 */
		ZfsDaemon::RequestSystemRescan();

		/*
		 * Consume the event if we successfully activated a spare.
		 * Otherwise, leave it in the unconsumed events list so that the
		 * future addition of a spare to this pool might be able to
		 * close the case
		 */
		consumed = spare_activated;
	} else if (event.Value("class") == "resource.fs.zfs.statechange") {
		RefreshVdevState();
		/*
		 * If this vdev is DEGRADED, FAULTED, or UNAVAIL, try to
		 * activate a hotspare.  Otherwise, ignore the event
		 */
		if (VdevState() == VDEV_STATE_FAULTED ||
		    VdevState() == VDEV_STATE_DEGRADED ||
		    VdevState() == VDEV_STATE_CANT_OPEN)
			(void) ActivateSpare();
		consumed = true;
	} else if (event.Value("class") == "ereport.fs.zfs.io" ||
	    event.Value("class") == "ereport.fs.zfs.checksum") {
		m_tentativeEvents.push_front(event.DeepCopy());
		RegisterCallout(event);
		consumed = true;
	}

	bool closed(CloseIfSolved());

	return (consumed || closed);
}

/* Find a Vdev containing the vdev with the given GUID */
static nvlist_t*
find_parent(nvlist_t *pool_config, nvlist_t *config, DevdCtl::Guid child_guid)
{
	nvlist_t **vdevChildren;
	int        error;
	unsigned   ch, numChildren;

	error = nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
					   &vdevChildren, &numChildren);

	if (error != 0 || numChildren == 0)
		return (NULL);

	for (ch = 0; ch < numChildren; ch++) {
		nvlist *result;
		Vdev vdev(pool_config, vdevChildren[ch]);

		if (vdev.GUID() == child_guid)
			return (config);

		result = find_parent(pool_config, vdevChildren[ch], child_guid);
		if (result != NULL)
			return (result);
	}

	return (NULL);
}

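/*
 * Attach an available hot spare in place of this case's vdev.  A spare is
 * only activated if the vdev is not already a member of a "replacing" vdev
 * and a spare can be found that is healthy and not already in use.
 */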
bool
CaseFile::ActivateSpare() {
	nvlist_t	 *config, *nvroot, *parent_config;
	nvlist_t	**spares;
	const char	 *devPath, *poolname, *vdev_type;
	u_int		  nspares, i;
	int		  error;

	ZpoolList zpl(ZpoolList::ZpoolByGUID, &m_poolGUID);
	zpool_handle_t	*zhp(zpl.empty() ? NULL : zpl.front());
	if (zhp == NULL) {
		syslog(LOG_ERR, "CaseFile::ActivateSpare: Could not find pool "
		    "for pool_guid %" PRIu64".", (uint64_t)m_poolGUID);
		return (false);
	}
	poolname = zpool_get_name(zhp);
	config = zpool_get_config(zhp, NULL);
	if (config == NULL) {
		syslog(LOG_ERR, "CaseFile::ActivateSpare: Could not find pool "
		    "config for pool %s", poolname);
		return (false);
	}
	error = nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot);
	if (error != 0) {
		syslog(LOG_ERR, "CaseFile::ActivateSpare: Could not find vdev "
		    "tree for pool %s", poolname);
		return (false);
	}

	parent_config = find_parent(config, nvroot, m_vdevGUID);
	if (parent_config != NULL) {
		const char *parent_type;

		/*
		 * Don't activate spares for members of a "replacing" vdev.
		 * They're already dealt with.  Sparing them will just drag out
		 * the resilver process.
		 */
		error = nvlist_lookup_string(parent_config,
		    ZPOOL_CONFIG_TYPE, &parent_type);
		if (error == 0 && strcmp(parent_type, VDEV_TYPE_REPLACING) == 0)
			return (false);
	}

	nspares = 0;
	nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
	    &nspares);
	if (nspares == 0) {
		/* The pool has no spares configured */
		syslog(LOG_INFO, "CaseFile::ActivateSpare: "
		    "No spares available for pool %s", poolname);
		return (false);
	}
	for (i = 0; i < nspares; i++) {
		uint64_t    *nvlist_array;
		vdev_stat_t *vs;
		uint_t	     nstats;

		if (nvlist_lookup_uint64_array(spares[i],
		    ZPOOL_CONFIG_VDEV_STATS, &nvlist_array, &nstats) != 0) {
			syslog(LOG_ERR, "CaseFile::ActivateSpare: Could not "
			    "find vdev stats for pool %s, spare %d",
			    poolname, i);
			return (false);
		}
		vs = reinterpret_cast<vdev_stat_t *>(nvlist_array);

		if ((vs->vs_aux != VDEV_AUX_SPARED)
		 && (vs->vs_state == VDEV_STATE_HEALTHY)) {
			/* We found a usable spare */
			break;
		}
	}

	if (i == nspares) {
		/* No available spares were found */
		return (false);
	}

	error = nvlist_lookup_string(spares[i], ZPOOL_CONFIG_PATH, &devPath);
	if (error != 0) {
		syslog(LOG_ERR, "CaseFile::ActivateSpare: Cannot determine "
		    "the path of pool %s, spare %d. Error %d",
		    poolname, i, error);
		return (false);
	}

	error = nvlist_lookup_string(spares[i], ZPOOL_CONFIG_TYPE, &vdev_type);
	if (error != 0) {
		syslog(LOG_ERR, "CaseFile::ActivateSpare: Cannot determine "
		    "the vdev type of pool %s, spare %d. Error %d",
		    poolname, i, error);
		return (false);
	}

	return (Replace(vdev_type, devPath, /*isspare*/true));
}

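/*
 * Arm (or re-arm) the tentative event timer for this case.  The countdown is
 * the remove grace period less the time that has already elapsed since the
 * event was generated, clamped to a minimum of one microsecond.
 */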
void
CaseFile::RegisterCallout(const Event &event)
{
	timeval now, countdown, elapsed, timestamp, zero, remaining;

	gettimeofday(&now, 0);
	timestamp = event.GetTimestamp();
	timersub(&now, &timestamp, &elapsed);
	timersub(&s_removeGracePeriod, &elapsed, &countdown);
	/*
	 * If countdown is <= zero, Reset the timer to the
	 * smallest positive time value instead
	 */
	timerclear(&zero);
	if (timercmp(&countdown, &zero, <=)) {
		timerclear(&countdown);
		countdown.tv_usec = 1;
	}

	remaining = m_tentativeTimer.TimeRemaining();

	if (!m_tentativeTimer.IsPending()
	 || timercmp(&countdown, &remaining, <))
		m_tentativeTimer.Reset(countdown, OnGracePeriodEnded, this);
}

bool
CaseFile::CloseIfSolved()
{
	if (m_events.empty()
	 && m_tentativeEvents.empty()) {

		/*
		 * We currently do not track or take actions on
		 * devices in the degraded or faulted state.
		 * Once we have support for spare pools, we'll
		 * retain these cases so that any spares added in
		 * the future can be applied to them.
		 */
		switch (VdevState()) {
		case VDEV_STATE_HEALTHY:
			/* No need to keep cases for healthy vdevs */
			Close();
			return (true);
		case VDEV_STATE_REMOVED:
		case VDEV_STATE_CANT_OPEN:
			/*
			 * Keep open.  We may solve it with a newly inserted
			 * device.
			 */
		case VDEV_STATE_FAULTED:
		case VDEV_STATE_DEGRADED:
			/*
			 * Keep open.  We may solve it with the future
			 * addition of a spare to the pool
			 */
		case VDEV_STATE_UNKNOWN:
		case VDEV_STATE_CLOSED:
		case VDEV_STATE_OFFLINE:
			/*
			 * Keep open?  This may not be the correct behavior,
			 * but it's what we've always done
			 */
			;
		}

		/*
		 * Re-serialize the case in order to remove any
		 * previous event data.
		 */
		Serialize();
	}

	return (false);
}

void
CaseFile::Log()
{
	syslog(LOG_INFO, "CaseFile(%s,%s,%s)\n", PoolGUIDString().c_str(),
	    VdevGUIDString().c_str(), PhysicalPath().c_str());
	syslog(LOG_INFO, "\tVdev State = %s\n",
	    zpool_state_to_name(VdevState(), VDEV_AUX_NONE));
	if (m_tentativeEvents.size() != 0) {
		syslog(LOG_INFO, "\t=== Tentative Events ===\n");
		for (EventList::iterator event(m_tentativeEvents.begin());
		     event != m_tentativeEvents.end(); event++)
			(*event)->Log(LOG_INFO);
	}
	if (m_events.size() != 0) {
		syslog(LOG_INFO, "\t=== Events ===\n");
		for (EventList::iterator event(m_events.begin());
		     event != m_events.end(); event++)
			(*event)->Log(LOG_INFO);
	}
}

//- CaseFile Static Protected Methods ------------------------------------------
void
CaseFile::OnGracePeriodEnded(void *arg)
{
	CaseFile &casefile(*static_cast<CaseFile *>(arg));

	casefile.OnGracePeriodEnded();
}

int
CaseFile::DeSerializeSelector(const struct dirent *dirEntry)
{
	uint64_t poolGUID;
	uint64_t vdevGUID;

	if (dirEntry->d_type == DT_REG
	 && sscanf(dirEntry->d_name, "pool_%" PRIu64 "_vdev_%" PRIu64 ".case",
		   &poolGUID, &vdevGUID) == 2)
		return (1);
	return (0);
}

void
CaseFile::DeSerializeFile(const char *fileName)
{
	string	  fullName(s_caseFilePath + '/' + fileName);
	CaseFile *existingCaseFile(NULL);
	CaseFile *caseFile(NULL);

	try {
		uint64_t poolGUID;
		uint64_t vdevGUID;
		nvlist_t *vdevConf;

		if (sscanf(fileName, "pool_%" PRIu64 "_vdev_%" PRIu64 ".case",
		       &poolGUID, &vdevGUID) != 2) {
			throw ZfsdException("CaseFile::DeSerialize: "
			    "Unintelligible CaseFile filename %s.\n", fileName);
		}
		existingCaseFile = Find(Guid(poolGUID), Guid(vdevGUID));
		if (existingCaseFile != NULL) {
			/*
			 * If the vdev is already degraded or faulted,
			 * there's no point in keeping the state around
			 * that we use to put a drive into the degraded
			 * state.  However, if the vdev is simply missing,
			 * preserve the case data in the hopes that it will
			 * return.
			 */
			caseFile = existingCaseFile;
			vdev_state curState(caseFile->VdevState());
			if (curState > VDEV_STATE_CANT_OPEN
			 && curState < VDEV_STATE_HEALTHY) {
				unlink(fileName);
				return;
			}
		} else {
			ZpoolList zpl(ZpoolList::ZpoolByGUID, &poolGUID);
			if (zpl.empty()
			 || (vdevConf = VdevIterator(zpl.front())
						.Find(vdevGUID)) == NULL) {
				/*
				 * Either the pool no longer exists
				 * or this vdev is no longer a member of
				 * the pool.
				 */
				unlink(fullName.c_str());
				return;
			}

			/*
			 * Any vdev we find that does not have a case file
			 * must be in the healthy state and thus worthy of
			 * continued SERD data tracking.
			 */
			caseFile = new CaseFile(Vdev(zpl.front(), vdevConf));
		}

		ifstream caseStream(fullName.c_str());
		if (!caseStream)
			throw ZfsdException("CaseFile::DeSerialize: Unable to "
			    "read %s.\n", fileName);

		caseFile->DeSerialize(caseStream);
	} catch (const ParseException &exp) {

		exp.Log();
		if (caseFile != existingCaseFile)
			delete caseFile;

		/*
		 * Since we can't parse the file, unlink it so we don't
		 * trip over it again.
		 */
		unlink(fileName);
	} catch (const ZfsdException &zfsException) {

		zfsException.Log();
		if (caseFile != existingCaseFile)
			delete caseFile;
	}
}

//- CaseFile Protected Methods -------------------------------------------------
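/*
 * Cases are created through CaseFile::Create() or during deserialization;
 * each new case registers itself on the s_activeCases list.
 */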
CaseFile::CaseFile(const Vdev &vdev)
 : m_poolGUID(vdev.PoolGUID()),
   m_vdevGUID(vdev.GUID()),
   m_vdevState(vdev.State()),
   m_vdevPhysPath(vdev.PhysicalPath()),
   m_is_spare(vdev.IsSpare())
{
	stringstream guidString;

	guidString << m_vdevGUID;
	m_vdevGUIDString = guidString.str();
	guidString.str("");
	guidString << m_poolGUID;
	m_poolGUIDString = guidString.str();

	s_activeCases.push_back(this);

	syslog(LOG_INFO, "Creating new CaseFile:\n");
	Log();
}

CaseFile::~CaseFile()
{
	PurgeEvents();
	PurgeTentativeEvents();
	m_tentativeTimer.Stop();
	s_activeCases.remove(this);
}

void
CaseFile::PurgeEvents()
{
	for (EventList::iterator event(m_events.begin());
	     event != m_events.end(); event++)
		delete *event;

	m_events.clear();
}

void
CaseFile::PurgeTentativeEvents()
{
	for (EventList::iterator event(m_tentativeEvents.begin());
	     event != m_tentativeEvents.end(); event++)
		delete *event;

	m_tentativeEvents.clear();
}

void
CaseFile::SerializeEvList(const EventList events, int fd,
			  const char* prefix) const
{
	if (events.empty())
		return;
	for (EventList::const_iterator curEvent = events.begin();
	     curEvent != events.end(); curEvent++) {
		const string &eventString((*curEvent)->GetEventString());

		// TODO: replace many write(2) calls with a single writev(2)
		if (prefix)
			write(fd, prefix, strlen(prefix));
		write(fd, eventString.c_str(), eventString.length());
	}
}

void
CaseFile::Serialize()
{
	stringstream saveFile;

	saveFile << setfill('0')
		 << s_caseFilePath << "/"
		 << "pool_" << PoolGUIDString()
		 << "_vdev_" << VdevGUIDString()
		 << ".case";

	if (m_events.empty() && m_tentativeEvents.empty()) {
		unlink(saveFile.str().c_str());
		return;
	}

	int fd(open(saveFile.str().c_str(), O_CREAT|O_TRUNC|O_WRONLY, 0644));
	if (fd == -1) {
		syslog(LOG_ERR, "CaseFile::Serialize: Unable to open %s.\n",
		    saveFile.str().c_str());
		return;
	}
	SerializeEvList(m_events, fd);
	SerializeEvList(m_tentativeEvents, fd, "tentative ");
	close(fd);
}

/*
 * XXX: This method assumes that events may not contain embedded newlines.  If
 * ever events can contain embedded newlines, then CaseFile must switch
 * serialization formats
 */
void
CaseFile::DeSerialize(ifstream &caseStream)
{
	string evString;
	const EventFactory &factory(ZfsDaemon::Get().GetFactory());

	caseStream >> std::noskipws >> std::ws;
	while (caseStream.good()) {
		/*
		 * Outline:
		 * read the beginning of a line and check it for
		 * "tentative".  If found, discard "tentative".
		 * Create a new event
		 * continue
		 */
		EventList* destEvents;
		const string tentFlag("tentative ");
		string line;
		std::stringbuf lineBuf;

		caseStream.get(lineBuf);
		caseStream.ignore();  /*discard the newline character*/
		line = lineBuf.str();
		if (line.compare(0, tentFlag.size(), tentFlag) == 0) {
			/* Discard "tentative" */
			line.erase(0, tentFlag.size());
			destEvents = &m_tentativeEvents;
		} else {
			destEvents = &m_events;
		}
		Event *event(Event::CreateEvent(factory, line));
		if (event != NULL) {
			destEvents->push_back(event);
			RegisterCallout(*event);
		}
	}
}

void
CaseFile::Close()
{
	/*
	 * This case is no longer relevant.  Clean up our
	 * serialization file, and delete the case.
	 */
	syslog(LOG_INFO, "CaseFile(%s,%s) closed - State %s\n",
	    PoolGUIDString().c_str(), VdevGUIDString().c_str(),
	    zpool_state_to_name(VdevState(), VDEV_AUX_NONE));

	/*
	 * Serialization of a Case with no event data, clears the
	 * Serialization data for that event.
	 */
	PurgeEvents();
	Serialize();

	delete this;
}

void
CaseFile::OnGracePeriodEnded()
{
	bool should_fault, should_degrade;
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &m_poolGUID);
	zpool_handle_t *zhp(zpl.empty() ? NULL : zpl.front());

	m_events.splice(m_events.begin(), m_tentativeEvents);
	should_fault = ShouldFault();
	should_degrade = ShouldDegrade();

	if (should_fault || should_degrade) {
		if (zhp == NULL
		 || (VdevIterator(zhp).Find(m_vdevGUID)) == NULL) {
			/*
			 * Either the pool no longer exists
			 * or this vdev is no longer a member of
			 * the pool.
			 */
			Close();
			return;
		}
	}

	/* A fault condition has priority over a degrade condition */
	if (ShouldFault()) {
		/* Fault the vdev and close the case. */
		if (zpool_vdev_fault(zhp, (uint64_t)m_vdevGUID,
		    VDEV_AUX_ERR_EXCEEDED) == 0) {
			syslog(LOG_INFO, "Faulting vdev(%s/%s)",
			    PoolGUIDString().c_str(),
			    VdevGUIDString().c_str());
			Close();
			return;
		} else {
			syslog(LOG_ERR, "Fault vdev(%s/%s): %s: %s\n",
			    PoolGUIDString().c_str(),
			    VdevGUIDString().c_str(),
			    libzfs_error_action(g_zfsHandle),
			    libzfs_error_description(g_zfsHandle));
		}
	} else if (ShouldDegrade()) {
		/* Degrade the vdev and close the case. */
		if (zpool_vdev_degrade(zhp, (uint64_t)m_vdevGUID,
		    VDEV_AUX_ERR_EXCEEDED) == 0) {
			syslog(LOG_INFO, "Degrading vdev(%s/%s)",
			    PoolGUIDString().c_str(),
			    VdevGUIDString().c_str());
			Close();
			return;
		} else {
			syslog(LOG_ERR, "Degrade vdev(%s/%s): %s: %s\n",
			    PoolGUIDString().c_str(),
			    VdevGUIDString().c_str(),
			    libzfs_error_action(g_zfsHandle),
			    libzfs_error_description(g_zfsHandle));
		}
	}
	Serialize();
}

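/*
 * If this case's vdev is a member of a "spare" vdev, return the sibling
 * device that is replacing it, preferring one that is resilvering or
 * healthy.  Otherwise return NonexistentVdev.
 */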
Vdev
CaseFile::BeingReplacedBy(zpool_handle_t *zhp) {
	Vdev vd(zhp, CaseVdev(zhp));
	std::list<Vdev> children;
	std::list<Vdev>::iterator children_it;

	Vdev parent(vd.Parent());
	Vdev replacing(NonexistentVdev);

	/*
	 * To determine whether we are being replaced by another spare that
	 * is still working, then make sure that it is currently spared and
	 * that the spare is either resilvering or healthy.  If any of these
	 * conditions fail, then we are not being replaced by a spare.
	 *
	 * If the spare is healthy, then the case file should be closed very
	 * soon after this check.
	 */
	if (parent.DoesNotExist()
	 || parent.Name(zhp, /*verbose*/false) != "spare")
		return (NonexistentVdev);

	children = parent.Children();
	children_it = children.begin();
	for (;children_it != children.end(); children_it++) {
		Vdev child = *children_it;

		/* Skip our vdev. */
		if (child.GUID() == VdevGUID())
			continue;
		/*
		 * Accept the first child that doesn't match our GUID, or
		 * any resilvering/healthy device if one exists.
		 */
		if (replacing.DoesNotExist() || child.IsResilvering()
		 || child.State() == VDEV_STATE_HEALTHY)
			replacing = child;
	}

	return (replacing);
}

bool
CaseFile::Replace(const char* vdev_type, const char* path, bool isspare) {
	nvlist_t *nvroot, *newvd;
	const char *poolname;
	string oldstr(VdevGUIDString());
	bool retval = true;

	/* Figure out what pool we're working on */
	ZpoolList zpl(ZpoolList::ZpoolByGUID, &m_poolGUID);
	zpool_handle_t *zhp(zpl.empty() ? NULL : zpl.front());
	if (zhp == NULL) {
		syslog(LOG_ERR, "CaseFile::Replace: could not find pool for "
		    "pool_guid %" PRIu64 ".", (uint64_t)m_poolGUID);
		return (false);
	}
	poolname = zpool_get_name(zhp);
	Vdev vd(zhp, CaseVdev(zhp));
	Vdev replaced(BeingReplacedBy(zhp));

	if (isspare && !vd.IsSpare() && !replaced.DoesNotExist()) {
		/* If we are already being replaced by a working spare, pass. */
		if (replaced.IsResilvering()
		 || replaced.State() == VDEV_STATE_HEALTHY) {
			syslog(LOG_INFO, "CaseFile::Replace(%s->%s): already "
			    "replaced", VdevGUIDString().c_str(), path);
			return (/*consumed*/false);
		}
		/*
		 * If we have already been replaced by a spare, but that spare
		 * is broken, we must spare the spare, not the original device.
		 */
		oldstr = replaced.GUIDString();
		syslog(LOG_INFO, "CaseFile::Replace(%s->%s): sparing "
		    "broken spare %s instead", VdevGUIDString().c_str(),
		    path, oldstr.c_str());
	}

	/*
	 * Build a root vdev/leaf vdev configuration suitable for
	 * zpool_vdev_attach. Only enough data for the kernel to find
	 * the device (i.e. type and disk device node path) are needed.
	 */
	nvroot = NULL;
	newvd = NULL;

	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0
	 || nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		syslog(LOG_ERR, "Replace vdev(%s/%s): Unable to allocate "
		    "configuration data.", poolname, oldstr.c_str());
		if (nvroot != NULL)
			nvlist_free(nvroot);
		return (false);
	}
	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, vdev_type) != 0
	 || nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0
	 || nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0
	 || nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
				    &newvd, 1) != 0) {
		syslog(LOG_ERR, "Replace vdev(%s/%s): Unable to initialize "
		    "configuration data.", poolname, oldstr.c_str());
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return (true);
	}

	/* Data was copied when added to the root vdev. */
	nvlist_free(newvd);

	retval = (zpool_vdev_attach(zhp, oldstr.c_str(), path, nvroot,
	    /*replace*/B_TRUE, /*rebuild*/ B_FALSE) == 0);
	if (retval)
		syslog(LOG_INFO, "Replacing vdev(%s/%s) with %s\n",
		    poolname, oldstr.c_str(), path);
	else
		syslog(LOG_ERR, "Replace vdev(%s/%s): %s: %s\n",
		    poolname, oldstr.c_str(), libzfs_error_action(g_zfsHandle),
		    libzfs_error_description(g_zfsHandle));
	nvlist_free(nvroot);

	return (retval);
}

/* Does the argument event refer to a checksum error? */
static bool
IsChecksumEvent(const Event* const event)
{
	return ("ereport.fs.zfs.checksum" == event->Value("type"));
}

/* Does the argument event refer to an IO error? */
static bool
IsIOEvent(const Event* const event)
{
	return ("ereport.fs.zfs.io" == event->Value("type"));
}

bool
CaseFile::ShouldDegrade() const
{
	return (std::count_if(m_events.begin(), m_events.end(),
			      IsChecksumEvent) > ZFS_DEGRADE_IO_COUNT);
}

bool
CaseFile::ShouldFault() const
{
	return (std::count_if(m_events.begin(), m_events.end(),
			      IsIOEvent) > ZFS_DEGRADE_IO_COUNT);
}

nvlist_t *
CaseFile::CaseVdev(zpool_handle_t *zhp) const
{
	return (VdevIterator(zhp).Find(VdevGUID()));
}