/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * FMD Dynamic Reconfiguration (DR) Event Handling
 *
 * Fault manager scheme plug-ins must track characteristics of individual
 * pieces of hardware.  As these components can be added or removed by a DR
 * operation, we need to provide a means by which plug-ins can determine when
 * they need to re-examine the current configuration.  We provide a simple
 * mechanism whereby this task can be implemented using lazy evaluation: a
 * 64-bit generation counter is maintained and incremented on *any* DR
 * operation.  Schemes can store the generation number in scheme-specific
 * data structures, and then revalidate their contents if the current
 * generation number has changed since the resource information was cached
 * (an illustrative sketch of this pattern appears at the end of this file).
 * This method saves time, avoids the complexity of direct participation in
 * DR, avoids the need for resource-specific processing of DR events, and is
 * relatively easy to port to other systems that support dynamic
 * reconfiguration.
 *
 * The DR generation is only incremented in response to hardware changes.
 * Since ASRUs can be in any scheme, including the device scheme, we must
 * also be aware of software configuration changes that may affect the
 * resource cache.  In addition, we take a snapshot of the topology whenever
 * a reconfiguration event occurs and notify any modules of the change.
 */

#include <sys/types.h>
#include <sys/sunddi.h>
#include <sys/sysevent/dr.h>
#include <sys/sysevent/eventdefs.h>

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libsysevent.h>

#undef MUTEX_HELD
#undef RW_READ_HELD
#undef RW_WRITE_HELD

#include <fmd_asru.h>
#include <fmd_error.h>
#include <fmd_event.h>
#include <fmd_fmri.h>
#include <fmd_module.h>
#include <fmd_subr.h>
#include <fmd_topo.h>
#include <fmd.h>

void
fmd_dr_event(sysevent_t *sep)
{
	uint64_t gen;
	fmd_event_t *e;
	const char *class = sysevent_get_class_name(sep);
	const char *subclass = sysevent_get_subclass_name(sep);
	hrtime_t evtime;
	fmd_topo_t *ftp, *prev;
	boolean_t update_topo = B_FALSE;

	if (strcmp(class, EC_DR) == 0) {
		if (strcmp(subclass, ESC_DR_AP_STATE_CHANGE) != 0 &&
		    strcmp(subclass, ESC_DR_TARGET_STATE_CHANGE) != 0)
			return;

		/*
		 * The DR generation is only changed in response to DR events.
		 */
		update_topo = B_TRUE;

		(void) pthread_mutex_lock(&fmd.d_stats_lock);
		gen = fmd.d_stats->ds_dr_gen.fmds_value.ui64++;
		(void) pthread_mutex_unlock(&fmd.d_stats_lock);

		TRACE((FMD_DBG_XPRT, "dr event %p, gen=%llu",
		    (void *)sep, gen));
	} else if (strcmp(class, EC_DEVFS) == 0) {
		/*
		 * A devfs configuration event can change the topology,
		 * as disk nodes only exist when the device is configured.
		 */
		update_topo = B_TRUE;
	} else if (strcmp(class, EC_ZFS) == 0) {
		/*
		 * These events can change the resource cache.
		 */
		if (strcmp(subclass, ESC_ZFS_VDEV_CLEAR) != 0 &&
		    strcmp(subclass, ESC_ZFS_VDEV_REMOVE) != 0 &&
		    strcmp(subclass, ESC_ZFS_POOL_DESTROY) != 0)
			return;
	} else if (strcmp(class, EC_DEV_ADD) == 0 ||
	    strcmp(class, EC_DEV_REMOVE) == 0) {
		if (strcmp(subclass, ESC_DISK) != 0)
			return;
	}

	/*
	 * Take a topo snapshot and notify modules of the change.  Picking an
	 * accurate time here is difficult.  On one hand, we have the
	 * timestamp of the underlying sysevent, indicating when the
	 * reconfiguration event occurred.  On the other hand, we are taking
	 * the topo snapshot asynchronously, and hence the timestamp of the
	 * snapshot is the current time.  Pretending this topo snapshot was
	 * valid at the time the sysevent was posted seems wrong, so we
	 * instead opt for the current time as an upper bound on the snapshot
	 * validity.
	 *
	 * Along these lines, we keep track of the last time we dispatched a
	 * topo snapshot.  If the sysevent occurred before the last topo
	 * snapshot, then don't bother dispatching another topo change event.
	 * We've already indicated (to the best of our ability) the change in
	 * topology.  This prevents endless topo snapshots in response to a
	 * flurry of sysevents.
	 */
	sysevent_get_time(sep, &evtime);
	prev = fmd_topo_hold();
	if (evtime <= prev->ft_time &&
	    fmd.d_clockops == &fmd_timeops_native) {
		fmd_topo_rele(prev);
		return;
	}
	fmd_topo_rele(prev);

	if (update_topo)
		fmd_topo_update();

	ftp = fmd_topo_hold();
	e = fmd_event_create(FMD_EVT_TOPO, ftp->ft_time, NULL, ftp);
	fmd_modhash_dispatch(fmd.d_mod_hash, e);
}
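
/*
 * Illustrative sketch (not part of the original source): one way a scheme
 * plug-in could apply the lazy-evaluation pattern described in the comment
 * at the top of this file.  The example_cache_t structure and the two
 * example_* helpers below are hypothetical names invented for this sketch;
 * the locked read of ds_dr_gen mirrors the increment performed in
 * fmd_dr_event() above.
 */
typedef struct example_cache {
	uint64_t ec_drgen;	/* DR generation when ec_data was built */
	void *ec_data;		/* scheme-specific cached resource data */
} example_cache_t;

static uint64_t
example_get_drgen(void)
{
	uint64_t gen;

	(void) pthread_mutex_lock(&fmd.d_stats_lock);
	gen = fmd.d_stats->ds_dr_gen.fmds_value.ui64;
	(void) pthread_mutex_unlock(&fmd.d_stats_lock);

	return (gen);
}

static void *
example_cache_lookup(example_cache_t *ecp, void *(*rebuild)(void))
{
	uint64_t gen = example_get_drgen();

	/*
	 * Lazy revalidation: rebuild the cached data only if a DR
	 * operation has bumped the generation since it was last cached.
	 */
	if (ecp->ec_data == NULL || ecp->ec_drgen != gen) {
		ecp->ec_data = rebuild();
		ecp->ec_drgen = gen;
	}

	return (ecp->ec_data);
}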