/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault.  These are kept in a global list.  Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device-level injection is done using the 'zi_guid' field.  If this is set,
 * it means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */

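/*
 * Illustrative sketch only; the pool name and field values below are
 * hypothetical.  In practice zinject(1M) builds a zinject_record_t in
 * userland and hands it to the kernel through the ZFS ioctl interface,
 * which ultimately calls zio_inject_fault() below:
 *
 *	zinject_record_t rec = { 0 };
 *	int id, error;
 *
 *	rec.zi_objset = 21;		objset of the target dataset
 *	rec.zi_object = 6;		object to damage
 *	rec.zi_level = 0;		data (level-zero) blocks
 *	rec.zi_start = 0;
 *	rec.zi_end = -1ULL;		i.e. every block of the object
 *	rec.zi_error = EIO;		error to inject on read
 *	rec.zi_freq = 0;		zero means "always inject"
 *
 *	error = zio_inject_fault("tank", ZINJECT_FLUSH_ARC, &id, &rec);
 *
 * The identifier returned in 'id' is later passed to zio_clear_fault() to
 * remove the handler again.
 */
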
#include <sys/arc.h>
#include <sys/zio_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>

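/*
 * Count of registered injection handlers.  Consumers treat a nonzero value
 * as the cheap switch that enables injection checks in the I/O path.
 */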
uint32_t zio_injection_enabled;

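/*
 * One registered injection record, linked on the global inject_handlers
 * list below.  The zi_spa reference keeps the pool from being deleted or
 * exported while the handler exists.
 */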
typedef struct inject_handler {
	int			zi_id;
	spa_t			*zi_spa;
	zinject_record_t	zi_record;
	list_node_t		zi_link;
} inject_handler_t;

static list_t inject_handlers;
static krwlock_t inject_lock;
static int inject_next_id = 1;

/*
 * Returns true if the given record matches the I/O in progress.
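 * A zi_freq of zero means a matching record always fires; otherwise zi_freq
 * is treated as a percentage (0-100) chance of injecting the fault.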
 */
static boolean_t
zio_match_handler(zbookmark_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == 0 && record->zi_objset == 0 &&
	    record->zi_object == 0) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (record->zi_freq == 0 ||
			    spa_get_random(100) < record->zi_freq);
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    error == record->zi_error)
		return (record->zi_freq == 0 ||
		    spa_get_random(100) < record->zi_freq);

	return (B_FALSE);
}

/*
 * Determine if the I/O in question should return failure.  Returns the errno
 * to be returned to the caller, or zero if no injection handler applies.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa)
			continue;

		/* Ignore device errors */
		if (handler->zi_record.zi_guid != 0)
			continue;

		/* If this handler matches, return the requested error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label.  Currently, we
 * allow error injection in either the nvlist or the uberblock region
 * of the vdev label.
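 *
 * A label injection handler is keyed on zi_guid plus a nonzero
 * zi_start/zi_end range; the range gives offsets within a single label
 * and is re-based onto whichever label the write targets.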
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	/*
	 * We only care about physical I/Os.
	 */
	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL))
		return (0);

	if (offset + zio->io_size > VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		/* Ignore device-only faults */
		if (handler->zi_record.zi_start == 0)
			continue;

		/*
		 * The injection region is a range of offsets relative to a
		 * vdev label.  We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}

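/*
 * Determine whether a device-level injection handler applies to this vdev
 * and, if so, which error should be returned.  Returns zero when no
 * handler matches.
 */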
int
zio_handle_device_injection(vdev_t *vd, int error)
{
	inject_handler_t *handler;
	int ret = 0;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore label-specific faults */
		if (handler->zi_record.zi_start != 0)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_error == error) {
				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (error == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;
				ret = error;
				break;
			}
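			/*
			 * A handler registered with ENXIO converts any other
			 * error being tested against this vdev into EIO.
			 */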
			if (handler->zi_record.zi_error == ENXIO) {
				ret = EIO;
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Create a new handler for the given record.  We add it to the list, adding
 * a reference to the spa_t in the process.  We also increment
 * zio_injection_enabled, which is the global switch that enables fault
 * injection in the I/O pipeline.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_addref() will add an injection reference, which
		 * will prevent the pool from being removed from the namespace
		 * while still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (ENOENT);

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		rw_enter(&inject_lock, RW_WRITER);

		*id = handler->zi_id = inject_next_id++;
		handler->zi_spa = spa;
		handler->zi_record = *record;
		list_insert_tail(&inject_handlers, handler);
		atomic_add_32(&zio_injection_enabled, 1);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer.  Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		arc_flush(NULL);

	return (0);
}

/*
 * Returns the next record with an ID greater than that supplied to the
 * function.  Used to iterate over all handlers in the system.
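 * Callers typically start with *id set to zero and call this repeatedly
 * until ENOENT is returned.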
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = ENOENT;
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;
	int ret;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		ret = ENOENT;
	} else {
		list_remove(&inject_handlers, handler);
		spa_inject_delref(handler->zi_spa);
		kmem_free(handler, sizeof (inject_handler_t));
		atomic_add_32(&zio_injection_enabled, -1);
		ret = 0;
	}

	rw_exit(&inject_lock);

	return (ret);
}

void
zio_inject_init(void)
{
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
}