xref: /titanic_50/usr/src/uts/common/fs/zfs/metaslab.c (revision 0e42dee69ed771bf604dd1789fca9d77b5bbe302)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/zfs_context.h>
29 #include <sys/spa_impl.h>
30 #include <sys/dmu.h>
31 #include <sys/dmu_tx.h>
32 #include <sys/space_map.h>
33 #include <sys/metaslab_impl.h>
34 #include <sys/vdev_impl.h>
35 #include <sys/zio.h>
36 
37 /*
38  * ==========================================================================
39  * Metaslab classes
40  * ==========================================================================
41  */
42 metaslab_class_t *
43 metaslab_class_create(void)
44 {
45 	metaslab_class_t *mc;
46 
47 	mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
48 
49 	mc->mc_rotor = NULL;
50 
51 	return (mc);
52 }
53 
54 void
55 metaslab_class_destroy(metaslab_class_t *mc)
56 {
57 	metaslab_group_t *mg;
58 
59 	while ((mg = mc->mc_rotor) != NULL) {
60 		metaslab_class_remove(mc, mg);
61 		metaslab_group_destroy(mg);
62 	}
63 
64 	kmem_free(mc, sizeof (metaslab_class_t));
65 }
66 
67 void
68 metaslab_class_add(metaslab_class_t *mc, metaslab_group_t *mg)
69 {
70 	metaslab_group_t *mgprev, *mgnext;
71 
72 	ASSERT(mg->mg_class == NULL);
73 
74 	if ((mgprev = mc->mc_rotor) == NULL) {
75 		mg->mg_prev = mg;
76 		mg->mg_next = mg;
77 	} else {
78 		mgnext = mgprev->mg_next;
79 		mg->mg_prev = mgprev;
80 		mg->mg_next = mgnext;
81 		mgprev->mg_next = mg;
82 		mgnext->mg_prev = mg;
83 	}
84 	mc->mc_rotor = mg;
85 	mg->mg_class = mc;
86 }
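/*
 * Illustrative note (added, not from the original source): metaslab groups
 * form a circular, doubly linked list and mc_rotor always points at the
 * group added most recently.  For example, adding groups A, B and C in that
 * order produces the ring A -> B -> C -> A with mc_rotor == C; allocation
 * then walks the ring from the rotor via mg_next.
 */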
87 
88 void
89 metaslab_class_remove(metaslab_class_t *mc, metaslab_group_t *mg)
90 {
91 	metaslab_group_t *mgprev, *mgnext;
92 
93 	ASSERT(mg->mg_class == mc);
94 
95 	mgprev = mg->mg_prev;
96 	mgnext = mg->mg_next;
97 
98 	if (mg == mgnext) {
99 		mc->mc_rotor = NULL;
100 	} else {
101 		mc->mc_rotor = mgnext;
102 		mgprev->mg_next = mgnext;
103 		mgnext->mg_prev = mgprev;
104 	}
105 
106 	mg->mg_prev = NULL;
107 	mg->mg_next = NULL;
108 	mg->mg_class = NULL;
109 }
110 
111 /*
112  * ==========================================================================
113  * Metaslab groups
114  * ==========================================================================
115  */
116 static int
117 metaslab_compare(const void *x1, const void *x2)
118 {
119 	const metaslab_t *m1 = x1;
120 	const metaslab_t *m2 = x2;
121 
122 	if (m1->ms_weight < m2->ms_weight)
123 		return (1);
124 	if (m1->ms_weight > m2->ms_weight)
125 		return (-1);
126 
127 	/*
128 	 * If the weights are identical, use the offset to force uniqueness.
129 	 */
130 	if (m1->ms_map.sm_start < m2->ms_map.sm_start)
131 		return (-1);
132 	if (m1->ms_map.sm_start > m2->ms_map.sm_start)
133 		return (1);
134 
135 	ASSERT3P(m1, ==, m2);
136 
137 	return (0);
138 }
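/*
 * Note (added for clarity): this comparator orders the per-group AVL tree by
 * descending weight, breaking ties by ascending start offset, so avl_first()
 * in metaslab_group_alloc() always yields the most desirable candidate.
 */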
139 
140 metaslab_group_t *
141 metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
142 {
143 	metaslab_group_t *mg;
144 
145 	mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
146 	mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
147 	avl_create(&mg->mg_metaslab_tree, metaslab_compare,
148 	    sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
149 	mg->mg_aliquot = 2ULL << 20;		/* XXX -- tweak me */
150 	mg->mg_vd = vd;
151 	metaslab_class_add(mc, mg);
152 
153 	return (mg);
154 }
155 
156 void
157 metaslab_group_destroy(metaslab_group_t *mg)
158 {
159 	avl_destroy(&mg->mg_metaslab_tree);
160 	mutex_destroy(&mg->mg_lock);
161 	kmem_free(mg, sizeof (metaslab_group_t));
162 }
163 
164 static void
165 metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
166 {
167 	mutex_enter(&mg->mg_lock);
168 	ASSERT(msp->ms_group == NULL);
169 	msp->ms_group = mg;
170 	msp->ms_weight = 0;
171 	avl_add(&mg->mg_metaslab_tree, msp);
172 	mutex_exit(&mg->mg_lock);
173 }
174 
175 static void
176 metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
177 {
178 	mutex_enter(&mg->mg_lock);
179 	ASSERT(msp->ms_group == mg);
180 	avl_remove(&mg->mg_metaslab_tree, msp);
181 	msp->ms_group = NULL;
182 	mutex_exit(&mg->mg_lock);
183 }
184 
185 static void
186 metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
187 {
188 	ASSERT(MUTEX_HELD(&msp->ms_lock));
189 
190 	mutex_enter(&mg->mg_lock);
191 	ASSERT(msp->ms_group == mg);
192 	avl_remove(&mg->mg_metaslab_tree, msp);
193 	msp->ms_weight = weight;
194 	avl_add(&mg->mg_metaslab_tree, msp);
195 	mutex_exit(&mg->mg_lock);
196 }
197 
198 /*
199  * ==========================================================================
200  * The first-fit block allocator
201  * ==========================================================================
202  */
203 static void
204 metaslab_ff_load(space_map_t *sm)
205 {
206 	ASSERT(sm->sm_ppd == NULL);
207 	sm->sm_ppd = kmem_zalloc(64 * sizeof (uint64_t), KM_SLEEP);
208 }
209 
210 static void
211 metaslab_ff_unload(space_map_t *sm)
212 {
213 	kmem_free(sm->sm_ppd, 64 * sizeof (uint64_t));
214 	sm->sm_ppd = NULL;
215 }
216 
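/*
 * Explanatory sketch (added): sm_ppd holds 64 cursors, one per power-of-two
 * alignment class.  Below, (size & -size) isolates the lowest set bit of the
 * size, which is the request's natural alignment, and highbit() of that value
 * selects the cursor.  As a hypothetical example, a 0x6000-byte request has
 * align == 0x2000, so it uses cursor[highbit(0x2000) - 1] == cursor[13] and
 * the search resumes wherever the previous 8K-aligned allocation left off.
 */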
217 static uint64_t
218 metaslab_ff_alloc(space_map_t *sm, uint64_t size)
219 {
220 	avl_tree_t *t = &sm->sm_root;
221 	uint64_t align = size & -size;
222 	uint64_t *cursor = (uint64_t *)sm->sm_ppd + highbit(align) - 1;
223 	space_seg_t *ss, ssearch;
224 	avl_index_t where;
225 
226 	ssearch.ss_start = *cursor;
227 	ssearch.ss_end = *cursor + size;
228 
229 	ss = avl_find(t, &ssearch, &where);
230 	if (ss == NULL)
231 		ss = avl_nearest(t, where, AVL_AFTER);
232 
233 	while (ss != NULL) {
234 		uint64_t offset = P2ROUNDUP(ss->ss_start, align);
235 
236 		if (offset + size <= ss->ss_end) {
237 			*cursor = offset + size;
238 			return (offset);
239 		}
240 		ss = AVL_NEXT(t, ss);
241 	}
242 
243 	/*
244 	 * If we know we've searched the whole map (*cursor == 0), give up.
245 	 * Otherwise, reset the cursor to the beginning and try again.
246 	 */
247 	if (*cursor == 0)
248 		return (-1ULL);
249 
250 	*cursor = 0;
251 	return (metaslab_ff_alloc(sm, size));
252 }
253 
254 /* ARGSUSED */
255 static void
256 metaslab_ff_claim(space_map_t *sm, uint64_t start, uint64_t size)
257 {
258 	/* No need to update cursor */
259 }
260 
261 /* ARGSUSED */
262 static void
263 metaslab_ff_free(space_map_t *sm, uint64_t start, uint64_t size)
264 {
265 	/* No need to update cursor */
266 }
267 
268 static space_map_ops_t metaslab_ff_ops = {
269 	metaslab_ff_load,
270 	metaslab_ff_unload,
271 	metaslab_ff_alloc,
272 	metaslab_ff_claim,
273 	metaslab_ff_free
274 };
275 
276 /*
277  * ==========================================================================
278  * Metaslabs
279  * ==========================================================================
280  */
281 metaslab_t *
282 metaslab_init(metaslab_group_t *mg, space_map_obj_t *smo,
283 	uint64_t start, uint64_t size, uint64_t txg)
284 {
285 	vdev_t *vd = mg->mg_vd;
286 	metaslab_t *msp;
287 
288 	msp = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
289 
290 	msp->ms_smo_syncing = *smo;
291 
292 	/*
293 	 * We create the main space map here, but we don't create the
294 	 * allocmaps and freemaps until metaslab_sync_done().  This serves
295 	 * two purposes: it allows metaslab_sync_done() to detect the
296 	 * addition of new space; and for debugging, it ensures that we'd
297 	 * data fault on any attempt to use this metaslab before it's ready.
298 	 */
299 	space_map_create(&msp->ms_map, start, size,
300 	    vd->vdev_ashift, &msp->ms_lock);
301 
302 	metaslab_group_add(mg, msp);
303 
304 	/*
305 	 * If we're opening an existing pool (txg == 0) or creating
306 	 * a new one (txg == TXG_INITIAL), all space is available now.
307 	 * If we're adding space to an existing pool, the new space
308 	 * does not become available until after this txg has synced.
309 	 */
310 	if (txg <= TXG_INITIAL)
311 		metaslab_sync_done(msp, 0);
312 
313 	if (txg != 0) {
314 		/*
315 		 * The vdev is dirty, but the metaslab isn't -- it just needs
316 		 * to have metaslab_sync_done() invoked from vdev_sync_done().
317 		 * [We could just dirty the metaslab, but that would cause us
318 		 * to allocate a space map object for it, which is wasteful
319 		 * and would mess up the locality logic in metaslab_weight().]
320 		 */
321 		ASSERT(TXG_CLEAN(txg) == spa_last_synced_txg(vd->vdev_spa));
322 		vdev_dirty(vd, 0, NULL, txg);
323 		vdev_dirty(vd, VDD_METASLAB, msp, TXG_CLEAN(txg));
324 	}
325 
326 	return (msp);
327 }
328 
329 void
330 metaslab_fini(metaslab_t *msp)
331 {
332 	metaslab_group_t *mg = msp->ms_group;
333 	int t;
334 
335 	vdev_space_update(mg->mg_vd, -msp->ms_map.sm_size,
336 	    -msp->ms_smo.smo_alloc);
337 
338 	metaslab_group_remove(mg, msp);
339 
340 	mutex_enter(&msp->ms_lock);
341 
342 	space_map_unload(&msp->ms_map);
343 	space_map_destroy(&msp->ms_map);
344 
345 	for (t = 0; t < TXG_SIZE; t++) {
346 		space_map_destroy(&msp->ms_allocmap[t]);
347 		space_map_destroy(&msp->ms_freemap[t]);
348 	}
349 
350 	mutex_exit(&msp->ms_lock);
351 
352 	kmem_free(msp, sizeof (metaslab_t));
353 }
354 
355 #define	METASLAB_WEIGHT_PRIMARY		(1ULL << 63)
356 #define	METASLAB_WEIGHT_SECONDARY	(1ULL << 62)
357 #define	METASLAB_ACTIVE_MASK		\
358 	(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
359 #define	METASLAB_SMO_BONUS_MULTIPLIER	2
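/*
 * Layout note (added): a metaslab's weight is a plain 64-bit integer whose
 * low bits are derived from free space and locality, while bits 63 and 62
 * are reserved for the PRIMARY/SECONDARY "active" flags above.  Since any
 * real free-space weight is far below 1ULL << 62, an active metaslab always
 * sorts ahead of every inactive one in its group's AVL tree.
 */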
360 
361 static uint64_t
362 metaslab_weight(metaslab_t *msp)
363 {
364 	metaslab_group_t *mg = msp->ms_group;
365 	space_map_t *sm = &msp->ms_map;
366 	space_map_obj_t *smo = &msp->ms_smo;
367 	vdev_t *vd = mg->mg_vd;
368 	uint64_t weight, space;
369 
370 	ASSERT(MUTEX_HELD(&msp->ms_lock));
371 
372 	/*
373 	 * The baseline weight is the metaslab's free space.
374 	 */
375 	space = sm->sm_size - smo->smo_alloc;
376 	weight = space;
377 
378 	/*
379 	 * Modern disks have uniform bit density and constant angular velocity.
380 	 * Therefore, the outer recording zones are faster (higher bandwidth)
381 	 * than the inner zones by the ratio of outer to inner track diameter,
382 	 * which is typically around 2:1.  We account for this by assigning
383 	 * higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
384 	 * In effect, this means that we'll select the metaslab with the most
385 	 * free bandwidth rather than simply the one with the most free space.
386 	 */
387 	weight = 2 * weight -
388 	    ((sm->sm_start >> vd->vdev_ms_shift) * weight) / vd->vdev_ms_count;
389 	ASSERT(weight >= space && weight <= 2 * space);
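	/*
	 * Worked example (illustrative, not from the original source): with
	 * 100 metaslabs on the vdev, the first metaslab keeps weight ==
	 * 2 * space, metaslab 50 gets about 1.5 * space, and the last one
	 * gets just over 1 * space, reflecting the ~2:1 outer-to-inner
	 * bandwidth ratio described above.
	 */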
390 
391 	/*
392 	 * For locality, assign higher weight to metaslabs we've used before.
393 	 */
394 	if (smo->smo_object != 0)
395 		weight *= METASLAB_SMO_BONUS_MULTIPLIER;
396 	ASSERT(weight >= space &&
397 	    weight <= 2 * METASLAB_SMO_BONUS_MULTIPLIER * space);
398 
399 	/*
400 	 * If this metaslab is one we're actively using, adjust its weight to
401 	 * make it preferable to any inactive metaslab so we'll polish it off.
402 	 */
403 	weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
404 
405 	return (weight);
406 }
407 
408 static int
409 metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
410 {
411 	space_map_t *sm = &msp->ms_map;
412 
413 	ASSERT(MUTEX_HELD(&msp->ms_lock));
414 
415 	if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
416 		int error = space_map_load(sm, &metaslab_ff_ops,
417 		    SM_FREE, &msp->ms_smo,
418 		    msp->ms_group->mg_vd->vdev_spa->spa_meta_objset);
419 		if (error) {
420 			metaslab_group_sort(msp->ms_group, msp, 0);
421 			return (error);
422 		}
423 		metaslab_group_sort(msp->ms_group, msp,
424 		    msp->ms_weight | activation_weight);
425 	}
426 	ASSERT(sm->sm_loaded);
427 	ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
428 
429 	return (0);
430 }
431 
432 static void
433 metaslab_passivate(metaslab_t *msp, uint64_t size)
434 {
435 	metaslab_group_sort(msp->ms_group, msp, MIN(msp->ms_weight, size));
436 	ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
437 }
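/*
 * Note (added): MIN(ms_weight, size) necessarily clears the active bits,
 * because any real allocation size is far smaller than
 * METASLAB_WEIGHT_SECONDARY, which is what the ASSERT above verifies.
 * metaslab_group_alloc() passes size - 1 after a failed allocation so the
 * metaslab then sorts below the "ms_weight < size" cutoff for that request.
 */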
438 
439 /*
440  * Write a metaslab to disk in the context of the specified transaction group.
441  */
442 void
443 metaslab_sync(metaslab_t *msp, uint64_t txg)
444 {
445 	vdev_t *vd = msp->ms_group->mg_vd;
446 	spa_t *spa = vd->vdev_spa;
447 	objset_t *mos = spa->spa_meta_objset;
448 	space_map_t *allocmap = &msp->ms_allocmap[txg & TXG_MASK];
449 	space_map_t *freemap = &msp->ms_freemap[txg & TXG_MASK];
450 	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
451 	space_map_t *sm = &msp->ms_map;
452 	space_map_obj_t *smo = &msp->ms_smo_syncing;
453 	dmu_buf_t *db;
454 	dmu_tx_t *tx;
455 	int t;
456 
457 	tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
458 
459 	/*
460 	 * The only state that can actually be changing concurrently with
461 	 * metaslab_sync() is the metaslab's ms_map.  No other thread can
462 	 * be modifying this txg's allocmap, freemap, freed_map, or smo.
463 	 * Therefore, we only hold ms_lock to satisfy space_map ASSERTs.
464 	 * We drop it whenever we call into the DMU, because the DMU
465 	 * can call down to us (e.g. via zio_free()) at any time.
466 	 */
467 	mutex_enter(&msp->ms_lock);
468 
469 	if (smo->smo_object == 0) {
470 		ASSERT(smo->smo_objsize == 0);
471 		ASSERT(smo->smo_alloc == 0);
472 		mutex_exit(&msp->ms_lock);
473 		smo->smo_object = dmu_object_alloc(mos,
474 		    DMU_OT_SPACE_MAP, 1 << SPACE_MAP_BLOCKSHIFT,
475 		    DMU_OT_SPACE_MAP_HEADER, sizeof (*smo), tx);
476 		ASSERT(smo->smo_object != 0);
477 		dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
478 		    (sm->sm_start >> vd->vdev_ms_shift),
479 		    sizeof (uint64_t), &smo->smo_object, tx);
480 		mutex_enter(&msp->ms_lock);
481 	}
482 
483 	space_map_walk(freemap, space_map_add, freed_map);
484 
485 	if (sm->sm_loaded && spa_sync_pass(spa) == 1 && smo->smo_objsize >=
486 	    2 * sizeof (uint64_t) * avl_numnodes(&sm->sm_root)) {
487 		/*
488 		 * The in-core space map representation is twice as compact
489 		 * as the on-disk one, so it's time to condense the latter
490 		 * by generating a pure allocmap from first principles.
491 		 *
492 		 * This metaslab is 100% allocated,
493 		 * minus the content of the in-core map (sm),
494 		 * minus what's been freed this txg (freed_map),
495 		 * minus allocations from txgs in the future
496 		 * (because they haven't been committed yet).
497 		 */
498 		space_map_vacate(allocmap, NULL, NULL);
499 		space_map_vacate(freemap, NULL, NULL);
500 
501 		space_map_add(allocmap, allocmap->sm_start, allocmap->sm_size);
502 
503 		space_map_walk(sm, space_map_remove, allocmap);
504 		space_map_walk(freed_map, space_map_remove, allocmap);
505 
506 		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
507 			space_map_walk(&msp->ms_allocmap[(txg + t) & TXG_MASK],
508 			    space_map_remove, allocmap);
509 
510 		mutex_exit(&msp->ms_lock);
511 		space_map_truncate(smo, mos, tx);
512 		mutex_enter(&msp->ms_lock);
513 	}
514 
515 	space_map_sync(allocmap, SM_ALLOC, smo, mos, tx);
516 	space_map_sync(freemap, SM_FREE, smo, mos, tx);
517 
518 	mutex_exit(&msp->ms_lock);
519 
520 	VERIFY(0 == dmu_bonus_hold(mos, smo->smo_object, FTAG, &db));
521 	dmu_buf_will_dirty(db, tx);
522 	ASSERT3U(db->db_size, ==, sizeof (*smo));
523 	bcopy(smo, db->db_data, db->db_size);
524 	dmu_buf_rele(db, FTAG);
525 
526 	dmu_tx_commit(tx);
527 }
528 
529 /*
530  * Called after a transaction group has completely synced to mark
531  * all of the metaslab's free space as usable.
532  */
533 void
534 metaslab_sync_done(metaslab_t *msp, uint64_t txg)
535 {
536 	space_map_obj_t *smo = &msp->ms_smo;
537 	space_map_obj_t *smosync = &msp->ms_smo_syncing;
538 	space_map_t *sm = &msp->ms_map;
539 	space_map_t *freed_map = &msp->ms_freemap[TXG_CLEAN(txg) & TXG_MASK];
540 	metaslab_group_t *mg = msp->ms_group;
541 	vdev_t *vd = mg->mg_vd;
542 	int t;
543 
544 	mutex_enter(&msp->ms_lock);
545 
546 	/*
547 	 * If this metaslab is just becoming available, initialize its
548 	 * allocmaps and freemaps and add its capacity to the vdev.
549 	 */
550 	if (freed_map->sm_size == 0) {
551 		for (t = 0; t < TXG_SIZE; t++) {
552 			space_map_create(&msp->ms_allocmap[t], sm->sm_start,
553 			    sm->sm_size, sm->sm_shift, sm->sm_lock);
554 			space_map_create(&msp->ms_freemap[t], sm->sm_start,
555 			    sm->sm_size, sm->sm_shift, sm->sm_lock);
556 		}
557 		vdev_space_update(vd, sm->sm_size, 0);
558 	}
559 
560 	vdev_space_update(vd, 0, smosync->smo_alloc - smo->smo_alloc);
561 
562 	ASSERT(msp->ms_allocmap[txg & TXG_MASK].sm_space == 0);
563 	ASSERT(msp->ms_freemap[txg & TXG_MASK].sm_space == 0);
564 
565 	/*
566 	 * If there's a space_map_load() in progress, wait for it to complete
567 	 * so that we have a consistent view of the in-core space map.
568 	 * Then, add everything we freed in this txg to the map.
569 	 */
570 	space_map_load_wait(sm);
571 	space_map_vacate(freed_map, sm->sm_loaded ? space_map_free : NULL, sm);
572 
573 	*smo = *smosync;
574 
575 	/*
576 	 * If the map is loaded but no longer active, evict it as soon as all
577 	 * future allocations have synced.  (If we unloaded it now and then
578 	 * loaded a moment later, the map wouldn't reflect those allocations.)
579 	 */
580 	if (sm->sm_loaded && (msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
581 		int evictable = 1;
582 
583 		for (t = 1; t < TXG_CONCURRENT_STATES; t++)
584 			if (msp->ms_allocmap[(txg + t) & TXG_MASK].sm_space)
585 				evictable = 0;
586 
587 		if (evictable)
588 			space_map_unload(sm);
589 	}
590 
591 	metaslab_group_sort(mg, msp, metaslab_weight(msp));
592 
593 	mutex_exit(&msp->ms_lock);
594 }
595 
596 static uint64_t
597 metaslab_distance(metaslab_t *msp, dva_t *dva)
598 {
599 	uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
600 	uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
601 	uint64_t start = msp->ms_map.sm_start >> ms_shift;
602 
603 	if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
604 		return (1ULL << 63);
605 
606 	if (offset < start)
607 		return ((start - offset) << ms_shift);
608 	if (offset > start)
609 		return ((offset - start) << ms_shift);
610 	return (0);
611 }
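/*
 * Note (added): metaslab_distance() returns the byte distance, rounded to
 * whole metaslabs, between this metaslab and an existing DVA on the same
 * top-level vdev, or 1ULL << 63 when the DVA lives on a different vdev, so
 * a different vdev trivially satisfies any ditto-block distance requirement.
 */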
612 
613 static uint64_t
614 metaslab_group_alloc(metaslab_group_t *mg, uint64_t size, uint64_t txg,
615     uint64_t min_distance, dva_t *dva, int d)
616 {
617 	metaslab_t *msp = NULL;
618 	uint64_t offset = -1ULL;
619 	avl_tree_t *t = &mg->mg_metaslab_tree;
620 	uint64_t activation_weight;
621 	uint64_t target_distance;
622 	int i;
623 
624 	activation_weight = METASLAB_WEIGHT_PRIMARY;
625 	for (i = 0; i < d; i++)
626 		if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id)
627 			activation_weight = METASLAB_WEIGHT_SECONDARY;
628 
629 	for (;;) {
630 		mutex_enter(&mg->mg_lock);
631 		for (msp = avl_first(t); msp; msp = AVL_NEXT(t, msp)) {
632 			if (msp->ms_weight < size) {
633 				mutex_exit(&mg->mg_lock);
634 				return (-1ULL);
635 			}
636 
637 			if (activation_weight == METASLAB_WEIGHT_PRIMARY)
638 				break;
639 
640 			target_distance = min_distance +
641 			    (msp->ms_smo.smo_alloc ? 0 : min_distance >> 1);
642 
643 			for (i = 0; i < d; i++)
644 				if (metaslab_distance(msp, &dva[i]) <
645 				    target_distance)
646 					break;
647 			if (i == d)
648 				break;
649 		}
650 		mutex_exit(&mg->mg_lock);
651 		if (msp == NULL)
652 			return (-1ULL);
653 
654 		mutex_enter(&msp->ms_lock);
655 
656 		if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
657 		    activation_weight == METASLAB_WEIGHT_PRIMARY) {
658 			metaslab_passivate(msp,
659 			    (msp->ms_weight & ~METASLAB_ACTIVE_MASK) /
660 			    METASLAB_SMO_BONUS_MULTIPLIER);
661 			mutex_exit(&msp->ms_lock);
662 			continue;
663 		}
664 
665 		if (metaslab_activate(msp, activation_weight) != 0) {
666 			mutex_exit(&msp->ms_lock);
667 			continue;
668 		}
669 
670 		if ((offset = space_map_alloc(&msp->ms_map, size)) != -1ULL)
671 			break;
672 
673 		metaslab_passivate(msp, size - 1);
674 
675 		mutex_exit(&msp->ms_lock);
676 	}
677 
678 	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
679 		vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
680 
681 	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
682 
683 	mutex_exit(&msp->ms_lock);
684 
685 	return (offset);
686 }
687 
688 /*
689  * Allocate a block for the specified i/o.
690  */
691 static int
692 metaslab_alloc_dva(spa_t *spa, uint64_t psize, dva_t *dva, int d,
693     dva_t *hintdva, uint64_t txg)
694 {
695 	metaslab_group_t *mg, *rotor;
696 	metaslab_class_t *mc;
697 	vdev_t *vd;
698 	int dshift = 3;
699 	int all_zero;
700 	uint64_t offset = -1ULL;
701 	uint64_t asize;
702 	uint64_t distance;
703 
704 	ASSERT(!DVA_IS_VALID(&dva[d]));
705 
706 	mc = spa_metaslab_class_select(spa);
707 
708 	/*
709 	 * Start at the rotor and loop through all mgs until we find something.
710 	 * Note that there's no locking on mc_rotor or mc_allocated because
711 	 * nothing actually breaks if we miss a few updates -- we just won't
712 	 * allocate quite as evenly.  It all balances out over time.
713 	 *
714 	 * If we are doing ditto blocks, try to spread them across consecutive
715 	 * vdevs.  If we're forced to reuse a vdev before we've allocated
716 	 * all of our ditto blocks, then try to spread them out on that
717 	 * vdev as much as possible.  If that turns out not to be possible,
718 	 * gradually lower our standards until anything becomes acceptable.
719 	 * Also, allocating on consecutive vdevs (as opposed to random vdevs)
720 	 * gives us hope of containing our fault domains to something we're
721 	 * able to reason about.  Otherwise, any two top-level vdev failures
722 	 * will guarantee the loss of data.  With consecutive allocation,
723 	 * only two adjacent top-level vdev failures will result in data loss.
724 	 *
725 	 * If we are doing gang blocks (hintdva is non-NULL), try to keep
726 	 * ourselves on the same vdev as our gang block header.  That
727 	 * way, we can hope for locality in vdev_cache, plus it makes our
728 	 * fault domains something tractable.
729 	 */
730 	if (hintdva) {
731 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
732 		mg = vd->vdev_mg;
733 	} else if (d != 0) {
734 		vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
735 		mg = vd->vdev_mg->mg_next;
736 	} else {
737 		mg = mc->mc_rotor;
738 	}
739 	rotor = mg;
740 
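	/*
	 * Note (added): dshift starts at 3, so the first pass demands that
	 * ditto copies land at least vdev_asize / 8 apart.  Each time the
	 * whole rotor fails, dshift is incremented, halving that requirement;
	 * once the requirement has already collapsed to zero on every vdev
	 * (all_zero) and allocation still fails, we return ENOSPC.
	 */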
741 top:
742 	all_zero = B_TRUE;
743 	do {
744 		vd = mg->mg_vd;
745 
746 		distance = vd->vdev_asize >> dshift;
747 		if (distance <= (1ULL << vd->vdev_ms_shift))
748 			distance = 0;
749 		else
750 			all_zero = B_FALSE;
751 
752 		asize = vdev_psize_to_asize(vd, psize);
753 		ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
754 
755 		offset = metaslab_group_alloc(mg, asize, txg, distance, dva, d);
756 		if (offset != -1ULL) {
757 			/*
758 			 * If we've just selected this metaslab group,
759 			 * figure out whether the corresponding vdev is
760 			 * over- or under-used relative to the pool,
761 			 * and set an allocation bias to even it out.
762 			 */
763 			if (mc->mc_allocated == 0) {
764 				vdev_stat_t *vs = &vd->vdev_stat;
765 				uint64_t alloc, space;
766 				int64_t vu, su;
767 
768 				alloc = spa_get_alloc(spa);
769 				space = spa_get_space(spa);
770 
771 				/*
772 				 * Determine percent used in units of 0..1024.
773 				 * (This is just to avoid floating point.)
774 				 */
775 				vu = (vs->vs_alloc << 10) / (vs->vs_space + 1);
776 				su = (alloc << 10) / (space + 1);
777 
778 				/*
779 				 * Bias by at most +/- 25% of the aliquot.
780 				 */
781 				mg->mg_bias = ((su - vu) *
782 				    (int64_t)mg->mg_aliquot) / (1024 * 4);
783 			}
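			/*
			 * Worked example (illustrative): if this vdev is 80%
			 * full (vu == 819) while the pool overall is 40% full
			 * (su == 409), the bias is (409 - 819) * mg_aliquot /
			 * 4096, roughly -10% of the aliquot, so the rotor
			 * moves off this vdev sooner than usual.  Since
			 * |su - vu| can never exceed 1024, the divisor caps
			 * the bias at +/- 25%.
			 */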
784 
785 			if (atomic_add_64_nv(&mc->mc_allocated, asize) >=
786 			    mg->mg_aliquot + mg->mg_bias) {
787 				mc->mc_rotor = mg->mg_next;
788 				mc->mc_allocated = 0;
789 			}
790 
791 			DVA_SET_VDEV(&dva[d], vd->vdev_id);
792 			DVA_SET_OFFSET(&dva[d], offset);
793 			DVA_SET_GANG(&dva[d], 0);
794 			DVA_SET_ASIZE(&dva[d], asize);
795 
796 			return (0);
797 		}
798 		mc->mc_rotor = mg->mg_next;
799 		mc->mc_allocated = 0;
800 	} while ((mg = mg->mg_next) != rotor);
801 
802 	if (!all_zero) {
803 		dshift++;
804 		ASSERT(dshift < 64);
805 		goto top;
806 	}
807 
808 	bzero(&dva[d], sizeof (dva_t));
809 
810 	return (ENOSPC);
811 }
812 
813 /*
814  * Free the block represented by DVA in the context of the specified
815  * transaction group.
816  */
817 static void
818 metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
819 {
820 	uint64_t vdev = DVA_GET_VDEV(dva);
821 	uint64_t offset = DVA_GET_OFFSET(dva);
822 	uint64_t size = DVA_GET_ASIZE(dva);
823 	vdev_t *vd;
824 	metaslab_t *msp;
825 
826 	ASSERT(DVA_IS_VALID(dva));
827 
828 	if (txg > spa_freeze_txg(spa))
829 		return;
830 
831 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
832 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
833 		cmn_err(CE_WARN, "metaslab_free_dva(): bad DVA %llu:%llu",
834 		    (u_longlong_t)vdev, (u_longlong_t)offset);
835 		ASSERT(0);
836 		return;
837 	}
838 
839 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
840 
841 	if (DVA_GET_GANG(dva))
842 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
843 
844 	mutex_enter(&msp->ms_lock);
845 
846 	if (now) {
847 		space_map_remove(&msp->ms_allocmap[txg & TXG_MASK],
848 		    offset, size);
849 		space_map_free(&msp->ms_map, offset, size);
850 	} else {
851 		if (msp->ms_freemap[txg & TXG_MASK].sm_space == 0)
852 			vdev_dirty(vd, VDD_METASLAB, msp, txg);
853 		space_map_add(&msp->ms_freemap[txg & TXG_MASK], offset, size);
854 	}
855 
856 	mutex_exit(&msp->ms_lock);
857 }
858 
859 /*
860  * Intent log support: upon opening the pool after a crash, notify the SPA
861  * of blocks that the intent log has allocated for immediate write, but
862  * which are still considered free by the SPA because the last transaction
863  * group didn't commit yet.
864  */
865 static int
866 metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
867 {
868 	uint64_t vdev = DVA_GET_VDEV(dva);
869 	uint64_t offset = DVA_GET_OFFSET(dva);
870 	uint64_t size = DVA_GET_ASIZE(dva);
871 	vdev_t *vd;
872 	metaslab_t *msp;
873 	int error;
874 
875 	ASSERT(DVA_IS_VALID(dva));
876 
877 	if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
878 	    (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
879 		return (ENXIO);
880 
881 	msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
882 
883 	if (DVA_GET_GANG(dva))
884 		size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
885 
886 	mutex_enter(&msp->ms_lock);
887 
888 	error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
889 	if (error) {
890 		mutex_exit(&msp->ms_lock);
891 		return (error);
892 	}
893 
894 	if (msp->ms_allocmap[txg & TXG_MASK].sm_space == 0)
895 		vdev_dirty(vd, VDD_METASLAB, msp, txg);
896 
897 	space_map_claim(&msp->ms_map, offset, size);
898 	space_map_add(&msp->ms_allocmap[txg & TXG_MASK], offset, size);
899 
900 	mutex_exit(&msp->ms_lock);
901 
902 	return (0);
903 }
904 
905 int
906 metaslab_alloc(spa_t *spa, uint64_t psize, blkptr_t *bp, int ndvas,
907     uint64_t txg, blkptr_t *hintbp)
908 {
909 	dva_t *dva = bp->blk_dva;
910 	dva_t *hintdva = hintbp->blk_dva;
911 	int d;
912 	int error = 0;
913 
914 	ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
915 	ASSERT(BP_GET_NDVAS(bp) == 0);
916 	ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
917 
918 	for (d = 0; d < ndvas; d++) {
919 		error = metaslab_alloc_dva(spa, psize, dva, d, hintdva, txg);
920 		if (error) {
921 			for (d--; d >= 0; d--) {
922 				metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
923 				bzero(&dva[d], sizeof (dva_t));
924 			}
925 			return (error);
926 		}
927 	}
928 	ASSERT(error == 0);
929 	ASSERT(BP_GET_NDVAS(bp) == ndvas);
930 
931 	return (0);
932 }
933 
934 void
935 metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
936 {
937 	const dva_t *dva = bp->blk_dva;
938 	int ndvas = BP_GET_NDVAS(bp);
939 	int d;
940 
941 	ASSERT(!BP_IS_HOLE(bp));
942 
943 	for (d = 0; d < ndvas; d++)
944 		metaslab_free_dva(spa, &dva[d], txg, now);
945 }
946 
947 int
948 metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
949 {
950 	const dva_t *dva = bp->blk_dva;
951 	int ndvas = BP_GET_NDVAS(bp);
952 	int d, error;
953 	int last_error = 0;
954 
955 	ASSERT(!BP_IS_HOLE(bp));
956 
957 	for (d = 0; d < ndvas; d++)
958 		if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
959 			last_error = error;
960 
961 	return (last_error);
962 }
963