/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/spa_log_spacemap.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>

/*
 * Note on space map block size:
 *
 * The data for a given space map can be kept on blocks of any size.
 * Larger blocks entail fewer I/O operations, but they also cause the
 * DMU to keep more data in-core and to waste more I/O bandwidth when
 * only a few blocks have changed since the last transaction group.
 */

/*
 * Enabled whenever we want to stress test the use of double-word
 * space map entries.
 */
boolean_t zfs_force_some_double_word_sm_entries = B_FALSE;

/*
 * Override the default indirect block size of 128K, instead using 16K for
 * spacemaps (2^14 bytes). This dramatically reduces write inflation since
 * appending to a spacemap typically has to write one data block (4KB) and one
 * or two indirect blocks (16K-32K, rather than 128K-256K).
 */
int space_map_ibs = 14;
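
/*
 * For a rough sense of the savings (a sketch; the exact figures depend
 * on SPA_BLKPTRSHIFT): block pointers are 128 bytes, so a 2^14-byte
 * indirect block still maps 16K / 128 = 128 child blocks, while each
 * append dirties only 16K of indirect metadata instead of 128K.
 */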

boolean_t
sm_entry_is_debug(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM_DEBUG_PREFIX);
}

boolean_t
sm_entry_is_single_word(uint64_t e)
{
	uint8_t prefix = SM_PREFIX_DECODE(e);
	return (prefix != SM_DEBUG_PREFIX && prefix != SM2_PREFIX);
}

boolean_t
sm_entry_is_double_word(uint64_t e)
{
	return (SM_PREFIX_DECODE(e) == SM2_PREFIX);
}
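
/*
 * Example (a sketch; the exact field widths live in space_map.h): a
 * one-word ALLOC entry can be built with the SM_*_ENCODE() macros and
 * then classified by the predicates above:
 *
 *	uint64_t w = SM_OFFSET_ENCODE(0x10) |
 *	    SM_TYPE_ENCODE(SM_ALLOC) | SM_RUN_ENCODE(4);
 *	ASSERT(sm_entry_is_single_word(w));
 *	ASSERT(!sm_entry_is_debug(w));
 *
 * Debug entries and the first word of two-word (v2) entries are
 * distinguished by their prefix bits (SM_DEBUG_PREFIX and SM2_PREFIX
 * respectively); any other word is a one-word entry.
 */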

/*
 * Iterate through the space map, invoking the callback on each (non-debug)
 * space map entry. Stop after reading 'end' bytes of the space map.
 */
int
space_map_iterate(space_map_t *sm, uint64_t end, sm_cb_t callback, void *arg)
{
	uint64_t blksz = sm->sm_blksz;

	ASSERT3U(blksz, !=, 0);
	ASSERT3U(end, <=, space_map_length(sm));
	ASSERT0(P2PHASE(end, sizeof (uint64_t)));

	dmu_prefetch(sm->sm_os, space_map_object(sm), 0, 0, end,
	    ZIO_PRIORITY_SYNC_READ);

	int error = 0;
	for (uint64_t block_base = 0; block_base < end && error == 0;
	    block_base += blksz) {
		dmu_buf_t *db;
		error = dmu_buf_hold(sm->sm_os, space_map_object(sm),
		    block_base, FTAG, &db, DMU_READ_PREFETCH);
		if (error != 0)
			return (error);

		uint64_t *block_start = db->db_data;
		uint64_t block_length = MIN(end - block_base, blksz);
		uint64_t *block_end = block_start +
		    (block_length / sizeof (uint64_t));

		VERIFY0(P2PHASE(block_length, sizeof (uint64_t)));
		VERIFY3U(block_length, !=, 0);
		ASSERT3U(blksz, ==, db->db_size);

		for (uint64_t *block_cursor = block_start;
		    block_cursor < block_end && error == 0; block_cursor++) {
			uint64_t e = *block_cursor;

			if (sm_entry_is_debug(e))	/* Skip debug entries */
				continue;

			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				/* it is a two-word entry */
				ASSERT(sm_entry_is_double_word(e));
				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move on to the second word */
				block_cursor++;
				e = *block_cursor;
				VERIFY3P(block_cursor, <=, block_end);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset = (raw_offset << sm->sm_shift) +
			    sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			ASSERT3U(entry_offset, >=, sm->sm_start);
			ASSERT3U(entry_offset, <, sm->sm_start + sm->sm_size);
			ASSERT3U(entry_run, <=, sm->sm_size);
			ASSERT3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
		}
		dmu_buf_rele(db, FTAG);
	}
	return (error);
}
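
/*
 * Example usage (a sketch): count the ALLOC entries in a space map.
 * The callback follows the sm_cb_t signature; returning nonzero would
 * stop the iteration early.
 *
 *	static int
 *	count_alloc_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		uint64_t *count = arg;
 *		if (sme->sme_type == SM_ALLOC)
 *			(*count)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	VERIFY0(space_map_iterate(sm, space_map_length(sm),
 *	    count_alloc_cb, &count));
 */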

/*
 * Reads the entries from the last block of the space map into
 * buf in reverse order. Populates nwords with the number of words
 * in the last block.
 *
 * Refer to the block comment within space_map_incremental_destroy()
 * to understand why this function is needed.
 */
static int
space_map_reversed_last_block_entries(space_map_t *sm, uint64_t *buf,
    uint64_t bufsz, uint64_t *nwords)
{
	int error = 0;
	dmu_buf_t *db;

	/*
	 * Find the offset of the last word in the space map and use
	 * that to read the last block of the space map with
	 * dmu_buf_hold().
	 */
	uint64_t last_word_offset =
	    sm->sm_phys->smp_length - sizeof (uint64_t);
	error = dmu_buf_hold(sm->sm_os, space_map_object(sm), last_word_offset,
	    FTAG, &db, DMU_READ_NO_PREFETCH);
	if (error != 0)
		return (error);

	ASSERT3U(sm->sm_object, ==, db->db_object);
	ASSERT3U(sm->sm_blksz, ==, db->db_size);
	ASSERT3U(bufsz, >=, db->db_size);
	ASSERT(nwords != NULL);

	uint64_t *words = db->db_data;
	*nwords =
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3U(*nwords, <=, bufsz / sizeof (uint64_t));

	uint64_t n = *nwords;
	uint64_t j = n - 1;
	for (uint64_t i = 0; i < n; i++) {
		uint64_t entry = words[i];
		if (sm_entry_is_double_word(entry)) {
			/*
			 * Since we are populating the buffer backwards
			 * we have to be extra careful and add the two
			 * words of the double-word entry in the right
			 * order.
			 */
			ASSERT3U(j, >, 0);
			buf[j - 1] = entry;

			i++;
			ASSERT3U(i, <, n);
			entry = words[i];
			buf[j] = entry;
			j -= 2;
		} else {
			ASSERT(sm_entry_is_debug(entry) ||
			    sm_entry_is_single_word(entry));
			buf[j] = entry;
			j--;
		}
	}

	/*
	 * Assert that we wrote backwards all the
	 * way to the beginning of the buffer.
	 */
	ASSERT3S(j, ==, -1);

	dmu_buf_rele(db, FTAG);
	return (error);
}
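
/*
 * A worked example of the reversal above (a sketch): if the last block
 * holds the words [A, B1, B2, C], where A and C are one-word entries
 * and B1/B2 are the two halves of a two-word entry, the loop produces
 * buf = [C, B1, B2, A]. Entry order is reversed, but each two-word
 * entry keeps its first word first so it can still be decoded.
 */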

/*
 * Note: This function performs destructive actions - specifically
 * it deletes entries from the end of the space map. Thus, callers
 * should ensure that they are holding the appropriate locks for
 * the space map that they provide.
 */
int
space_map_incremental_destroy(space_map_t *sm, sm_cb_t callback, void *arg,
    dmu_tx_t *tx)
{
	uint64_t bufsz = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
	uint64_t *buf = zio_buf_alloc(bufsz);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * Ideally we would want to iterate from the beginning of the
	 * space map to the end in incremental steps. The issue with this
	 * approach is that we don't have any field on-disk that points
	 * us where to start between each step. We could try zeroing out
	 * entries that we've destroyed, but this doesn't work either as
	 * an entry that is 0 is a valid one (ALLOC for range [0x0:0x200]).
	 *
	 * As a result, we destroy the space map's entries incrementally
	 * starting from the end after applying the callback to each of them.
	 *
	 * The problem with this approach is that we cannot literally
	 * iterate through the words in the space map backwards as we
	 * can't distinguish two-word space map entries from their second
	 * word. Thus we do the following:
	 *
	 * 1] We get all the entries from the last block of the space map
	 *    and put them into a buffer in reverse order. This way the
	 *    last entry comes first in the buffer, the second to last is
	 *    second, etc.
	 * 2] We iterate through the entries in the buffer and we apply
	 *    the callback to each one. As we move from entry to entry
	 *    we decrease the size of the space map, effectively deleting
	 *    each entry.
	 * 3] If there are no more entries in the space map or the callback
	 *    returns a value other than 0, we stop iterating over the
	 *    space map. If there are entries remaining and the callback
	 *    returned 0, we go back to step [1].
	 */
	int error = 0;
	while (space_map_length(sm) > 0 && error == 0) {
		uint64_t nwords = 0;
		error = space_map_reversed_last_block_entries(sm, buf, bufsz,
		    &nwords);
		if (error != 0)
			break;

		ASSERT3U(nwords, <=, bufsz / sizeof (uint64_t));

		for (uint64_t i = 0; i < nwords; i++) {
			uint64_t e = buf[i];

			if (sm_entry_is_debug(e)) {
				sm->sm_phys->smp_length -= sizeof (uint64_t);
				continue;
			}

			int words = 1;
			uint64_t raw_offset, raw_run, vdev_id;
			maptype_t type;
			if (sm_entry_is_single_word(e)) {
				type = SM_TYPE_DECODE(e);
				vdev_id = SM_NO_VDEVID;
				raw_offset = SM_OFFSET_DECODE(e);
				raw_run = SM_RUN_DECODE(e);
			} else {
				ASSERT(sm_entry_is_double_word(e));
				words = 2;

				raw_run = SM2_RUN_DECODE(e);
				vdev_id = SM2_VDEV_DECODE(e);

				/* move to the second word */
				i++;
				e = buf[i];

				ASSERT3P(i, <=, nwords);

				type = SM2_TYPE_DECODE(e);
				raw_offset = SM2_OFFSET_DECODE(e);
			}

			uint64_t entry_offset =
			    (raw_offset << sm->sm_shift) + sm->sm_start;
			uint64_t entry_run = raw_run << sm->sm_shift;

			VERIFY0(P2PHASE(entry_offset, 1ULL << sm->sm_shift));
			VERIFY0(P2PHASE(entry_run, 1ULL << sm->sm_shift));
			VERIFY3U(entry_offset, >=, sm->sm_start);
			VERIFY3U(entry_offset, <, sm->sm_start + sm->sm_size);
			VERIFY3U(entry_run, <=, sm->sm_size);
			VERIFY3U(entry_offset + entry_run, <=,
			    sm->sm_start + sm->sm_size);

			space_map_entry_t sme = {
			    .sme_type = type,
			    .sme_vdev = vdev_id,
			    .sme_offset = entry_offset,
			    .sme_run = entry_run
			};
			error = callback(&sme, arg);
			if (error != 0)
				break;

			if (type == SM_ALLOC)
				sm->sm_phys->smp_alloc -= entry_run;
			else
				sm->sm_phys->smp_alloc += entry_run;
			sm->sm_phys->smp_length -= words * sizeof (uint64_t);
		}
	}

	if (space_map_length(sm) == 0) {
		ASSERT0(error);
		ASSERT0(space_map_allocated(sm));
	}

	zio_buf_free(buf, bufsz);
	return (error);
}
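
/*
 * Example usage (a sketch; 'consume_cb' is a hypothetical consumer):
 * drain a space map from syncing context, handing each entry to the
 * callback. Returning nonzero from the callback stops the destroy
 * partway, leaving the remaining entries intact for a later attempt.
 *
 *	static int
 *	consume_cb(space_map_entry_t *sme, void *arg)
 *	{
 *		... process sme->sme_offset / sme->sme_run ...
 *		return (0);
 *	}
 *
 *	VERIFY0(space_map_incremental_destroy(sm, consume_cb, NULL, tx));
 */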

typedef struct space_map_load_arg {
	space_map_t	*smla_sm;
	range_tree_t	*smla_rt;
	maptype_t	smla_type;
} space_map_load_arg_t;

static int
space_map_load_callback(space_map_entry_t *sme, void *arg)
{
	space_map_load_arg_t *smla = arg;
	if (sme->sme_type == smla->smla_type) {
		VERIFY3U(range_tree_space(smla->smla_rt) + sme->sme_run, <=,
		    smla->smla_sm->sm_size);
		range_tree_add(smla->smla_rt, sme->sme_offset, sme->sme_run);
	} else {
		range_tree_remove(smla->smla_rt, sme->sme_offset, sme->sme_run);
	}

	return (0);
}

/*
 * Load the space map into the range tree, like space_map_load, but
 * only read the first 'length' bytes of the space map.
 */
int
space_map_load_length(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t length)
{
	space_map_load_arg_t smla;

	VERIFY0(range_tree_space(rt));

	if (maptype == SM_FREE)
		range_tree_add(rt, sm->sm_start, sm->sm_size);

	smla.smla_rt = rt;
	smla.smla_sm = sm;
	smla.smla_type = maptype;
	int err = space_map_iterate(sm, length,
	    space_map_load_callback, &smla);

	if (err != 0)
		range_tree_vacate(rt, NULL, NULL);

	return (err);
}

/*
 * Load the space map from disk into the specified range tree. Segments of
 * maptype are added to the range tree, other segment types are removed.
 */
int
space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
{
	return (space_map_load_length(sm, rt, maptype, space_map_length(sm)));
}
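
/*
 * A worked example of the load semantics (a sketch): given the on-disk
 * log [ALLOC 0x1000:0x200, FREE 0x1000:0x200], loading with SM_ALLOC
 * starts from an empty tree, adds [0x1000, 0x1200) on the ALLOC entry
 * and removes it on the FREE entry, ending empty. Loading with SM_FREE
 * starts from the full [sm_start, sm_start + sm_size) range, removes
 * the segment on the ALLOC entry and adds it back on the FREE entry,
 * ending with every byte marked free.
 */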

void
space_map_histogram_clear(space_map_t *sm)
{
	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}

boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
	/*
	 * Verify that the in-core range tree does not have any
	 * ranges smaller than our sm_shift size.
	 */
	for (int i = 0; i < sm->sm_shift; i++) {
		if (rt->rt_histogram[i] != 0)
			return (B_FALSE);
	}
	return (B_TRUE);
}

void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
	int idx = 0;

	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(space_map_object(sm), !=, 0);

	if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
		return;

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	ASSERT(space_map_histogram_verify(sm, rt));
	/*
	 * Transfer the content of the range tree histogram to the space
	 * map histogram. The space map histogram contains 32 buckets
	 * ranging from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
	 * however, can represent ranges from 2^0 to 2^63. Since the space
	 * map only cares about allocatable blocks (minimum of sm_shift) we
	 * can safely ignore all ranges in the range tree smaller than
	 * 2^sm_shift.
	 */
	for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {

		/*
		 * Since the largest histogram bucket in the space map is
		 * 2^(32+sm_shift-1), we need to normalize the values in
		 * the range tree for any bucket larger than that size. For
		 * example given an sm_shift of 9, ranges larger than 2^40
		 * would get normalized as if they were 1TB ranges. Assuming
		 * the range tree had a count of 5 in the 2^44 (16TB) bucket,
		 * the calculation below would normalize this to
		 * 5 * 2^4 = 80.
		 */
		ASSERT3U(i, >=, idx + sm->sm_shift);
		sm->sm_phys->smp_histogram[idx] +=
		    rt->rt_histogram[i] << (i - idx - sm->sm_shift);

		/*
		 * Increment the space map's index as long as we haven't
		 * reached the maximum bucket size. Accumulate all ranges
		 * larger than the max bucket size into the last bucket.
		 */
		if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
			ASSERT3U(idx + sm->sm_shift, ==, i);
			idx++;
			ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
		}
	}
}

static void
space_map_write_intro_debug(space_map_t *sm, maptype_t maptype, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	uint64_t dentry = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
	    SM_DEBUG_ACTION_ENCODE(maptype) |
	    SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(tx->tx_pool->dp_spa)) |
	    SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));

	dmu_write(sm->sm_os, space_map_object(sm), sm->sm_phys->smp_length,
	    sizeof (dentry), &dentry, tx);

	sm->sm_phys->smp_length += sizeof (dentry);
}

/*
 * Writes one or more entries given a segment.
 *
 * Note: The function may release the dbuf from the pointer initially
 * passed to it, and return a different dbuf. Also, the space map's
 * dbuf must be dirty for the changes in sm_phys to take effect.
 */
static void
space_map_write_seg(space_map_t *sm, uint64_t rstart, uint64_t rend,
    maptype_t maptype, uint64_t vdev_id, uint8_t words, dmu_buf_t **dbp,
    void *tag, dmu_tx_t *tx)
{
	ASSERT3U(words, !=, 0);
	ASSERT3U(words, <=, 2);

	/* ensure the vdev_id can be represented by the space map */
	ASSERT3U(vdev_id, <=, SM_NO_VDEVID);

	/*
	 * If this is a single word entry, ensure that no vdev was
	 * specified.
	 */
	IMPLY(words == 1, vdev_id == SM_NO_VDEVID);

	dmu_buf_t *db = *dbp;
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	uint64_t *block_base = db->db_data;
	uint64_t *block_end = block_base + (sm->sm_blksz / sizeof (uint64_t));
	uint64_t *block_cursor = block_base +
	    (sm->sm_phys->smp_length - db->db_offset) / sizeof (uint64_t);

	ASSERT3P(block_cursor, <=, block_end);

	uint64_t size = (rend - rstart) >> sm->sm_shift;
	uint64_t start = (rstart - sm->sm_start) >> sm->sm_shift;
	uint64_t run_max = (words == 2) ? SM2_RUN_MAX : SM_RUN_MAX;

	ASSERT3U(rstart, >=, sm->sm_start);
	ASSERT3U(rstart, <, sm->sm_start + sm->sm_size);
	ASSERT3U(rend - rstart, <=, sm->sm_size);
	ASSERT3U(rend, <=, sm->sm_start + sm->sm_size);

	while (size != 0) {
		ASSERT3P(block_cursor, <=, block_end);

		/*
		 * If we are at the end of this block, flush it and start
		 * writing again from the beginning.
		 */
		if (block_cursor == block_end) {
			dmu_buf_rele(db, tag);

			uint64_t next_word_offset = sm->sm_phys->smp_length;
			VERIFY0(dmu_buf_hold(sm->sm_os,
			    space_map_object(sm), next_word_offset,
			    tag, &db, DMU_READ_PREFETCH));
			dmu_buf_will_dirty(db, tx);

			/* update caller's dbuf */
			*dbp = db;

			ASSERT3U(db->db_size, ==, sm->sm_blksz);

			block_base = db->db_data;
			block_cursor = block_base;
			block_end = block_base +
			    (db->db_size / sizeof (uint64_t));
		}

		/*
		 * If we are writing a two-word entry and we only have one
		 * word left on this block, just pad it with an empty debug
		 * entry and write the two-word entry in the next block.
		 */
		uint64_t *next_entry = block_cursor + 1;
		if (next_entry == block_end && words > 1) {
			ASSERT3U(words, ==, 2);
			*block_cursor = SM_PREFIX_ENCODE(SM_DEBUG_PREFIX) |
			    SM_DEBUG_ACTION_ENCODE(0) |
			    SM_DEBUG_SYNCPASS_ENCODE(0) |
			    SM_DEBUG_TXG_ENCODE(0);
			block_cursor++;
			sm->sm_phys->smp_length += sizeof (uint64_t);
			ASSERT3P(block_cursor, ==, block_end);
			continue;
		}

		uint64_t run_len = MIN(size, run_max);
		switch (words) {
		case 1:
			*block_cursor = SM_OFFSET_ENCODE(start) |
			    SM_TYPE_ENCODE(maptype) |
			    SM_RUN_ENCODE(run_len);
			block_cursor++;
			break;
		case 2:
			/* write the first word of the entry */
			*block_cursor = SM_PREFIX_ENCODE(SM2_PREFIX) |
			    SM2_RUN_ENCODE(run_len) |
			    SM2_VDEV_ENCODE(vdev_id);
			block_cursor++;

			/* move on to the second word of the entry */
			ASSERT3P(block_cursor, <, block_end);
			*block_cursor = SM2_TYPE_ENCODE(maptype) |
			    SM2_OFFSET_ENCODE(start);
			block_cursor++;
			break;
		default:
			panic("%d-word space map entries are not supported",
			    words);
			break;
		}
		sm->sm_phys->smp_length += words * sizeof (uint64_t);

		start += run_len;
		size -= run_len;
	}
	ASSERT0(size);
}
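
/*
 * A note on run splitting (a sketch; the SM_RUN_BITS and sm_shift
 * values are assumptions for illustration): the while loop above emits
 * as many entries as a segment needs. With sm_shift == 9 and
 * SM_RUN_BITS == 15, a one-word entry can describe a run of at most
 * 2^15 * 2^9 = 16MB, so a 40MB segment would be written as two 16MB
 * entries followed by one 8MB entry, each iteration advancing 'start'
 * and shrinking 'size' by run_len.
 */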

/*
 * Note: The space map's dbuf must be dirty for the changes in sm_phys to
 * take effect.
 */
static void
space_map_write_impl(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;
	dmu_buf_t *db;

	space_map_write_intro_debug(sm, maptype, tx);

#ifdef DEBUG
	/*
	 * We do this right after we write the intro debug entry
	 * because the estimate does not take it into account.
	 */
	uint64_t initial_objsize = sm->sm_phys->smp_length;
	uint64_t estimated_growth =
	    space_map_estimate_optimal_size(sm, rt, SM_NO_VDEVID);
	uint64_t estimated_final_objsize = initial_objsize + estimated_growth;
#endif

	/*
	 * Find the offset right after the last word in the space map
	 * and use that to get a hold of the last block, so we can
	 * start appending to it.
	 */
	uint64_t next_word_offset = sm->sm_phys->smp_length;
	VERIFY0(dmu_buf_hold(sm->sm_os, space_map_object(sm),
	    next_word_offset, FTAG, &db, DMU_READ_PREFETCH));
	ASSERT3U(db->db_size, ==, sm->sm_blksz);

	dmu_buf_will_dirty(db, tx);

	zfs_btree_t *t = &rt->rt_root;
	zfs_btree_index_t where;
	for (range_seg_t *rs = zfs_btree_first(t, &where); rs != NULL;
	    rs = zfs_btree_next(t, &where, &where)) {
		uint64_t offset = (rs_get_start(rs, rt) - sm->sm_start) >>
		    sm->sm_shift;
		uint64_t length = (rs_get_end(rs, rt) - rs_get_start(rs, rt)) >>
		    sm->sm_shift;
		uint8_t words = 1;

		/*
		 * We only write two-word entries when both of the following
		 * are true:
		 *
		 * [1] The feature is enabled.
		 * [2] The offset or run is too big for a single-word entry,
		 *     or the vdev_id is set (meaning not equal to
		 *     SM_NO_VDEVID).
		 *
		 * Note that for purposes of testing we've added the case that
		 * we write two-word entries occasionally when the feature is
		 * enabled and zfs_force_some_double_word_sm_entries has been
		 * set.
		 */
		if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    (offset >= (1ULL << SM_OFFSET_BITS) ||
		    length > SM_RUN_MAX ||
		    vdev_id != SM_NO_VDEVID ||
		    (zfs_force_some_double_word_sm_entries &&
		    spa_get_random(100) == 0)))
			words = 2;

		space_map_write_seg(sm, rs_get_start(rs, rt), rs_get_end(rs,
		    rt), maptype, vdev_id, words, &db, FTAG, tx);
	}

	dmu_buf_rele(db, FTAG);

#ifdef DEBUG
	/*
	 * We expect our estimation to be based on the worst case
	 * scenario [see comment in space_map_estimate_optimal_size()].
	 * Therefore we expect the actual objsize to be less than or
	 * equal to whatever we estimated it to be.
	 */
	ASSERT3U(estimated_final_objsize, >=, sm->sm_phys->smp_length);
#endif
}

/*
 * Note: This function manipulates the state of the given space map but
 * does not hold any locks implicitly. Thus the caller is responsible
 * for synchronizing writes to the space map.
 */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
    uint64_t vdev_id, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	VERIFY3U(space_map_object(sm), !=, 0);

	dmu_buf_will_dirty(sm->sm_dbuf, tx);

	/*
	 * This field is no longer necessary since the in-core space map
	 * now contains the object number but is maintained for backwards
	 * compatibility.
	 */
	sm->sm_phys->smp_object = sm->sm_object;

	if (range_tree_is_empty(rt)) {
		VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
		return;
	}

	if (maptype == SM_ALLOC)
		sm->sm_phys->smp_alloc += range_tree_space(rt);
	else
		sm->sm_phys->smp_alloc -= range_tree_space(rt);

	uint64_t nodes = zfs_btree_numnodes(&rt->rt_root);
	uint64_t rt_space = range_tree_space(rt);

	space_map_write_impl(sm, rt, maptype, vdev_id, tx);

	/*
	 * Ensure that the space_map's accounting wasn't changed
	 * while we were in the middle of writing it out.
	 */
	VERIFY3U(nodes, ==, zfs_btree_numnodes(&rt->rt_root));
	VERIFY3U(range_tree_space(rt), ==, rt_space);
}
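
/*
 * Example usage (a sketch): from syncing context, a caller such as
 * metaslab_sync() appends this txg's allocations and frees as two
 * separate logs against the same space map:
 *
 *	space_map_write(sm, alloctree, SM_ALLOC, SM_NO_VDEVID, tx);
 *	space_map_write(sm, freetree, SM_FREE, SM_NO_VDEVID, tx);
 *
 * where 'alloctree' and 'freetree' are placeholder names for the range
 * trees of segments allocated and freed in this txg.
 */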

static int
space_map_open_impl(space_map_t *sm)
{
	int error;
	u_longlong_t blocks;

	error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
	if (error)
		return (error);

	dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
	sm->sm_phys = sm->sm_dbuf->db_data;
	return (0);
}

int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
    uint64_t start, uint64_t size, uint8_t shift)
{
	space_map_t *sm;
	int error;

	ASSERT(*smp == NULL);
	ASSERT(os != NULL);
	ASSERT(object != 0);

	sm = kmem_zalloc(sizeof (space_map_t), KM_SLEEP);

	sm->sm_start = start;
	sm->sm_size = size;
	sm->sm_shift = shift;
	sm->sm_os = os;
	sm->sm_object = object;

	error = space_map_open_impl(sm);
	if (error != 0) {
		space_map_close(sm);
		return (error);
	}
	*smp = sm;

	return (0);
}
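
/*
 * Example usage (a sketch): open a space map object covering a 2GB
 * region with 512-byte alignment (shift of 9), then release it:
 *
 *	space_map_t *sm = NULL;
 *	VERIFY0(space_map_open(&sm, os, smobj, 0, 2ULL << 30, 9));
 *	...
 *	space_map_close(sm);
 */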

void
space_map_close(space_map_t *sm)
{
	if (sm == NULL)
		return;

	if (sm->sm_dbuf != NULL)
		dmu_buf_rele(sm->sm_dbuf, sm);
	sm->sm_dbuf = NULL;
	sm->sm_phys = NULL;

	kmem_free(sm, sizeof (*sm));
}

void
space_map_truncate(space_map_t *sm, int blocksize, dmu_tx_t *tx)
{
	objset_t *os = sm->sm_os;
	spa_t *spa = dmu_objset_spa(os);
	dmu_object_info_t doi;

	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
	ASSERT(dmu_tx_is_syncing(tx));
	VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));

	dmu_object_info_from_db(sm->sm_dbuf, &doi);

	/*
	 * If the space map has the wrong bonus size (because
	 * SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
	 * the wrong block size (because space_map_blksz has changed),
	 * free and re-allocate its object with the updated sizes.
	 *
	 * Otherwise, just truncate the current object.
	 */
	if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
	    doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
	    doi.doi_data_block_size != blocksize ||
	    doi.doi_metadata_block_size != 1 << space_map_ibs) {
		zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
		    "object[%llu]: old bonus %u, old blocksz %u",
		    dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
		    doi.doi_bonus_size, doi.doi_data_block_size);

		space_map_free(sm, tx);
		dmu_buf_rele(sm->sm_dbuf, sm);

		sm->sm_object = space_map_alloc(sm->sm_os, blocksize, tx);
		VERIFY0(space_map_open_impl(sm));
	} else {
		VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));

		/*
		 * If the spacemap is reallocated, its histogram
		 * will be reset. Do the same in the common case so that
		 * bugs related to the uncommon case do not go unnoticed.
		 */
		bzero(sm->sm_phys->smp_histogram,
		    sizeof (sm->sm_phys->smp_histogram));
	}

	dmu_buf_will_dirty(sm->sm_dbuf, tx);
	sm->sm_phys->smp_length = 0;
	sm->sm_phys->smp_alloc = 0;
}

uint64_t
space_map_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	uint64_t object;
	int bonuslen;

	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		bonuslen = sizeof (space_map_phys_t);
		ASSERT3U(bonuslen, <=, dmu_bonus_max());
	} else {
		bonuslen = SPACE_MAP_SIZE_V0;
	}

	object = dmu_object_alloc_ibs(os, DMU_OT_SPACE_MAP, blocksize,
	    space_map_ibs, DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);

	return (object);
}

void
space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
	spa_t *spa = dmu_objset_spa(os);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
		dmu_object_info_t doi;

		VERIFY0(dmu_object_info(os, smobj, &doi));
		if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
			spa_feature_decr(spa,
			    SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
		}
	}

	VERIFY0(dmu_object_free(os, smobj, tx));
}

void
space_map_free(space_map_t *sm, dmu_tx_t *tx)
{
	if (sm == NULL)
		return;

	space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
	sm->sm_object = 0;
}

/*
 * Given a range tree, make a worst-case estimate of how much space
 * the tree's segments would take if they were written to the given
 * space map.
 */
uint64_t
space_map_estimate_optimal_size(space_map_t *sm, range_tree_t *rt,
    uint64_t vdev_id)
{
	spa_t *spa = dmu_objset_spa(sm->sm_os);
	uint64_t shift = sm->sm_shift;
	uint64_t *histogram = rt->rt_histogram;
	uint64_t entries_for_seg = 0;

	/*
	 * In order to get a quick estimate of the optimal size that this
	 * range tree would have on-disk as a space map, we iterate through
	 * its histogram buckets instead of iterating through its nodes.
	 *
	 * Note that this is a highest-bound/worst-case estimate for the
	 * following reasons:
	 *
	 * 1] We assume that we always add a debug padding for each block
	 *    we write and we also assume that we start at the last word
	 *    of a block attempting to write a two-word entry.
	 * 2] Rounding up errors due to the way segments are distributed
	 *    in the buckets of the range tree's histogram.
	 * 3] The activation of zfs_force_some_double_word_sm_entries
	 *    (tunable) when testing.
	 *
	 * = Math and Rounding Errors =
	 *
	 * The rt_histogram[i] bucket of a range tree represents the number
	 * of entries in [2^i, (2^(i+1))-1] of that range_tree. Given
	 * that, we want to divide the buckets into groups: Buckets that
	 * can be represented using a single-word entry, ones that can
	 * be represented with a double-word entry, and ones that can
	 * only be represented with multiple two-word entries.
	 *
	 * [Note that if the new encoding feature is not enabled there
	 * are only two groups: single-word entry buckets and multiple
	 * single-word entry buckets. The information below assumes
	 * two-word entries enabled, but it can easily be applied when
	 * the feature is not enabled]
	 *
	 * To find the highest bucket that can be represented with a
	 * single-word entry we look at the maximum run that such entry
	 * can have, which is 2^(SM_RUN_BITS + sm_shift) [remember that
	 * the run of a space map entry is shifted by sm_shift, thus we
	 * add it to the exponent]. This way, excluding the value of the
	 * maximum run that can be represented by a single-word entry,
	 * all runs that are smaller exist in buckets 0 to
	 * SM_RUN_BITS + shift - 1.
	 *
	 * To find the highest bucket that can be represented with a
	 * double-word entry, we follow the same approach. Finally, any
	 * buckets higher than that are represented with multiple two-word
	 * entries. To be more specific, if the highest bucket whose
	 * segments can be represented with a single two-word entry is X,
	 * then bucket X+1 will need 2 two-word entries for each of its
	 * segments, X+2 will need 4, X+3 will need 8, ...etc.
	 *
	 * With all of the above we make our estimation based on bucket
	 * groups. There is a rounding error though. As we mentioned in
	 * the example with the one-word entry, the maximum run that can
	 * be represented in a one-word entry, 2^(SM_RUN_BITS + shift), is
	 * not part of bucket SM_RUN_BITS + shift - 1. Thus, segments of
	 * that length fall into the next bucket (and bucket group) where
	 * we start counting two-word entries and this is one more reason
	 * why the estimated size may end up being bigger than the actual
	 * size written.
	 */
	uint64_t size = 0;
	uint64_t idx = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) ||
	    (vdev_id == SM_NO_VDEVID && sm->sm_size < SM_OFFSET_MAX)) {

		/*
		 * If we are trying to force some double word entries just
		 * assume the worst-case of every single word entry being
		 * written as a double word entry.
		 */
		uint64_t entry_size =
		    (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2) &&
		    zfs_force_some_double_word_sm_entries) ?
		    (2 * sizeof (uint64_t)) : sizeof (uint64_t);

		uint64_t single_entry_max_bucket = SM_RUN_BITS + shift - 1;
		for (; idx <= single_entry_max_bucket; idx++)
			size += histogram[idx] * entry_size;

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2)) {
			for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
				ASSERT3U(idx, >=, single_entry_max_bucket);
				entries_for_seg =
				    1ULL << (idx - single_entry_max_bucket);
				size += histogram[idx] *
				    entries_for_seg * entry_size;
			}
			return (size);
		}
	}

	ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_V2));

	uint64_t double_entry_max_bucket = SM2_RUN_BITS + shift - 1;
	for (; idx <= double_entry_max_bucket; idx++)
		size += histogram[idx] * 2 * sizeof (uint64_t);

	for (; idx < RANGE_TREE_HISTOGRAM_SIZE; idx++) {
		ASSERT3U(idx, >=, double_entry_max_bucket);
		entries_for_seg = 1ULL << (idx - double_entry_max_bucket);
		size += histogram[idx] *
		    entries_for_seg * 2 * sizeof (uint64_t);
	}

	/*
	 * Assume the worst case where we start with the padding at the end
	 * of the current block and we add an extra padding entry at the end
	 * of all subsequent blocks.
	 */
	size += ((size / sm->sm_blksz) + 1) * sizeof (uint64_t);

	return (size);
}
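
/*
 * A worked example of the estimate (a sketch; assumes SM_RUN_BITS == 15
 * and a pool without SPA_FEATURE_SPACEMAP_V2): with shift == 9, any
 * bucket up to single_entry_max_bucket = 15 + 9 - 1 = 23 costs one
 * 8-byte word per segment, so 10 segments in bucket 20 contribute
 * 10 * 8 = 80 bytes, while 3 segments in bucket 25 each need
 * 2^(25 - 23) = 4 entries, contributing 3 * 4 * 8 = 96 bytes.
 */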

uint64_t
space_map_object(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_object : 0);
}

int64_t
space_map_allocated(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_alloc : 0);
}

uint64_t
space_map_length(space_map_t *sm)
{
	return (sm != NULL ? sm->sm_phys->smp_length : 0);
}

uint64_t
space_map_nblocks(space_map_t *sm)
{
	if (sm == NULL)
		return (0);
	return (DIV_ROUND_UP(space_map_length(sm), sm->sm_blksz));
}