/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
 * Copyright 2014 HybridCluster. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/zfeature.h>

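/*
 * Allocate an unused object number in this objset, initialize its dnode
 * with the requested type, block size, and bonus buffer, and return the
 * new object number.
 */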
uint64_t
dmu_object_alloc(objset_t *os, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	uint64_t object;
	uint64_t L1_dnode_count = DNODES_PER_BLOCK <<
	    (DMU_META_DNODE(os)->dn_indblkshift - SPA_BLKPTRSHIFT);
	dnode_t *dn = NULL;

	mutex_enter(&os->os_obj_lock);
	for (;;) {
		object = os->os_obj_next;
		/*
		 * Each time we polish off an L1 bp worth of dnodes (2^12
		 * objects), move to another L1 bp that's still reasonably
		 * sparse (at most 1/4 full). Look from the beginning at most
		 * once per txg, but after that keep looking from here.
		 * os_rescan_dnodes is set during txg sync if enough objects
		 * have been freed since the previous rescan to justify
		 * backfilling again. If we can't find a suitable block, just
		 * keep going from here.
		 *
		 * Note that dmu_traverse depends on the behavior that we use
		 * multiple blocks of the dnode object before going back to
		 * reuse objects.  Any change to this algorithm should preserve
		 * that property or find another solution to the issues
		 * described in traverse_visitbp.
		 */

		if (P2PHASE(object, L1_dnode_count) == 0) {
			uint64_t offset;
			int error;
			if (os->os_rescan_dnodes) {
				offset = 0;
				os->os_rescan_dnodes = B_FALSE;
			} else {
				offset = object << DNODE_SHIFT;
			}
			error = dnode_next_offset(DMU_META_DNODE(os),
			    DNODE_FIND_HOLE,
			    &offset, 2, DNODES_PER_BLOCK >> 2, 0);
			if (error == 0)
				object = offset >> DNODE_SHIFT;
		}
		os->os_obj_next = ++object;

		/*
		 * XXX We should check for an i/o error here and return
		 * up to our caller.  Actually we should pre-read it in
		 * dmu_tx_assign(), but there is currently no mechanism
		 * to do so.
		 */
		(void) dnode_hold_impl(os, object, DNODE_MUST_BE_FREE,
		    FTAG, &dn);
		if (dn)
			break;

		/*
		 * The chosen object number is already in use; advance to
		 * the next hole in the meta-dnode and try again from there.
		 */
		if (dmu_object_next(os, &object, B_TRUE, 0) == 0)
			os->os_obj_next = object - 1;
	}

	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
	dnode_rele(dn, FTAG);

	mutex_exit(&os->os_obj_lock);

	dmu_tx_add_new_object(tx, os, object);
	return (object);
}

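/*
 * Allocate a specific, caller-chosen object number and initialize its
 * dnode.  Fails if that object number is already allocated.
 */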
int
dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, FTAG, &dn);
	if (err)
		return (err);
	dnode_allocate(dn, ot, blocksize, 0, bonustype, bonuslen, tx);
	dnode_rele(dn, FTAG);

	dmu_tx_add_new_object(tx, os, object);
	return (0);
}

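/*
 * Reinitialize an existing object with a new type, block size, and bonus
 * buffer, keeping its object number.  The object must already be allocated.
 */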
int
dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	if (object == DMU_META_DNODE_OBJECT)
		return (SET_ERROR(EBADF));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
	    FTAG, &dn);
	if (err)
		return (err);

	dnode_reallocate(dn, ot, blocksize, bonustype, bonuslen, tx);

	dnode_rele(dn, FTAG);
	return (err);
}

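/*
 * Free all of the object's data, then free the dnode itself so the object
 * number becomes a hole that can be reused.
 */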
int
dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	ASSERT(object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));

	err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
	    FTAG, &dn);
	if (err)
		return (err);

	ASSERT(dn->dn_type != DMU_OT_NONE);
	dnode_free_range(dn, 0, DMU_OBJECT_END, tx);
	dnode_free(dn, tx);
	dnode_rele(dn, FTAG);

	return (0);
}

/*
 * Return (in *objectp) the next object which is allocated (or a hole)
 * after *objectp, taking into account only objects that may have been
 * modified after the specified txg.
 */
int
dmu_object_next(objset_t *os, uint64_t *objectp, boolean_t hole, uint64_t txg)
{
	uint64_t offset = (*objectp + 1) << DNODE_SHIFT;
	int error;

	error = dnode_next_offset(DMU_META_DNODE(os),
	    (hole ? DNODE_FIND_HOLE : 0), &offset, 0, DNODES_PER_BLOCK, txg);

	*objectp = offset >> DNODE_SHIFT;

	return (error);
}

/*
 * Turn this object from old_type into DMU_OTN_ZAP_METADATA, and bump the
 * refcount on SPA_FEATURE_EXTENSIBLE_DATASET.
 *
 * Only for use from syncing context, on MOS objects.
 */
void
dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	if (dn->dn_type == DMU_OTN_ZAP_METADATA) {
		dnode_rele(dn, FTAG);
		return;
	}
	ASSERT3U(dn->dn_type, ==, old_type);
	ASSERT0(dn->dn_maxblkid);
	dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
	    DMU_OTN_ZAP_METADATA;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);

	mzap_create_impl(mos, object, 0, 0, tx);

	spa_feature_incr(dmu_objset_spa(mos),
	    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}

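/*
 * Free a MOS object that may have been zapified, dropping the refcount on
 * SPA_FEATURE_EXTENSIBLE_DATASET if it was.
 *
 * Only for use from syncing context, on MOS objects.
 */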
void
dmu_object_free_zapified(objset_t *mos, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	dmu_object_type_t t;

	ASSERT(dmu_tx_is_syncing(tx));

	VERIFY0(dnode_hold(mos, object, FTAG, &dn));
	t = dn->dn_type;
	dnode_rele(dn, FTAG);

	if (t == DMU_OTN_ZAP_METADATA) {
		spa_feature_decr(dmu_objset_spa(mos),
		    SPA_FEATURE_EXTENSIBLE_DATASET, tx);
	}
	VERIFY0(dmu_object_free(mos, object, tx));
}