/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2013 DEY Storage Systems, Inc.
 * Copyright 2014 HybridCluster. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */

#ifndef	_SYS_DMU_H
#define	_SYS_DMU_H

/*
 * This file describes the interface that the DMU provides for its
 * consumers.
 *
 * The DMU also interacts with the SPA. That interface is described in
 * dmu_spa.h.
 */

#include <sys/zfs_context.h>
#include <sys/inttypes.h>
#include <sys/cred.h>
#include <sys/fs/zfs.h>
#include <sys/zio_priority.h>

#ifdef	__cplusplus
extern "C" {
#endif

struct uio;
struct xuio;
struct page;
struct vnode;
struct spa;
struct zilog;
struct zio;
struct blkptr;
struct zap_cursor;
struct dsl_dataset;
struct dsl_pool;
struct dnode;
struct drr_begin;
struct drr_end;
struct zbookmark_phys;
struct spa;
struct nvlist;
struct arc_buf;
struct zio_prop;
struct sa_handle;

typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;

typedef enum dmu_object_byteswap {
	DMU_BSWAP_UINT8,
	DMU_BSWAP_UINT16,
	DMU_BSWAP_UINT32,
	DMU_BSWAP_UINT64,
	DMU_BSWAP_ZAP,
	DMU_BSWAP_DNODE,
	DMU_BSWAP_OBJSET,
	DMU_BSWAP_ZNODE,
	DMU_BSWAP_OLDACL,
	DMU_BSWAP_ACL,
	/*
	 * Allocating a new byteswap type number makes the on-disk format
	 * incompatible with any other format that uses the same number.
	 *
	 * Data can usually be structured to work with one of the
	 * DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
	 */
	DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;

#define	DMU_OT_NEWTYPE		0x80
#define	DMU_OT_METADATA		0x40
#define	DMU_OT_BYTESWAP_MASK	0x3f

/*
 * Defines a uint8_t object type. Object types specify if the data
 * in the object is metadata (boolean) and how to byteswap the data
 * (dmu_object_byteswap_t).
 */
#define	DMU_OT(byteswap, metadata) \
	(DMU_OT_NEWTYPE | \
	((metadata) ? DMU_OT_METADATA : 0) | \
	((byteswap) & DMU_OT_BYTESWAP_MASK))

#define	DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
	(ot) < DMU_OT_NUMTYPES)

#define	DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_METADATA) : \
	dmu_ot[(ot)].ot_metadata)

/*
 * These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
 * have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
 * is repurposed for embedded BPs.
 */
#define	DMU_OT_HAS_FILL(ot) \
	((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)

#define	DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
	((ot) & DMU_OT_BYTESWAP_MASK) : \
	dmu_ot[(ot)].ot_byteswap)
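/*
 * Illustrative sketch (added commentary, not part of the formal interface):
 * a type composed with DMU_OT() round-trips through the query macros above.
 * For a metadata object whose blocks byteswap as uint64_t arrays:
 *
 *	uint8_t ot = DMU_OT(DMU_BSWAP_UINT64, B_TRUE);
 *
 *	ASSERT(DMU_OT_IS_VALID(ot));
 *	ASSERT(DMU_OT_IS_METADATA(ot));
 *	ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_UINT64);
 */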
typedef enum dmu_object_type {
	DMU_OT_NONE,
	/* general: */
	DMU_OT_OBJECT_DIRECTORY,	/* ZAP */
	DMU_OT_OBJECT_ARRAY,		/* UINT64 */
	DMU_OT_PACKED_NVLIST,		/* UINT8 (XDR by nvlist_pack/unpack) */
	DMU_OT_PACKED_NVLIST_SIZE,	/* UINT64 */
	DMU_OT_BPOBJ,			/* UINT64 */
	DMU_OT_BPOBJ_HDR,		/* UINT64 */
	/* spa: */
	DMU_OT_SPACE_MAP_HEADER,	/* UINT64 */
	DMU_OT_SPACE_MAP,		/* UINT64 */
	/* zil: */
	DMU_OT_INTENT_LOG,		/* UINT64 */
	/* dmu: */
	DMU_OT_DNODE,			/* DNODE */
	DMU_OT_OBJSET,			/* OBJSET */
	/* dsl: */
	DMU_OT_DSL_DIR,			/* UINT64 */
	DMU_OT_DSL_DIR_CHILD_MAP,	/* ZAP */
	DMU_OT_DSL_DS_SNAP_MAP,		/* ZAP */
	DMU_OT_DSL_PROPS,		/* ZAP */
	DMU_OT_DSL_DATASET,		/* UINT64 */
	/* zpl: */
	DMU_OT_ZNODE,			/* ZNODE */
	DMU_OT_OLDACL,			/* Old ACL */
	DMU_OT_PLAIN_FILE_CONTENTS,	/* UINT8 */
	DMU_OT_DIRECTORY_CONTENTS,	/* ZAP */
	DMU_OT_MASTER_NODE,		/* ZAP */
	DMU_OT_UNLINKED_SET,		/* ZAP */
	/* zvol: */
	DMU_OT_ZVOL,			/* UINT8 */
	DMU_OT_ZVOL_PROP,		/* ZAP */
	/* other; for testing only! */
	DMU_OT_PLAIN_OTHER,		/* UINT8 */
	DMU_OT_UINT64_OTHER,		/* UINT64 */
	DMU_OT_ZAP_OTHER,		/* ZAP */
	/* new object types: */
	DMU_OT_ERROR_LOG,		/* ZAP */
	DMU_OT_SPA_HISTORY,		/* UINT8 */
	DMU_OT_SPA_HISTORY_OFFSETS,	/* spa_his_phys_t */
	DMU_OT_POOL_PROPS,		/* ZAP */
	DMU_OT_DSL_PERMS,		/* ZAP */
	DMU_OT_ACL,			/* ACL */
	DMU_OT_SYSACL,			/* SYSACL */
	DMU_OT_FUID,			/* FUID table (Packed NVLIST UINT8) */
	DMU_OT_FUID_SIZE,		/* FUID table size UINT64 */
	DMU_OT_NEXT_CLONES,		/* ZAP */
	DMU_OT_SCAN_QUEUE,		/* ZAP */
	DMU_OT_USERGROUP_USED,		/* ZAP */
	DMU_OT_USERGROUP_QUOTA,		/* ZAP */
	DMU_OT_USERREFS,		/* ZAP */
	DMU_OT_DDT_ZAP,			/* ZAP */
	DMU_OT_DDT_STATS,		/* ZAP */
	DMU_OT_SA,			/* System attr */
	DMU_OT_SA_MASTER_NODE,		/* ZAP */
	DMU_OT_SA_ATTR_REGISTRATION,	/* ZAP */
	DMU_OT_SA_ATTR_LAYOUTS,		/* ZAP */
	DMU_OT_SCAN_XLATE,		/* ZAP */
	DMU_OT_DEDUP,			/* fake dedup BP from ddt_bp_create() */
	DMU_OT_DEADLIST,		/* ZAP */
	DMU_OT_DEADLIST_HDR,		/* UINT64 */
	DMU_OT_DSL_CLONES,		/* ZAP */
	DMU_OT_BPOBJ_SUBOBJ,		/* UINT64 */
	/*
	 * Do not allocate new object types here. Doing so makes the on-disk
	 * format incompatible with any other format that uses the same object
	 * type number.
	 *
	 * When creating an object which does not have one of the above types,
	 * use the DMU_OTN_* type with the correct byteswap and metadata
	 * values.
	 *
	 * The DMU_OTN_* types do not have entries in the dmu_ot table;
	 * use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
	 * of indexing into dmu_ot directly (this works for both DMU_OT_* types
	 * and DMU_OTN_* types).
	 */
	DMU_OT_NUMTYPES,

	/*
	 * Names for valid types declared with DMU_OT().
	 */
	DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE),
	DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE),
	DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE),
	DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE),
	DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE),
	DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE),
	DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE),
	DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE),
	DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE),
	DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE),
} dmu_object_type_t;
typedef enum txg_how {
	TXG_WAIT = 1,
	TXG_NOWAIT,
	TXG_WAITED,
} txg_how_t;

void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);

#define	DS_FIND_SNAPSHOTS	(1<<0)
#define	DS_FIND_CHILDREN	(1<<1)
#define	DS_FIND_SERIALIZE	(1<<2)

/*
 * The maximum number of bytes that can be accessed as part of one
 * operation, including metadata.
 */
#define	DMU_MAX_ACCESS (32 * 1024 * 1024) /* 32MB */
#define	DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */

#define	DMU_USERUSED_OBJECT	(-1ULL)
#define	DMU_GROUPUSED_OBJECT	(-2ULL)

/*
 * artificial blkids for bonus buffer and spill blocks
 */
#define	DMU_BONUS_BLKID		(-1ULL)
#define	DMU_SPILL_BLKID		(-2ULL)
/*
 * Public routines to create, destroy, open, and close objsets.
 */
int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
    boolean_t readonly, void *tag, objset_t **osp);
void dmu_objset_rele(objset_t *os, void *tag);
void dmu_objset_disown(objset_t *os, void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);

void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
    void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx),
    void *arg);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
    struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_snapshot_tmp(const char *, const char *, int);
int dmu_objset_find(char *name, int func(const char *, void *), void *arg,
    int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
    const char *oldsnapname, const char *newsnapname, boolean_t recursive);
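/*
 * Illustrative hold/release pairing (a sketch, not a normative example;
 * uses the kernel context's FTAG convention):
 *
 *	objset_t *os;
 *	int error = dmu_objset_hold("pool/fs", FTAG, &os);
 *	if (error != 0)
 *		return (error);
 *	... read-only access to os ...
 *	dmu_objset_rele(os, FTAG);
 *
 * Long-lived or writing consumers typically use dmu_objset_own() and
 * dmu_objset_disown() instead of hold/rele.
 */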
typedef struct dmu_buf {
	uint64_t db_object;	/* object that this buffer is part of */
	uint64_t db_offset;	/* byte offset in this object */
	uint64_t db_size;	/* size of buffer in bytes */
	void *db_data;		/* data in buffer */
} dmu_buf_t;

/*
 * The names of zap entries in the DIRECTORY_OBJECT of the MOS.
 */
#define	DMU_POOL_DIRECTORY_OBJECT	1
#define	DMU_POOL_CONFIG			"config"
#define	DMU_POOL_FEATURES_FOR_WRITE	"features_for_write"
#define	DMU_POOL_FEATURES_FOR_READ	"features_for_read"
#define	DMU_POOL_FEATURE_DESCRIPTIONS	"feature_descriptions"
#define	DMU_POOL_FEATURE_ENABLED_TXG	"feature_enabled_txg"
#define	DMU_POOL_ROOT_DATASET		"root_dataset"
#define	DMU_POOL_SYNC_BPOBJ		"sync_bplist"
#define	DMU_POOL_ERRLOG_SCRUB		"errlog_scrub"
#define	DMU_POOL_ERRLOG_LAST		"errlog_last"
#define	DMU_POOL_SPARES			"spares"
#define	DMU_POOL_DEFLATE		"deflate"
#define	DMU_POOL_HISTORY		"history"
#define	DMU_POOL_PROPS			"pool_props"
#define	DMU_POOL_L2CACHE		"l2cache"
#define	DMU_POOL_TMP_USERREFS		"tmp_userrefs"
#define	DMU_POOL_DDT			"DDT-%s-%s-%s"
#define	DMU_POOL_DDT_STATS		"DDT-statistics"
#define	DMU_POOL_CREATION_VERSION	"creation_version"
#define	DMU_POOL_SCAN			"scan"
#define	DMU_POOL_FREE_BPOBJ		"free_bpobj"
#define	DMU_POOL_BPTREE_OBJ		"bptree_obj"
#define	DMU_POOL_EMPTY_BPOBJ		"empty_bpobj"

/*
 * Allocate an object from this objset. The range of object numbers
 * available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode.
 *
 * The transaction must be assigned to a txg. The newly allocated
 * object will be "held" in the transaction (ie. you can modify the
 * newly allocated object in this transaction).
 *
 * dmu_object_alloc() chooses a free object number and returns it.
 *
 * dmu_object_claim() allocates a specific object number. If that
 * number is already allocated, it fails and returns EEXIST.
 *
 * Return 0 on success, or ENOSPC or EEXIST as specified above.
 */
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
    int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
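/*
 * Illustrative allocation sketch (assumes "os" is a held objset and error
 * handling is abbreviated; the bonus hold on DMU_NEW_OBJECT reserves room
 * in the tx for the to-be-created dnode):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	object = dmu_object_alloc(os, DMU_OT_UINT64_OTHER, 0,
 *	    DMU_OT_NONE, 0, tx);
 *	dmu_tx_commit(tx);
 */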
/*
 * Free an object from this objset.
 *
 * The object's data will be freed as well (ie. you don't need to call
 * dmu_free(object, 0, -1, tx)).
 *
 * The object need not be held in the transaction.
 *
 * If there are any holds on this object's buffers (via dmu_buf_hold()),
 * or tx holds on the object (via dmu_tx_hold_object()), you can not
 * free it; it fails and returns EBUSY.
 *
 * If the object is not allocated, it fails and returns ENOENT.
 *
 * Return 0 on success, or EBUSY or ENOENT as specified above.
 */
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);

/*
 * Find the next allocated or free object.
 *
 * The objectp parameter is in-out. It will be updated to be the next
 * object which is allocated. Ignore objects which have not been
 * modified since txg.
 *
 * XXX Can only be called on an objset with no dirty data.
 *
 * Returns 0 on success, or ENOENT if there are no more objects.
 */
int dmu_object_next(objset_t *os, uint64_t *objectp,
    boolean_t hole, uint64_t txg);

/*
 * Set the data blocksize for an object.
 *
 * The object cannot have any blocks allocated beyond the first. If
 * the first block is allocated already, the new size must be greater
 * than the current block size. If these conditions are not met,
 * ENOTSUP will be returned.
 *
 * Returns 0 on success, or EBUSY if there are any holds on the object
 * contents, or ENOTSUP as described above.
 */
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
    int ibs, dmu_tx_t *tx);

/*
 * Set the checksum property on a dnode. The new checksum algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx);

/*
 * Set the compress property on a dnode. The new compression algorithm will
 * apply to all newly written blocks; existing blocks will not be affected.
 */
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx);

void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx);

/*
 * Decide how to write a block: checksum, compression, number of copies, etc.
 */
#define	WP_NOFILL	0x1
#define	WP_DMU_SYNC	0x2
#define	WP_SPILL	0x4

void dmu_write_policy(objset_t *os, struct dnode *dn, int level, int wp,
    struct zio_prop *zp);

/*
 * The bonus data is accessed more or less like a regular buffer.
 * You must dmu_bonus_hold() to get the buffer, which will give you a
 * dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
 * data. As with any normal buffer, you must call dmu_buf_read() to
 * read db_data, dmu_buf_will_dirty() before modifying it, and the
 * object must be held in an assigned transaction before calling
 * dmu_buf_will_dirty. You may use dmu_buf_set_user() on the bonus
 * buffer as well. You must release your hold with dmu_buf_rele().
 *
 * Returns ENOENT, EIO, or 0.
 */
int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
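/*
 * Illustrative bonus buffer access (a sketch; assumes "tx" is already
 * assigned and holds "object"):
 *
 *	dmu_buf_t *db;
 *	int error = dmu_bonus_hold(os, object, FTAG, &db);
 *	if (error != 0)
 *		return (error);
 *	dmu_buf_will_dirty(db, tx);
 *	... modify up to db->db_size bytes at db->db_data ...
 *	dmu_buf_rele(db, FTAG);
 */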
/*
 * Special spill buffer support used by "SA" framework
 */

int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(struct dnode *dn, uint32_t flags,
    void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);

/*
 * Obtain the DMU buffer from the specified object which contains the
 * specified offset. dmu_buf_hold() puts a "hold" on the buffer, so
 * that it will remain in memory. You must release the hold with
 * dmu_buf_rele(). You mustn't access the dmu_buf_t after releasing your
 * hold. You must have a hold on any dmu_buf_t* you pass to the DMU.
 *
 * You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
 * on the returned buffer before reading or writing the buffer's
 * db_data. The comments for those routines describe what particular
 * operations are valid after calling them.
 *
 * The object number must be a valid, allocated object number.
 */
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    void *tag, dmu_buf_t **, int flags);

/*
 * Add a reference to a dmu buffer that has already been held via
 * dmu_buf_hold() in the current context.
 */
void dmu_buf_add_ref(dmu_buf_t *db, void *tag);

/*
 * Attempt to add a reference to a dmu buffer that is in an unknown state,
 * using a pointer that may have been invalidated by eviction processing.
 * The request will succeed if the passed in dbuf still represents the
 * same os/object/blkid, is ineligible for eviction, and has at least
 * one hold by a user other than the syncer.
 */
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
    uint64_t blkid, void *tag);

void dmu_buf_rele(dmu_buf_t *db, void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);
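/*
 * Illustrative hold/dirty/release sequence (a sketch; assumes "object" is
 * allocated and "tx" is assigned and holds it):
 *
 *	dmu_buf_t *db;
 *	int error = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (error != 0)
 *		return (error);
 *	dmu_buf_will_dirty(db, tx);
 *	... read or modify db->db_data ...
 *	dmu_buf_rele(db, FTAG);
 */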
/*
 * dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
 * range of an object. A pointer to an array of dmu_buf_t*'s is
 * returned (in *dbpp).
 *
 * dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
 * frees the array. The hold on the array of buffers MUST be released
 * with dmu_buf_rele_array. You can NOT release the hold on each buffer
 * individually with dmu_buf_rele.
 */
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
    uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);

typedef void dmu_buf_evict_func_t(void *user_ptr);

/*
 * A DMU buffer user object may be associated with a dbuf for the
 * duration of its lifetime. This allows the user of a dbuf (client)
 * to attach private data to a dbuf (e.g. in-core only data such as a
 * dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
 * when that dbuf has been evicted. Clients typically respond to the
 * eviction notification by freeing their private data, thus ensuring
 * the same lifetime for both dbuf and private data.
 *
 * The mapping from a dmu_buf_user_t to any client private data is the
 * client's responsibility. All current consumers of the API with private
 * data embed a dmu_buf_user_t as the first member of the structure for
 * their private data. This allows conversions between the two types
 * with a simple cast. Since the DMU buf user API never needs access
 * to the private data, other strategies can be employed if necessary
 * or convenient for the client (e.g. using container_of() to do the
 * conversion for private data that cannot have the dmu_buf_user_t as
 * its first member).
 *
 * Eviction callbacks are executed without the dbuf mutex held or any
 * other type of mechanism to guarantee that the dbuf is still available.
 * For this reason, users must assume the dbuf has already been freed
 * and not reference the dbuf from the callback context.
 *
 * Users requesting "immediate eviction" are notified as soon as the dbuf
 * is only referenced by dirty records (dirties == holds). Otherwise the
 * notification occurs after eviction processing for the dbuf begins.
 */
typedef struct dmu_buf_user {
	/*
	 * Asynchronous user eviction callback state.
	 */
	taskq_ent_t	dbu_tqent;

	/*
	 * This instance's eviction function pointers.
	 *
	 * dbu_evict_func_sync is called synchronously and then
	 * dbu_evict_func_async is executed asynchronously on a taskq.
	 */
	dmu_buf_evict_func_t *dbu_evict_func_sync;
	dmu_buf_evict_func_t *dbu_evict_func_async;
#ifdef ZFS_DEBUG
	/*
	 * Pointer to user's dbuf pointer. NULL for clients that do
	 * not associate a dbuf with their user data.
	 *
	 * The dbuf pointer is cleared upon eviction so as to catch
	 * use-after-evict bugs in clients.
	 */
	dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;

/*
 * Initialize the given dmu_buf_user_t instance with the eviction function
 * evict_func, to be called when the user is evicted.
 *
 * NOTE: This function should only be called once on a given dmu_buf_user_t.
 * To allow enforcement of this, dbu must already be zeroed on entry.
 */
#ifdef __lint
/* Very ugly, but it beats issuing suppression directives in many Makefiles. */
extern void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func,
    dmu_buf_evict_func_t *evict_func_async, dmu_buf_t **clear_on_evict_dbufp);
#else /* __lint */
inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async, dmu_buf_t **clear_on_evict_dbufp)
{
	ASSERT(dbu->dbu_evict_func_sync == NULL);
	ASSERT(dbu->dbu_evict_func_async == NULL);

	/* must have at least one evict func */
	IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
	dbu->dbu_evict_func_sync = evict_func_sync;
	dbu->dbu_evict_func_async = evict_func_async;
#ifdef ZFS_DEBUG
	dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
#endif
}
#endif /* __lint */

/*
 * Attach user data to a dbuf and mark it for normal (when the dbuf's
 * data is cleared or its reference count goes to zero) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Attach user data to a dbuf and mark it for immediate (its dirty and
 * reference counts are equal) eviction processing.
 *
 * Returns NULL on success, or the existing user if another user currently
 * owns the buffer.
 */
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Replace the current user of a dbuf.
 *
 * If given the current user of a dbuf, replaces the dbuf's user with
 * "new_user" and returns the user data pointer that was replaced.
 * Otherwise returns the current, and unmodified, dbuf user pointer.
 */
void *dmu_buf_replace_user(dmu_buf_t *db,
    dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);

/*
 * Remove the specified user data for a DMU buffer.
 *
 * Returns the user that was removed on success, or the current user if
 * another user currently owns the buffer.
 */
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);

/*
 * Returns the user data (dmu_buf_user_t *) associated with this dbuf.
 */
void *dmu_buf_get_user(dmu_buf_t *db);

/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);
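/*
 * Illustrative client embedding (the names here are hypothetical). The
 * dmu_buf_user_t is the first member, so the eviction callback can cast its
 * argument back to the private data; kmem_zalloc() satisfies the requirement
 * that the dbu be zeroed before dmu_buf_init_user():
 *
 *	typedef struct my_data {
 *		dmu_buf_user_t	md_dbu;		(must be first)
 *		... client state ...
 *	} my_data_t;
 *
 *	static void
 *	my_evict_sync(void *dbu)
 *	{
 *		kmem_free(dbu, sizeof (my_data_t));
 *	}
 *
 *	my_data_t *md = kmem_zalloc(sizeof (*md), KM_SLEEP);
 *	dmu_buf_init_user(&md->md_dbu, my_evict_sync, NULL, NULL);
 *	VERIFY3P(dmu_buf_set_user(db, &md->md_dbu), ==, NULL);
 */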
/*
 * Returns the blkptr associated with this dbuf, or NULL if not set.
 */
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);

/*
 * Indicate that you are going to modify the buffer's data (db_data).
 *
 * The transaction (tx) must be assigned to a txg (ie. you've called
 * dmu_tx_assign()). The buffer's object must be held in the tx
 * (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
 */
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);

/*
 * Tells if the given dbuf is freeable.
 */
boolean_t dmu_buf_freeable(dmu_buf_t *);

/*
 * You must create a transaction, then hold the objects which you will
 * (or might) modify as part of this transaction. Then you must assign
 * the transaction to a transaction group. Once the transaction has
 * been assigned, you can modify buffers which belong to held objects as
 * part of this transaction. You can't modify buffers before the
 * transaction has been assigned; you can't modify buffers which don't
 * belong to objects which this transaction holds; you can't hold
 * objects once the transaction has been assigned. You may hold an
 * object which you are going to free (with dmu_object_free()), but you
 * don't have to.
 *
 * You can abort the transaction before it has been assigned.
 *
 * Note that you may hold buffers (with dmu_buf_hold) at any time,
 * regardless of transaction state.
 */

#define	DMU_NEW_OBJECT	(-1ULL)
#define	DMU_OBJECT_END	(-1ULL)

dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
    uint64_t len);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, enum txg_how txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
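/*
 * Illustrative write transaction following the lifecycle above (a sketch;
 * see also the convenience routines such as dmu_write() below):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, offset, size);
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	if (error != 0) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, offset, size, buf, tx);
 *	dmu_tx_commit(tx);
 */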
/*
 * To register a commit callback, dmu_tx_callback_register() must be called.
 *
 * dcb_data is a pointer to caller private data that is passed on as a
 * callback parameter. The caller is responsible for properly allocating and
 * freeing it.
 *
 * When registering a callback, the transaction must already have been
 * created, but it cannot be committed or aborted. It can be assigned to a
 * txg or not.
 *
 * The callback will be called after the transaction has been safely written
 * to stable storage and will also be called if the dmu_tx is aborted.
 * If there is any error which prevents the transaction from being committed to
 * disk, the callback will be called with a value of error != 0.
 */
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);

void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
    void *dcb_data);
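/*
 * Illustrative registration (the callback and argument names are
 * hypothetical):
 *
 *	static void
 *	my_commit_cb(void *dcb_data, int error)
 *	{
 *		if (error != 0)
 *			... the tx was not committed to disk ...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_arg);
 */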
/*
 * Free up the data blocks for a defined range of a file. If size is
 * -1, the range from offset to end-of-file is freed.
 */
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);

/*
 * Convenience functions.
 *
 * Canfail routines will return 0 on success, or an errno if there is a
 * nonrecoverable I/O error.
 */
#define	DMU_READ_PREFETCH	0 /* prefetch */
#define	DMU_READ_NO_PREFETCH	1 /* don't prefetch */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx);
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio,
    uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio,
    uint64_t size, dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
    dmu_tx_t *tx);
int dmu_write_pages(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, struct page *pp, dmu_tx_t *tx);
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf(dmu_buf_t *handle, uint64_t offset, struct arc_buf *buf,
    dmu_tx_t *tx);
int dmu_xuio_init(struct xuio *uio, int niov);
void dmu_xuio_fini(struct xuio *uio);
int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
    size_t n);
int dmu_xuio_cnt(struct xuio *uio);
struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
void dmu_xuio_clear(struct xuio *uio, int i);
void xuio_stat_wbuf_copied();
void xuio_stat_wbuf_nocopy();

extern int zfs_prefetch_disable;
extern int zfs_max_recordsize;

/*
 * Asynchronously try to read in the data.
 */
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, enum zio_priority pri);

typedef struct dmu_object_info {
	/* All sizes are in bytes unless otherwise indicated. */
	uint32_t doi_data_block_size;
	uint32_t doi_metadata_block_size;
	dmu_object_type_t doi_type;
	dmu_object_type_t doi_bonus_type;
	uint64_t doi_bonus_size;
	uint8_t doi_indirection;		/* 2 = dnode->indirect->data */
	uint8_t doi_checksum;
	uint8_t doi_compress;
	uint8_t doi_nblkptr;
	uint8_t doi_pad[4];
	uint64_t doi_physical_blocks_512;	/* data + metadata, 512b blks */
	uint64_t doi_max_offset;
	uint64_t doi_fill_count;		/* number of non-empty blocks */
} dmu_object_info_t;

typedef void arc_byteswap_func_t(void *buf, size_t size);

typedef struct dmu_object_type_info {
	dmu_object_byteswap_t	ot_byteswap;
	boolean_t		ot_metadata;
	char			*ot_name;
} dmu_object_type_info_t;

typedef struct dmu_object_byteswap_info {
	arc_byteswap_func_t	*ob_func;
	char			*ob_name;
} dmu_object_byteswap_info_t;

extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];

/*
 * Get information on a DMU object.
 *
 * Return 0 on success or ENOENT if object is not allocated.
 *
 * If doi is NULL, just indicates whether the object exists.
 */
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
 * Like dmu_object_info_from_db, but faster still when you only care about
 * the size. This is specifically optimized for zfs_getattr().
 */
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
    u_longlong_t *nblk512);

typedef struct dmu_objset_stats {
	uint64_t dds_num_clones;	/* number of clones of this */
	uint64_t dds_creation_txg;
	uint64_t dds_guid;
	dmu_objset_type_t dds_type;
	uint8_t dds_is_snapshot;
	uint8_t dds_inconsistent;
	char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;

/*
 * Get stats on a dataset.
 */
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);

/*
 * Add entries to the nvlist for all the objset's properties. See
 * zfs_prop_table[] and zfs(1m) for details on the properties.
 */
void dmu_objset_stats(objset_t *os, struct nvlist *nv);

/*
 * Get the space usage statistics for statvfs().
 *
 * refdbytes is the amount of space "referenced" by this objset.
 * availbytes is the amount of space available to this objset, taking
 * into account quotas & reservations, assuming that no other objsets
 * use the space first. These values correspond to the 'referenced' and
 * 'available' properties, described in the zfs(1m) manpage.
 *
 * usedobjs and availobjs are the number of objects currently allocated,
 * and available.
 */
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
    uint64_t *usedobjsp, uint64_t *availobjsp);
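/*
 * Illustrative statvfs()-style usage, along the lines of zfs_statvfs()
 * (the bshift scaling shown is a sketch, not a requirement):
 *
 *	uint64_t refdbytes, availbytes, usedobjs, availobjs;
 *	dmu_objset_space(os, &refdbytes, &availbytes,
 *	    &usedobjs, &availobjs);
 *	statp->f_blocks = (refdbytes + availbytes) >> bshift;
 *	statp->f_bfree = availbytes >> bshift;
 */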
/*
 * The fsid_guid is a 56-bit ID that can change to avoid collisions.
 * (Contrast with the ds_guid which is a 64-bit ID that will never
 * change, so there is a small probability that it will collide.)
 */
uint64_t dmu_objset_fsid_guid(objset_t *os);

/*
 * Get the [cm]time for an objset's snapshot dir
 */
timestruc_t dmu_objset_snap_cmtime(objset_t *os);

int dmu_objset_is_snapshot(objset_t *os);

extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
    uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_realname(objset_t *os, char *name, char *real,
    int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
    uint64_t *idp, uint64_t *offp);

typedef int objset_used_cb_t(dmu_object_type_t bonustype,
    void *bonus, uint64_t *userp, uint64_t *groupp);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
    objset_used_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);

/*
 * Return the txg number for the given assigned transaction.
 */
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);

/*
 * Synchronous write.
 * If a parent zio is provided, this function initiates a write on the
 * provided buffer as a child of the parent zio.
 * In the absence of a parent zio, the write is completed synchronously.
 * At write completion, blk is filled with the bp of the written block.
 * Note that while the data covered by this function will be on stable
 * storage when the write completes, this new data does not become a
 * permanent part of the file until the associated transaction commits.
 */

/*
 * {zfs,zvol,ztest}_get_done() args
 */
typedef struct zgd {
	struct zilog	*zgd_zilog;
	struct blkptr	*zgd_bp;
	dmu_buf_t	*zgd_db;
	struct rl	*zgd_rl;
	void		*zgd_private;
} zgd_t;

typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);

/*
 * Find the next hole or data block in a file starting at *off.
 * Return the found offset in *off. Return ESRCH for end of file.
 */
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
    uint64_t *off);
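/*
 * Illustrative SEEK_HOLE-style probe (a sketch):
 *
 *	uint64_t off = start;
 *	error = dmu_offset_next(os, object, B_TRUE, &off);
 *	if (error == 0)
 *		... off is now the offset of the next hole ...
 *	else if (error == ESRCH)
 *		... no hole between start and end of file ...
 */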
/*
 * Check if a DMU object has any dirty blocks. If so, sync out
 * all pending transaction groups. Otherwise, this function
 * does not alter DMU state. This could be improved to only sync
 * out the necessary transaction groups for this particular
 * object.
 */
int dmu_object_wait_synced(objset_t *os, uint64_t object);

/*
 * Initial setup and final teardown.
 */
extern void dmu_init(void);
extern void dmu_fini(void);

typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
    uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
    dmu_traverse_cb_t cb, void *arg);

int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
    struct vnode *vp, offset_t *offp);

/* CRC64 table */
#define	ZFS_CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];

extern int zfs_mdcomp_disable;

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DMU_H */