/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/brt.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_racct.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif

/*
 * Enable/disable nopwrite feature.
 */
static int zfs_nopwrite_enabled = 1;

/*
 * Tunable to control the percentage of dirtied L1 blocks from frees allowed
 * into one TXG. After this threshold is crossed, additional dirty blocks from
 * frees will wait until the next TXG.
 * A value of zero will disable this throttle.
 */
static uint_t zfs_per_txg_dirty_frees_percent = 30;

/*
 * Enable/disable forcing txg sync when dirty checking for holes with lseek().
 * By default this is enabled to ensure accurate hole reporting, but it can
 * result in a significant performance penalty for lseek(SEEK_HOLE) heavy
 * workloads. Disabling this option will result in holes never being reported
 * in dirty files, which is always safe.
 */
static int zfs_dmu_offset_next_sync = 1;

/*
 * Limit the amount we can prefetch with one call to this amount.  This
 * helps to limit the amount of memory that can be used by prefetching.
 * Larger objects should be prefetched a bit at a time.
 */
uint_t dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
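
/*
 * For example (illustrative arithmetic, assuming the usual 16 MiB
 * SPA_MAXBLOCKSIZE): dmu_prefetch_max defaults to 8 * 16 MiB = 128 MiB,
 * so a single dmu_prefetch() call issues prefetch i/os for at most
 * 128 MiB of data; longer requests are silently clipped to that length.
 */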

const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
	{DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
	{DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
	{DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
	{DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
	{DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
	{DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
	{DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
	{DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
	{DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
	{DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
	{DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
	{DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
};

dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
	{ byteswap_uint8_array, "uint8" },
	{ byteswap_uint16_array, "uint16" },
	{ byteswap_uint32_array, "uint32" },
	{ byteswap_uint64_array, "uint64" },
	{ zap_byteswap, "zap" },
	{ dnode_buf_byteswap, "dnode" },
	{ dmu_objset_byteswap, "objset" },
	{ zfs_znode_byteswap, "znode" },
	{ zfs_oldacl_byteswap, "oldacl" },
	{ zfs_acl_byteswap, "acl" }
};

static int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	uint64_t blkid;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	uint64_t blkid;
	dmu_buf_impl_t *db;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	blkid = dbuf_whichblock(dn, 0, offset);
	db = dbuf_hold(dn, blkid, tag);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}

	*dbp = &db->db;
	return (err);
}

int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}

int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
    const void *tag, dmu_buf_t **dbp, int flags)
{
	int err;
	int db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
	if (err == 0) {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
		err = dbuf_read(db, NULL, db_flags);
		if (err != 0) {
			dbuf_rele(db, tag);
			*dbp = NULL;
		}
	}

	return (err);
}
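
/*
 * Example (an illustrative sketch, not part of the DMU itself): a caller
 * with a held objset "os" typically pairs dmu_buf_hold() with
 * dmu_buf_rele(); "object" and "offset" are caller-supplied:
 *
 *	dmu_buf_t *db;
 *	int err = dmu_buf_hold(os, object, offset, FTAG, &db,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0) {
 *		(consume db->db_data, valid for db->db_size bytes)
 *		dmu_buf_rele(db, FTAG);
 *	}
 */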

int
dmu_bonus_max(void)
{
	return (DN_OLD_MAX_BONUSLEN);
}

int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else if (newsize < 0 || newsize > db_fake->db_size) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonuslen(dn, newsize, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int error;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (!DMU_OT_IS_VALID(type)) {
		error = SET_ERROR(EINVAL);
	} else if (dn->dn_bonus != db) {
		error = SET_ERROR(EINVAL);
	} else {
		dnode_setbonus_type(dn, type, tx);
		error = 0;
	}

	DB_DNODE_EXIT(db);
	return (error);
}

dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	dmu_object_type_t type;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	type = dn->dn_bonustype;
	DB_DNODE_EXIT(db);

	return (type);
}

int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);
	dbuf_rm_spill(dn, tx);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_rm_spill(dn, tx);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}

/*
 * Lookup and hold the bonus buffer for the provided dnode.  If the dnode
 * has not yet been allocated a bonus dbuf, one will be allocated.
 * Returns ENOENT, EIO, or 0.
 */
int
dmu_bonus_hold_by_dnode(dnode_t *dn, const void *tag, dmu_buf_t **dbp,
    uint32_t flags)
{
	dmu_buf_impl_t *db;
	int error;
	uint32_t db_flags = DB_RF_MUST_SUCCEED;

	if (flags & DMU_READ_NO_PREFETCH)
		db_flags |= DB_RF_NOPREFETCH;
	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_bonus == NULL) {
		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
		if (dn->dn_bonus == NULL)
			dbuf_create_bonus(dn);
	}
	db = dn->dn_bonus;

	/* as long as the bonus buf is held, the dnode will be held */
	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
		VERIFY(dnode_add_ref(dn, db));
		atomic_inc_32(&dn->dn_dbufs_count);
	}

	/*
	 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
	 * hold and incrementing the dbuf count to ensure that dnode_move() sees
	 * a dnode hold for every dbuf.
	 */
	rw_exit(&dn->dn_struct_rwlock);

	error = dbuf_read(db, NULL, db_flags);
	if (error) {
		dnode_evict_bonus(dn);
		dbuf_rele(db, tag);
		*dbp = NULL;
		return (error);
	}

	*dbp = &db->db;
	return (0);
}

int
dmu_bonus_hold(objset_t *os, uint64_t object, const void *tag, dmu_buf_t **dbp)
{
	dnode_t *dn;
	int error;

	error = dnode_hold(os, object, FTAG, &dn);
	if (error)
		return (error);

	error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
	dnode_rele(dn, FTAG);

	return (error);
}
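
/*
 * Example (an illustrative sketch): a consumer such as the znode layer
 * reads an object's persistent attributes out of its bonus buffer;
 * "os" and "object" are caller-supplied:
 *
 *	dmu_buf_t *db;
 *	if (dmu_bonus_hold(os, object, FTAG, &db) == 0) {
 *		(decode up to db->db_size bytes at db->db_data)
 *		dmu_buf_rele(db, FTAG);
 *	}
 */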

/*
 * Returns ENOENT, EIO, or 0.
 *
 * This interface will allocate a blank spill dbuf when a spill blk
 * doesn't already exist on the dnode.
 *
 * If you only want to find an already existing spill db, then
 * dmu_spill_hold_existing() should be used.
 */
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = NULL;
	int err;

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

	db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);

	if ((flags & DB_RF_HAVESTRUCT) == 0)
		rw_exit(&dn->dn_struct_rwlock);

	if (db == NULL) {
		*dbp = NULL;
		return (SET_ERROR(EIO));
	}
	err = dbuf_read(db, NULL, flags);
	if (err == 0)
		*dbp = &db->db;
	else {
		dbuf_rele(db, tag);
		*dbp = NULL;
	}
	return (err);
}

int
dmu_spill_hold_existing(dmu_buf_t *bonus, const void *tag, dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
		err = SET_ERROR(EINVAL);
	} else {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);

		if (!dn->dn_have_spill) {
			err = SET_ERROR(ENOENT);
		} else {
			err = dmu_spill_hold_by_dnode(dn,
			    DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
		}

		rw_exit(&dn->dn_struct_rwlock);
	}

	DB_DNODE_EXIT(db);
	return (err);
}

int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, const void *tag,
    dmu_buf_t **dbp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
	dnode_t *dn;
	int err;
	uint32_t db_flags = DB_RF_CANFAIL;

	if (flags & DMU_READ_NO_DECRYPT)
		db_flags |= DB_RF_NO_DECRYPT;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
	DB_DNODE_EXIT(db);

	return (err);
}
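
/*
 * Example (an illustrative sketch): given a held bonus dbuf "bonus_db",
 * an SA-style consumer looks up an existing spill block like this;
 * ENOENT means the dnode has no spill block:
 *
 *	dmu_buf_t *spill_db;
 *	int err = dmu_spill_hold_existing(bonus_db, FTAG, &spill_db);
 *	if (err == 0)
 *		dmu_buf_rele(spill_db, FTAG);
 */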

/*
 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
 * and can induce severe lock contention when writing to several files
 * whose dnodes are in the same block.
 */
int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
    boolean_t read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp,
    uint32_t flags)
{
	dmu_buf_t **dbp;
	zstream_t *zs = NULL;
	uint64_t blkid, nblks, i;
	uint32_t dbuf_flags;
	int err;
	zio_t *zio = NULL;
	boolean_t missed = B_FALSE;

	ASSERT(!read || length <= DMU_MAX_ACCESS);

	/*
	 * Note: We directly notify the prefetch code of this read, so that
	 * we can tell it about the multi-block read.  dbuf_read() only knows
	 * about the one block it is accessing.
	 */
	dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
	    DB_RF_NOPREFETCH;

	if ((flags & DMU_READ_NO_DECRYPT) != 0)
		dbuf_flags |= DB_RF_NO_DECRYPT;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (dn->dn_datablkshift) {
		int blkshift = dn->dn_datablkshift;
		nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
		    P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
	} else {
		if (offset + length > dn->dn_datablksz) {
			zfs_panic_recover("zfs: accessing past end of object "
			    "%llx/%llx (size=%u access=%llu+%llu)",
			    (longlong_t)dn->dn_objset->
			    os_dsl_dataset->ds_object,
			    (longlong_t)dn->dn_object, dn->dn_datablksz,
			    (longlong_t)offset, (longlong_t)length);
			rw_exit(&dn->dn_struct_rwlock);
			return (SET_ERROR(EIO));
		}
		nblks = 1;
	}
	dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);

	if (read)
		zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
		    ZIO_FLAG_CANFAIL);
	blkid = dbuf_whichblock(dn, 0, offset);
	if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
	    length <= zfetch_array_rd_sz) {
		/*
		 * Prepare the zfetch before initiating the demand reads, so
		 * that if multiple threads block on same indirect block, we
		 * base predictions on the original less racy request order.
		 */
		zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks, read,
		    B_TRUE);
	}
	for (i = 0; i < nblks; i++) {
		dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
		if (db == NULL) {
			if (zs)
				dmu_zfetch_run(zs, missed, B_TRUE);
			rw_exit(&dn->dn_struct_rwlock);
			dmu_buf_rele_array(dbp, nblks, tag);
			if (read)
				zio_nowait(zio);
			return (SET_ERROR(EIO));
		}

		/*
		 * Initiate async demand data read.
		 * We check the db_state after calling dbuf_read() because
		 * (1) dbuf_read() may change the state to CACHED due to a
		 * hit in the ARC, and (2) on a cache miss, a child will
		 * have been added to "zio" but not yet completed, so the
		 * state will not yet be CACHED.
		 */
		if (read) {
			if (i == nblks - 1 && blkid + i < dn->dn_maxblkid &&
			    offset + length < db->db.db_offset +
			    db->db.db_size) {
				if (offset <= db->db.db_offset)
					dbuf_flags |= DB_RF_PARTIAL_FIRST;
				else
					dbuf_flags |= DB_RF_PARTIAL_MORE;
			}
			(void) dbuf_read(db, zio, dbuf_flags);
			if (db->db_state != DB_CACHED)
				missed = B_TRUE;
		}
		dbp[i] = &db->db;
	}

	if (!read)
		zfs_racct_write(length, nblks);

	if (zs)
		dmu_zfetch_run(zs, missed, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);

	if (read) {
		/* wait for async read i/o */
		err = zio_wait(zio);
		if (err) {
			dmu_buf_rele_array(dbp, nblks, tag);
			return (err);
		}

		/* wait for other io to complete */
		for (i = 0; i < nblks; i++) {
			dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
			mutex_enter(&db->db_mtx);
			while (db->db_state == DB_READ ||
			    db->db_state == DB_FILL)
				cv_wait(&db->db_changed, &db->db_mtx);
			if (db->db_state == DB_UNCACHED)
				err = SET_ERROR(EIO);
			mutex_exit(&db->db_mtx);
			if (err) {
				dmu_buf_rele_array(dbp, nblks, tag);
				return (err);
			}
		}
	}

	*numbufsp = nblks;
	*dbpp = dbp;
	return (0);
}

int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length, int read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);

	dnode_rele(dn, FTAG);

	return (err);
}

int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
    uint64_t length, boolean_t read, const void *tag, int *numbufsp,
    dmu_buf_t ***dbpp)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;
	int err;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
	    numbufsp, dbpp, DMU_READ_PREFETCH);
	DB_DNODE_EXIT(db);

	return (err);
}

void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, const void *tag)
{
	int i;
	dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;

	if (numbufs == 0)
		return;

	for (i = 0; i < numbufs; i++) {
		if (dbp[i])
			dbuf_rele(dbp[i], tag);
	}

	kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
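
/*
 * Example (an illustrative sketch): multi-block access pairs
 * dmu_buf_hold_array() with dmu_buf_rele_array(); on success the whole
 * range has been read in and validated:
 *
 *	dmu_buf_t **dbp;
 *	int numbufs;
 *	if (dmu_buf_hold_array(os, object, offset, length, TRUE, FTAG,
 *	    &numbufs, &dbp) == 0) {
 *		(copy from each dbp[i]->db_data, as dmu_read_impl() below
 *		does)
 *		dmu_buf_rele_array(dbp, numbufs, FTAG);
 *	}
 */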

/*
 * Issue prefetch i/os for the given blocks.  If level is greater than 0, the
 * indirect blocks prefetched will be those that point to the blocks containing
 * the data starting at offset, and continuing to offset + len.
 *
 * Note that if the indirect blocks above the blocks being prefetched are not
 * in cache, they will be asynchronously read in.
 */
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
    uint64_t len, zio_priority_t pri)
{
	dnode_t *dn;
	uint64_t blkid;
	int nblks, err;

	if (len == 0) {	/* they're interested in the bonus buffer */
		dn = DMU_META_DNODE(os);

		if (object == 0 || object >= DN_MAX_OBJECT)
			return;

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		blkid = dbuf_whichblock(dn, level,
		    object * sizeof (dnode_phys_t));
		dbuf_prefetch(dn, level, blkid, pri, 0);
		rw_exit(&dn->dn_struct_rwlock);
		return;
	}

	/*
	 * See comment before the definition of dmu_prefetch_max.
	 */
	len = MIN(len, dmu_prefetch_max);

	/*
	 * XXX - Note, if the dnode for the requested object is not
	 * already cached, we will do a *synchronous* read in the
	 * dnode_hold() call.  The same is true for any indirects.
	 */
	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return;

	/*
	 * offset + len - 1 is the last byte we want to prefetch for, and offset
	 * is the first.  Then dbuf_whichblock(dn, level, offset + len - 1) is
	 * the last block we want to prefetch, and dbuf_whichblock(dn, level,
	 * offset) is the first.  Then the number we need to prefetch is the
	 * last - first + 1.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	if (level > 0 || dn->dn_datablkshift != 0) {
		nblks = dbuf_whichblock(dn, level, offset + len - 1) -
		    dbuf_whichblock(dn, level, offset) + 1;
	} else {
		nblks = (offset < dn->dn_datablksz);
	}

	if (nblks != 0) {
		blkid = dbuf_whichblock(dn, level, offset);
		for (int i = 0; i < nblks; i++)
			dbuf_prefetch(dn, level, blkid + i, pri, 0);
	}
	rw_exit(&dn->dn_struct_rwlock);

	dnode_rele(dn, FTAG);
}
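
/*
 * Example (an illustrative sketch): warming the first 1 MiB of an
 * object's data (level 0) ahead of a sequential read:
 *
 *	dmu_prefetch(os, object, 0, 0, 1024 * 1024,
 *	    ZIO_PRIORITY_ASYNC_READ);
 *
 * The call is advisory: it returns immediately after issuing async
 * prefetch i/os, and never prefetches more than dmu_prefetch_max bytes.
 */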

/*
 * Get the next "chunk" of file data to free.  We traverse the file from
 * the end so that the file gets shorter over time (if we crash in the
 * middle, this will leave us in a better state).  We find allocated file
 * data by simply searching the allocated level 1 indirects.
 *
 * On input, *start should be the first offset that does not need to be
 * freed (e.g. "offset + length").  On return, *start will be the first
 * offset that should be freed and l1blks is set to the number of level 1
 * indirect blocks found within the chunk.
 */
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
{
	uint64_t blks;
	uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
	/* bytes of data covered by a level-1 indirect block */
	uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
	    EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);

	ASSERT3U(minimum, <=, *start);

	/*
	 * Check if we can free the entire range assuming that all of the
	 * L1 blocks in this range have data.  If we can, we use this
	 * worst case value as an estimate so we can avoid having to look
	 * at the object's actual data.
	 */
	uint64_t total_l1blks =
	    (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
	    iblkrange;
	if (total_l1blks <= maxblks) {
		*l1blks = total_l1blks;
		*start = minimum;
		return (0);
	}
	ASSERT(ISP2(iblkrange));

	for (blks = 0; *start > minimum && blks < maxblks; blks++) {
		int err;

		/*
		 * dnode_next_offset(BACKWARDS) will find an allocated L1
		 * indirect block at or before the input offset.  We must
		 * decrement *start so that it is at the end of the region
		 * to search.
		 */
		(*start)--;

		err = dnode_next_offset(dn,
		    DNODE_FIND_BACKWARDS, start, 2, 1, 0);

		/* if there are no indirect blocks before start, we are done */
		if (err == ESRCH) {
			*start = minimum;
			break;
		} else if (err != 0) {
			*l1blks = blks;
			return (err);
		}

		/* set start to the beginning of this L1 indirect */
		*start = P2ALIGN(*start, iblkrange);
	}
	if (*start < minimum)
		*start = minimum;
	*l1blks = blks;

	return (0);
}
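
/*
 * Worked example of the arithmetic above (illustrative, assuming common
 * defaults): with a 128 KiB data block size and dn_indblkshift = 17
 * (128 KiB indirect blocks, each holding
 * EPB(17, SPA_BLKPTRSHIFT) = 1 << (17 - 7) = 1024 block pointers), a
 * level-1 indirect covers iblkrange = 128 KiB * 1024 = 128 MiB of file
 * data, and chunk boundaries are aligned to that 128 MiB stride.
 */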

/*
 * If this objset is of type OST_ZFS, return true if the vfs's unmounted
 * flag is set; otherwise return false.
 * Used below in dmu_free_long_range_impl() to enable abort when unmounting.
 */
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
	if (dmu_objset_type(os) == DMU_OST_ZFS)
		return (zfs_get_vfs_flag_unmounted(os));
#else
	(void) os;
#endif
	return (B_FALSE);
}

static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
    uint64_t length)
{
	uint64_t object_size;
	int err;
	uint64_t dirty_frees_threshold;
	dsl_pool_t *dp = dmu_objset_pool(os);

	if (dn == NULL)
		return (SET_ERROR(EINVAL));

	object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	if (offset >= object_size)
		return (0);

	if (zfs_per_txg_dirty_frees_percent <= 100)
		dirty_frees_threshold =
		    zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
	else
		dirty_frees_threshold = zfs_dirty_data_max / 20;

	if (length == DMU_OBJECT_END || offset + length > object_size)
		length = object_size - offset;

	while (length != 0) {
		uint64_t chunk_end, chunk_begin, chunk_len;
		uint64_t l1blks;
		dmu_tx_t *tx;

		if (dmu_objset_zfs_unmounting(dn->dn_objset))
			return (SET_ERROR(EINTR));

		chunk_end = chunk_begin = offset + length;

		/* move chunk_begin backwards to the beginning of this chunk */
		err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
		if (err)
			return (err);
		ASSERT3U(chunk_begin, >=, offset);
		ASSERT3U(chunk_begin, <=, chunk_end);

		chunk_len = chunk_end - chunk_begin;

		tx = dmu_tx_create(os);
		dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);

		/*
		 * Mark this transaction as typically resulting in a net
		 * reduction in space used.
		 */
		dmu_tx_mark_netfree(tx);
		err = dmu_tx_assign(tx, TXG_WAIT);
		if (err) {
			dmu_tx_abort(tx);
			return (err);
		}

		uint64_t txg = dmu_tx_get_txg(tx);

		mutex_enter(&dp->dp_lock);
		uint64_t long_free_dirty =
		    dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
		mutex_exit(&dp->dp_lock);

		/*
		 * To avoid filling up a TXG with just frees, wait for
		 * the next TXG to open before freeing more chunks if
		 * we have reached the threshold of frees.
		 */
		if (dirty_frees_threshold != 0 &&
		    long_free_dirty >= dirty_frees_threshold) {
			DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
			dmu_tx_commit(tx);
			txg_wait_open(dp, 0, B_TRUE);
			continue;
		}

		/*
		 * In order to prevent unnecessary write throttling, for each
		 * TXG, we track the cumulative size of L1 blocks being dirtied
		 * in dnode_free_range() below.  We compare this number to a
		 * tunable threshold, past which we prevent new L1 dirty freeing
		 * blocks from being added into the open TXG.  See
		 * dmu_free_long_range_impl() for details.  The threshold
		 * prevents write throttle activation due to dirty freeing L1
		 * blocks taking up a large percentage of zfs_dirty_data_max.
		 */
		mutex_enter(&dp->dp_lock);
		dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
		    l1blks << dn->dn_indblkshift;
		mutex_exit(&dp->dp_lock);
		DTRACE_PROBE3(free__long__range,
		    uint64_t, long_free_dirty, uint64_t, chunk_len,
		    uint64_t, txg);
		dnode_free_range(dn, chunk_begin, chunk_len, tx);

		dmu_tx_commit(tx);

		length -= chunk_len;
	}
	return (0);
}
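
/*
 * Worked example of the threshold above (illustrative): with the default
 * zfs_per_txg_dirty_frees_percent of 30 and a zfs_dirty_data_max of,
 * say, 4 GiB, dirty_frees_threshold is 30% of 4 GiB = 1.2 GiB; once the
 * L1 blocks dirtied by frees in the open TXG reach that size, further
 * chunks wait for the next TXG via txg_wait_open().
 */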

int
dmu_free_long_range(objset_t *os, uint64_t object,
    uint64_t offset, uint64_t length)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);
	err = dmu_free_long_range_impl(os, dn, offset, length);

	/*
	 * It is important to zero out the maxblkid when freeing the entire
	 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
	 * will take the fast path, and (b) dnode_reallocate() can verify
	 * that the entire file has been freed.
	 */
	if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
		dn->dn_maxblkid = 0;

	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_free_long_object(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx;
	int err;

	err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
	if (err != 0)
		return (err);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);
	dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
	dmu_tx_mark_netfree(tx);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err == 0) {
		err = dmu_object_free(os, object, tx);
		dmu_tx_commit(tx);
	} else {
		dmu_tx_abort(tx);
	}

	return (err);
}

int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t size, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	ASSERT(offset < UINT64_MAX);
	ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
	dnode_free_range(dn, offset, size, tx);
	dnode_rele(dn, FTAG);
	return (0);
}
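
/*
 * Example (an illustrative sketch): deleting an object's contents and
 * then the object itself only needs one call; dmu_free_long_object()
 * frees all data in chunked transactions (via dmu_free_long_range())
 * and then destroys the object in a final tx of its own:
 *
 *	int err = dmu_free_long_object(os, object);
 */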

static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dmu_buf_t **dbp;
	int numbufs, err = 0;

	/*
	 * Deal with odd block sizes, where there can't be data past the first
	 * block.  If we ever do the tail block optimization, we will need to
	 * handle that here as well.
	 */
	if (dn->dn_maxblkid == 0) {
		uint64_t newsz = offset > dn->dn_datablksz ? 0 :
		    MIN(size, dn->dn_datablksz - offset);
		memset((char *)buf + newsz, 0, size - newsz);
		size = newsz;
	}

	while (size > 0) {
		uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
		int i;

		/*
		 * NB: we could do this block-at-a-time, but it's nice
		 * to be reading in parallel.
		 */
		err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
		    TRUE, FTAG, &numbufs, &dbp, flags);
		if (err)
			break;

		for (i = 0; i < numbufs; i++) {
			uint64_t tocpy;
			int64_t bufoff;
			dmu_buf_t *db = dbp[i];

			ASSERT(size > 0);

			bufoff = offset - db->db_offset;
			tocpy = MIN(db->db_size - bufoff, size);

			(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);

			offset += tocpy;
			size -= tocpy;
			buf = (char *)buf + tocpy;
		}
		dmu_buf_rele_array(dbp, numbufs, FTAG);
	}
	return (err);
}

int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    void *buf, uint32_t flags)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	err = dmu_read_impl(dn, offset, size, buf, flags);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
    uint32_t flags)
{
	return (dmu_read_impl(dn, offset, size, buf, flags));
}

static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	int i;

	for (i = 0; i < numbufs; i++) {
		uint64_t tocpy;
		int64_t bufoff;
		dmu_buf_t *db = dbp[i];

		ASSERT(size > 0);

		bufoff = offset - db->db_offset;
		tocpy = MIN(db->db_size - bufoff, size);

		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);

		if (tocpy == db->db_size)
			dmu_buf_will_fill(db, tx);
		else
			dmu_buf_will_dirty(db, tx);

		(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);

		if (tocpy == db->db_size)
			dmu_buf_fill_done(db, tx);

		offset += tocpy;
		size -= tocpy;
		buf = (char *)buf + tocpy;
	}
}
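
/*
 * Example (an illustrative sketch): the simple byte-oriented interfaces
 * below wrap the hold-array machinery; a caller that has assigned a tx
 * can copy a buffer through them like this:
 *
 *	char data[512];
 *	int err = dmu_read(os, object, offset, sizeof (data), data,
 *	    DMU_READ_PREFETCH);
 *	if (err == 0)
 *		dmu_write(os, object, offset, sizeof (data), data, tx);
 */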

void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

/*
 * Note: Lustre is an external consumer of this interface.
 */
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
    const void *buf, dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
	dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i;

	if (size == 0)
		return;

	VERIFY0(dmu_buf_hold_array(os, object, offset, size,
	    FALSE, FTAG, &numbufs, &dbp));

	for (i = 0; i < numbufs; i++) {
		dmu_buf_t *db = dbp[i];

		dmu_buf_will_not_fill(db, tx);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
}

void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
    void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
    int compressed_size, int byteorder, dmu_tx_t *tx)
{
	dmu_buf_t *db;

	ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
	ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
	VERIFY0(dmu_buf_hold_noread(os, object, offset,
	    FTAG, &db));

	dmu_buf_write_embedded(db,
	    data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
	    uncompressed_size, compressed_size, byteorder, tx);

	dmu_buf_rele(db, FTAG);
}
1210eda14cbcSMatt Macy 1211eda14cbcSMatt Macy VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG, 1212eda14cbcSMatt Macy &numbufs, &dbp)); 1213eda14cbcSMatt Macy for (i = 0; i < numbufs; i++) 1214eda14cbcSMatt Macy dmu_buf_redact(dbp[i], tx); 1215eda14cbcSMatt Macy dmu_buf_rele_array(dbp, numbufs, FTAG); 1216eda14cbcSMatt Macy } 1217eda14cbcSMatt Macy 1218eda14cbcSMatt Macy #ifdef _KERNEL 1219eda14cbcSMatt Macy int 1220184c1b94SMartin Matuska dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size) 1221eda14cbcSMatt Macy { 1222eda14cbcSMatt Macy dmu_buf_t **dbp; 1223eda14cbcSMatt Macy int numbufs, i, err; 1224eda14cbcSMatt Macy 1225eda14cbcSMatt Macy /* 1226eda14cbcSMatt Macy * NB: we could do this block-at-a-time, but it's nice 1227eda14cbcSMatt Macy * to be reading in parallel. 1228eda14cbcSMatt Macy */ 1229184c1b94SMartin Matuska err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size, 1230eda14cbcSMatt Macy TRUE, FTAG, &numbufs, &dbp, 0); 1231eda14cbcSMatt Macy if (err) 1232eda14cbcSMatt Macy return (err); 1233eda14cbcSMatt Macy 1234eda14cbcSMatt Macy for (i = 0; i < numbufs; i++) { 1235eda14cbcSMatt Macy uint64_t tocpy; 1236eda14cbcSMatt Macy int64_t bufoff; 1237eda14cbcSMatt Macy dmu_buf_t *db = dbp[i]; 1238eda14cbcSMatt Macy 1239eda14cbcSMatt Macy ASSERT(size > 0); 1240eda14cbcSMatt Macy 1241184c1b94SMartin Matuska bufoff = zfs_uio_offset(uio) - db->db_offset; 1242eda14cbcSMatt Macy tocpy = MIN(db->db_size - bufoff, size); 1243eda14cbcSMatt Macy 1244184c1b94SMartin Matuska err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy, 1245eda14cbcSMatt Macy UIO_READ, uio); 1246184c1b94SMartin Matuska 1247eda14cbcSMatt Macy if (err) 1248eda14cbcSMatt Macy break; 1249eda14cbcSMatt Macy 1250eda14cbcSMatt Macy size -= tocpy; 1251eda14cbcSMatt Macy } 1252eda14cbcSMatt Macy dmu_buf_rele_array(dbp, numbufs, FTAG); 1253eda14cbcSMatt Macy 1254eda14cbcSMatt Macy return (err); 1255eda14cbcSMatt Macy } 1256eda14cbcSMatt Macy 1257eda14cbcSMatt Macy /* 1258eda14cbcSMatt Macy * Read 'size' bytes into the uio buffer. 1259eda14cbcSMatt Macy * From object zdb->db_object. 1260184c1b94SMartin Matuska * Starting at zfs_uio_offset(uio). 1261eda14cbcSMatt Macy * 1262eda14cbcSMatt Macy * If the caller already has a dbuf in the target object 1263eda14cbcSMatt Macy * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(), 1264eda14cbcSMatt Macy * because we don't have to find the dnode_t for the object. 1265eda14cbcSMatt Macy */ 1266eda14cbcSMatt Macy int 1267184c1b94SMartin Matuska dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size) 1268eda14cbcSMatt Macy { 1269eda14cbcSMatt Macy dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1270eda14cbcSMatt Macy dnode_t *dn; 1271eda14cbcSMatt Macy int err; 1272eda14cbcSMatt Macy 1273eda14cbcSMatt Macy if (size == 0) 1274eda14cbcSMatt Macy return (0); 1275eda14cbcSMatt Macy 1276eda14cbcSMatt Macy DB_DNODE_ENTER(db); 1277eda14cbcSMatt Macy dn = DB_DNODE(db); 1278eda14cbcSMatt Macy err = dmu_read_uio_dnode(dn, uio, size); 1279eda14cbcSMatt Macy DB_DNODE_EXIT(db); 1280eda14cbcSMatt Macy 1281eda14cbcSMatt Macy return (err); 1282eda14cbcSMatt Macy } 1283eda14cbcSMatt Macy 1284eda14cbcSMatt Macy /* 1285eda14cbcSMatt Macy * Read 'size' bytes into the uio buffer. 1286eda14cbcSMatt Macy * From the specified object 1287184c1b94SMartin Matuska * Starting at offset zfs_uio_offset(uio). 
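 *
 * Illustrative sketch (hedged; "os", "obj", "uio", and "n" are
 * placeholders for a held objset, an object number, a destination uio,
 * and a byte count, none of which are defined here):
 *
 *	error = dmu_read_uio(os, obj, uio, n);
 *	if (error != 0)
 *		return (error);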
1288eda14cbcSMatt Macy  */
1289eda14cbcSMatt Macy int
1290184c1b94SMartin Matuska dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
1291eda14cbcSMatt Macy {
1292eda14cbcSMatt Macy 	dnode_t *dn;
1293eda14cbcSMatt Macy 	int err;
1294eda14cbcSMatt Macy 
1295eda14cbcSMatt Macy 	if (size == 0)
1296eda14cbcSMatt Macy 		return (0);
1297eda14cbcSMatt Macy 
1298eda14cbcSMatt Macy 	err = dnode_hold(os, object, FTAG, &dn);
1299eda14cbcSMatt Macy 	if (err)
1300eda14cbcSMatt Macy 		return (err);
1301eda14cbcSMatt Macy 
1302eda14cbcSMatt Macy 	err = dmu_read_uio_dnode(dn, uio, size);
1303eda14cbcSMatt Macy 
1304eda14cbcSMatt Macy 	dnode_rele(dn, FTAG);
1305eda14cbcSMatt Macy 
1306eda14cbcSMatt Macy 	return (err);
1307eda14cbcSMatt Macy }
1308eda14cbcSMatt Macy 
1309eda14cbcSMatt Macy int
1310184c1b94SMartin Matuska dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
1311eda14cbcSMatt Macy {
1312eda14cbcSMatt Macy 	dmu_buf_t **dbp;
1313eda14cbcSMatt Macy 	int numbufs;
1314eda14cbcSMatt Macy 	int err = 0;
1315eda14cbcSMatt Macy 	int i;
1316eda14cbcSMatt Macy 
1317184c1b94SMartin Matuska 	err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1318eda14cbcSMatt Macy 	    FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1319eda14cbcSMatt Macy 	if (err)
1320eda14cbcSMatt Macy 		return (err);
1321eda14cbcSMatt Macy 
1322eda14cbcSMatt Macy 	for (i = 0; i < numbufs; i++) {
1323eda14cbcSMatt Macy 		uint64_t tocpy;
1324eda14cbcSMatt Macy 		int64_t bufoff;
1325eda14cbcSMatt Macy 		dmu_buf_t *db = dbp[i];
1326eda14cbcSMatt Macy 
1327eda14cbcSMatt Macy 		ASSERT(size > 0);
1328eda14cbcSMatt Macy 
1329184c1b94SMartin Matuska 		bufoff = zfs_uio_offset(uio) - db->db_offset;
1330eda14cbcSMatt Macy 		tocpy = MIN(db->db_size - bufoff, size);
1331eda14cbcSMatt Macy 
1332eda14cbcSMatt Macy 		ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1333eda14cbcSMatt Macy 
1334eda14cbcSMatt Macy 		if (tocpy == db->db_size)
1335eda14cbcSMatt Macy 			dmu_buf_will_fill(db, tx);
1336eda14cbcSMatt Macy 		else
1337eda14cbcSMatt Macy 			dmu_buf_will_dirty(db, tx);
1338eda14cbcSMatt Macy 
1339eda14cbcSMatt Macy 		/*
1340184c1b94SMartin Matuska 		 * XXX zfs_uiomove could block forever (e.g. nfs-backed
1341eda14cbcSMatt Macy 		 * pages). There needs to be a uiolockdown() function
1342184c1b94SMartin Matuska 		 * to lock the pages in memory, so that zfs_uiomove won't
1343eda14cbcSMatt Macy 		 * block.
1344eda14cbcSMatt Macy 		 */
1345184c1b94SMartin Matuska 		err = zfs_uio_fault_move((char *)db->db_data + bufoff,
1346184c1b94SMartin Matuska 		    tocpy, UIO_WRITE, uio);
1347184c1b94SMartin Matuska 
1348eda14cbcSMatt Macy 		if (tocpy == db->db_size)
1349eda14cbcSMatt Macy 			dmu_buf_fill_done(db, tx);
1350eda14cbcSMatt Macy 
1351eda14cbcSMatt Macy 		if (err)
1352eda14cbcSMatt Macy 			break;
1353eda14cbcSMatt Macy 
1354eda14cbcSMatt Macy 		size -= tocpy;
1355eda14cbcSMatt Macy 	}
1356eda14cbcSMatt Macy 
1357eda14cbcSMatt Macy 	dmu_buf_rele_array(dbp, numbufs, FTAG);
1358eda14cbcSMatt Macy 	return (err);
1359eda14cbcSMatt Macy }
1360eda14cbcSMatt Macy 
1361eda14cbcSMatt Macy /*
1362eda14cbcSMatt Macy  * Write 'size' bytes from the uio buffer.
1363eda14cbcSMatt Macy  * To object zdb->db_object.
1364184c1b94SMartin Matuska  * Starting at offset zfs_uio_offset(uio).
1365eda14cbcSMatt Macy  *
1366eda14cbcSMatt Macy  * If the caller already has a dbuf in the target object
1367eda14cbcSMatt Macy  * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
1368eda14cbcSMatt Macy  * because we don't have to find the dnode_t for the object.
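 *
 * Illustrative sketch (hedged; "db", "uio", "n", and "tx" are
 * placeholders for a held dbuf in the target object, a source uio, a
 * byte count, and an assigned transaction):
 *
 *	error = dmu_write_uio_dbuf(db, uio, n, tx);
 *	if (error != 0)
 *		return (error);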
1369eda14cbcSMatt Macy */ 1370eda14cbcSMatt Macy int 1371184c1b94SMartin Matuska dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size, 1372eda14cbcSMatt Macy dmu_tx_t *tx) 1373eda14cbcSMatt Macy { 1374eda14cbcSMatt Macy dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb; 1375eda14cbcSMatt Macy dnode_t *dn; 1376eda14cbcSMatt Macy int err; 1377eda14cbcSMatt Macy 1378eda14cbcSMatt Macy if (size == 0) 1379eda14cbcSMatt Macy return (0); 1380eda14cbcSMatt Macy 1381eda14cbcSMatt Macy DB_DNODE_ENTER(db); 1382eda14cbcSMatt Macy dn = DB_DNODE(db); 1383eda14cbcSMatt Macy err = dmu_write_uio_dnode(dn, uio, size, tx); 1384eda14cbcSMatt Macy DB_DNODE_EXIT(db); 1385eda14cbcSMatt Macy 1386eda14cbcSMatt Macy return (err); 1387eda14cbcSMatt Macy } 1388eda14cbcSMatt Macy 1389eda14cbcSMatt Macy /* 1390eda14cbcSMatt Macy * Write 'size' bytes from the uio buffer. 1391eda14cbcSMatt Macy * To the specified object. 1392184c1b94SMartin Matuska * Starting at offset zfs_uio_offset(uio). 1393eda14cbcSMatt Macy */ 1394eda14cbcSMatt Macy int 1395184c1b94SMartin Matuska dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size, 1396eda14cbcSMatt Macy dmu_tx_t *tx) 1397eda14cbcSMatt Macy { 1398eda14cbcSMatt Macy dnode_t *dn; 1399eda14cbcSMatt Macy int err; 1400eda14cbcSMatt Macy 1401eda14cbcSMatt Macy if (size == 0) 1402eda14cbcSMatt Macy return (0); 1403eda14cbcSMatt Macy 1404eda14cbcSMatt Macy err = dnode_hold(os, object, FTAG, &dn); 1405eda14cbcSMatt Macy if (err) 1406eda14cbcSMatt Macy return (err); 1407eda14cbcSMatt Macy 1408eda14cbcSMatt Macy err = dmu_write_uio_dnode(dn, uio, size, tx); 1409eda14cbcSMatt Macy 1410eda14cbcSMatt Macy dnode_rele(dn, FTAG); 1411eda14cbcSMatt Macy 1412eda14cbcSMatt Macy return (err); 1413eda14cbcSMatt Macy } 1414eda14cbcSMatt Macy #endif /* _KERNEL */ 1415eda14cbcSMatt Macy 1416eda14cbcSMatt Macy /* 1417eda14cbcSMatt Macy * Allocate a loaned anonymous arc buffer. 1418eda14cbcSMatt Macy */ 1419eda14cbcSMatt Macy arc_buf_t * 1420eda14cbcSMatt Macy dmu_request_arcbuf(dmu_buf_t *handle, int size) 1421eda14cbcSMatt Macy { 1422eda14cbcSMatt Macy dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle; 1423eda14cbcSMatt Macy 1424eda14cbcSMatt Macy return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size)); 1425eda14cbcSMatt Macy } 1426eda14cbcSMatt Macy 1427eda14cbcSMatt Macy /* 1428eda14cbcSMatt Macy * Free a loaned arc buffer. 1429eda14cbcSMatt Macy */ 1430eda14cbcSMatt Macy void 1431eda14cbcSMatt Macy dmu_return_arcbuf(arc_buf_t *buf) 1432eda14cbcSMatt Macy { 1433eda14cbcSMatt Macy arc_return_buf(buf, FTAG); 1434eda14cbcSMatt Macy arc_buf_destroy(buf, FTAG); 1435eda14cbcSMatt Macy } 1436eda14cbcSMatt Macy 1437eda14cbcSMatt Macy /* 14387877fdebSMatt Macy * A "lightweight" write is faster than a regular write (e.g. 14397877fdebSMatt Macy * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the 14407877fdebSMatt Macy * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]_t. However, the 14417877fdebSMatt Macy * data can not be read or overwritten until the transaction's txg has been 14427877fdebSMatt Macy * synced. This makes it appropriate for workloads that are known to be 14437877fdebSMatt Macy * (temporarily) write-only, like "zfs receive". 14447877fdebSMatt Macy * 14457877fdebSMatt Macy * A single block is written, starting at the specified offset in bytes. If 14467877fdebSMatt Macy * the call is successful, it returns 0 and the provided abd has been 14477877fdebSMatt Macy * consumed (the caller should not free it). 
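 *
 * Illustrative sketch (hedged; "dn", "off", "abd", and "tx" are
 * placeholders, and "zp" is assumed to have been filled in by
 * dmu_write_policy()). Note the abd is only consumed on success:
 *
 *	error = dmu_lightweight_write_by_dnode(dn, off, abd, &zp, 0, tx);
 *	if (error != 0)
 *		abd_free(abd);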
14487877fdebSMatt Macy  */
14497877fdebSMatt Macy int
14507877fdebSMatt Macy dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
1451dbd5678dSMartin Matuska     const zio_prop_t *zp, zio_flag_t flags, dmu_tx_t *tx)
14527877fdebSMatt Macy {
14537877fdebSMatt Macy 	dbuf_dirty_record_t *dr =
14547877fdebSMatt Macy 	    dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
14557877fdebSMatt Macy 	if (dr == NULL)
14567877fdebSMatt Macy 		return (SET_ERROR(EIO));
14577877fdebSMatt Macy 	dr->dt.dll.dr_abd = abd;
14587877fdebSMatt Macy 	dr->dt.dll.dr_props = *zp;
14597877fdebSMatt Macy 	dr->dt.dll.dr_flags = flags;
14607877fdebSMatt Macy 	return (0);
14617877fdebSMatt Macy }
14627877fdebSMatt Macy 
14637877fdebSMatt Macy /*
1464eda14cbcSMatt Macy  * When possible, directly assign the passed loaned arc buffer to a dbuf.
1465eda14cbcSMatt Macy  * If this is not possible, copy the contents of the passed arc buf via
1466eda14cbcSMatt Macy  * dmu_write().
1467eda14cbcSMatt Macy  */
1468eda14cbcSMatt Macy int
1469eda14cbcSMatt Macy dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
1470eda14cbcSMatt Macy     dmu_tx_t *tx)
1471eda14cbcSMatt Macy {
1472eda14cbcSMatt Macy 	dmu_buf_impl_t *db;
1473eda14cbcSMatt Macy 	objset_t *os = dn->dn_objset;
1474eda14cbcSMatt Macy 	uint64_t object = dn->dn_object;
1475eda14cbcSMatt Macy 	uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
1476eda14cbcSMatt Macy 	uint64_t blkid;
1477eda14cbcSMatt Macy 
1478eda14cbcSMatt Macy 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
1479eda14cbcSMatt Macy 	blkid = dbuf_whichblock(dn, 0, offset);
1480eda14cbcSMatt Macy 	db = dbuf_hold(dn, blkid, FTAG);
1481eda14cbcSMatt Macy 	rw_exit(&dn->dn_struct_rwlock);
1482eda14cbcSMatt Macy 	if (db == NULL)
1483eda14cbcSMatt Macy 		return (SET_ERROR(EIO));
1484eda14cbcSMatt Macy 
1485eda14cbcSMatt Macy 	/*
14867877fdebSMatt Macy 	 * We can only assign if the offset is aligned and the arc buf is the
14877877fdebSMatt Macy 	 * same size as the dbuf.
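	 *
	 * Illustrative sketch of the loan/assign flow (hedged; "handle",
	 * "off", and "blksz" are placeholders for a held dbuf in the
	 * target object, a block-aligned offset, and the block size):
	 *
	 *	arc_buf_t *abuf = dmu_request_arcbuf(handle, blksz);
	 *	... fill abuf->b_data with blksz bytes ...
	 *	error = dmu_assign_arcbuf_by_dnode(dn, off, abuf, tx);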
1488eda14cbcSMatt Macy */ 1489eda14cbcSMatt Macy if (offset == db->db.db_offset && blksz == db->db.db_size) { 1490ba27dd8bSMartin Matuska zfs_racct_write(blksz, 1); 1491eda14cbcSMatt Macy dbuf_assign_arcbuf(db, buf, tx); 1492eda14cbcSMatt Macy dbuf_rele(db, FTAG); 1493eda14cbcSMatt Macy } else { 1494eda14cbcSMatt Macy /* compressed bufs must always be assignable to their dbuf */ 1495eda14cbcSMatt Macy ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF); 1496eda14cbcSMatt Macy ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED)); 1497eda14cbcSMatt Macy 1498eda14cbcSMatt Macy dbuf_rele(db, FTAG); 1499eda14cbcSMatt Macy dmu_write(os, object, offset, blksz, buf->b_data, tx); 1500eda14cbcSMatt Macy dmu_return_arcbuf(buf); 1501eda14cbcSMatt Macy } 1502eda14cbcSMatt Macy 1503eda14cbcSMatt Macy return (0); 1504eda14cbcSMatt Macy } 1505eda14cbcSMatt Macy 1506eda14cbcSMatt Macy int 1507eda14cbcSMatt Macy dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf, 1508eda14cbcSMatt Macy dmu_tx_t *tx) 1509eda14cbcSMatt Macy { 1510eda14cbcSMatt Macy int err; 1511eda14cbcSMatt Macy dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle; 1512eda14cbcSMatt Macy 1513eda14cbcSMatt Macy DB_DNODE_ENTER(dbuf); 1514eda14cbcSMatt Macy err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx); 1515eda14cbcSMatt Macy DB_DNODE_EXIT(dbuf); 1516eda14cbcSMatt Macy 1517eda14cbcSMatt Macy return (err); 1518eda14cbcSMatt Macy } 1519eda14cbcSMatt Macy 1520eda14cbcSMatt Macy typedef struct { 1521eda14cbcSMatt Macy dbuf_dirty_record_t *dsa_dr; 1522eda14cbcSMatt Macy dmu_sync_cb_t *dsa_done; 1523eda14cbcSMatt Macy zgd_t *dsa_zgd; 1524eda14cbcSMatt Macy dmu_tx_t *dsa_tx; 1525eda14cbcSMatt Macy } dmu_sync_arg_t; 1526eda14cbcSMatt Macy 1527eda14cbcSMatt Macy static void 1528eda14cbcSMatt Macy dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg) 1529eda14cbcSMatt Macy { 1530e92ffd9bSMartin Matuska (void) buf; 1531eda14cbcSMatt Macy dmu_sync_arg_t *dsa = varg; 1532eda14cbcSMatt Macy dmu_buf_t *db = dsa->dsa_zgd->zgd_db; 1533eda14cbcSMatt Macy blkptr_t *bp = zio->io_bp; 1534eda14cbcSMatt Macy 1535eda14cbcSMatt Macy if (zio->io_error == 0) { 1536eda14cbcSMatt Macy if (BP_IS_HOLE(bp)) { 1537eda14cbcSMatt Macy /* 1538eda14cbcSMatt Macy * A block of zeros may compress to a hole, but the 1539eda14cbcSMatt Macy * block size still needs to be known for replay. 1540eda14cbcSMatt Macy */ 1541eda14cbcSMatt Macy BP_SET_LSIZE(bp, db->db_size); 1542eda14cbcSMatt Macy } else if (!BP_IS_EMBEDDED(bp)) { 1543eda14cbcSMatt Macy ASSERT(BP_GET_LEVEL(bp) == 0); 1544eda14cbcSMatt Macy BP_SET_FILL(bp, 1); 1545eda14cbcSMatt Macy } 1546eda14cbcSMatt Macy } 1547eda14cbcSMatt Macy } 1548eda14cbcSMatt Macy 1549eda14cbcSMatt Macy static void 1550eda14cbcSMatt Macy dmu_sync_late_arrival_ready(zio_t *zio) 1551eda14cbcSMatt Macy { 1552eda14cbcSMatt Macy dmu_sync_ready(zio, NULL, zio->io_private); 1553eda14cbcSMatt Macy } 1554eda14cbcSMatt Macy 1555eda14cbcSMatt Macy static void 1556eda14cbcSMatt Macy dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg) 1557eda14cbcSMatt Macy { 1558e92ffd9bSMartin Matuska (void) buf; 1559eda14cbcSMatt Macy dmu_sync_arg_t *dsa = varg; 1560eda14cbcSMatt Macy dbuf_dirty_record_t *dr = dsa->dsa_dr; 1561eda14cbcSMatt Macy dmu_buf_impl_t *db = dr->dr_dbuf; 1562eda14cbcSMatt Macy zgd_t *zgd = dsa->dsa_zgd; 1563eda14cbcSMatt Macy 1564eda14cbcSMatt Macy /* 1565eda14cbcSMatt Macy * Record the vdev(s) backing this blkptr so they can be flushed after 1566eda14cbcSMatt Macy * the writes for the lwb have completed. 
1567eda14cbcSMatt Macy */ 1568eda14cbcSMatt Macy if (zio->io_error == 0) { 1569eda14cbcSMatt Macy zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1570eda14cbcSMatt Macy } 1571eda14cbcSMatt Macy 1572eda14cbcSMatt Macy mutex_enter(&db->db_mtx); 1573eda14cbcSMatt Macy ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC); 1574eda14cbcSMatt Macy if (zio->io_error == 0) { 1575eda14cbcSMatt Macy dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE); 1576eda14cbcSMatt Macy if (dr->dt.dl.dr_nopwrite) { 1577eda14cbcSMatt Macy blkptr_t *bp = zio->io_bp; 1578eda14cbcSMatt Macy blkptr_t *bp_orig = &zio->io_bp_orig; 1579eda14cbcSMatt Macy uint8_t chksum = BP_GET_CHECKSUM(bp_orig); 1580eda14cbcSMatt Macy 1581eda14cbcSMatt Macy ASSERT(BP_EQUAL(bp, bp_orig)); 1582eda14cbcSMatt Macy VERIFY(BP_EQUAL(bp, db->db_blkptr)); 1583eda14cbcSMatt Macy ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF); 1584eda14cbcSMatt Macy VERIFY(zio_checksum_table[chksum].ci_flags & 1585eda14cbcSMatt Macy ZCHECKSUM_FLAG_NOPWRITE); 1586eda14cbcSMatt Macy } 1587eda14cbcSMatt Macy dr->dt.dl.dr_overridden_by = *zio->io_bp; 1588eda14cbcSMatt Macy dr->dt.dl.dr_override_state = DR_OVERRIDDEN; 1589eda14cbcSMatt Macy dr->dt.dl.dr_copies = zio->io_prop.zp_copies; 1590eda14cbcSMatt Macy 1591eda14cbcSMatt Macy /* 1592eda14cbcSMatt Macy * Old style holes are filled with all zeros, whereas 1593eda14cbcSMatt Macy * new-style holes maintain their lsize, type, level, 1594eda14cbcSMatt Macy * and birth time (see zio_write_compress). While we 1595eda14cbcSMatt Macy * need to reset the BP_SET_LSIZE() call that happened 1596eda14cbcSMatt Macy * in dmu_sync_ready for old style holes, we do *not* 1597eda14cbcSMatt Macy * want to wipe out the information contained in new 1598eda14cbcSMatt Macy * style holes. Thus, only zero out the block pointer if 1599eda14cbcSMatt Macy * it's an old style hole. 1600eda14cbcSMatt Macy */ 1601eda14cbcSMatt Macy if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) && 1602eda14cbcSMatt Macy dr->dt.dl.dr_overridden_by.blk_birth == 0) 1603eda14cbcSMatt Macy BP_ZERO(&dr->dt.dl.dr_overridden_by); 1604eda14cbcSMatt Macy } else { 1605eda14cbcSMatt Macy dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN; 1606eda14cbcSMatt Macy } 1607eda14cbcSMatt Macy cv_broadcast(&db->db_changed); 1608eda14cbcSMatt Macy mutex_exit(&db->db_mtx); 1609eda14cbcSMatt Macy 1610eda14cbcSMatt Macy dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1611eda14cbcSMatt Macy 1612eda14cbcSMatt Macy kmem_free(dsa, sizeof (*dsa)); 1613eda14cbcSMatt Macy } 1614eda14cbcSMatt Macy 1615eda14cbcSMatt Macy static void 1616eda14cbcSMatt Macy dmu_sync_late_arrival_done(zio_t *zio) 1617eda14cbcSMatt Macy { 1618eda14cbcSMatt Macy blkptr_t *bp = zio->io_bp; 1619eda14cbcSMatt Macy dmu_sync_arg_t *dsa = zio->io_private; 1620eda14cbcSMatt Macy zgd_t *zgd = dsa->dsa_zgd; 1621eda14cbcSMatt Macy 1622eda14cbcSMatt Macy if (zio->io_error == 0) { 1623eda14cbcSMatt Macy /* 1624eda14cbcSMatt Macy * Record the vdev(s) backing this blkptr so they can be 1625eda14cbcSMatt Macy * flushed after the writes for the lwb have completed. 
1626eda14cbcSMatt Macy */ 1627eda14cbcSMatt Macy zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp); 1628eda14cbcSMatt Macy 1629eda14cbcSMatt Macy if (!BP_IS_HOLE(bp)) { 1630eda14cbcSMatt Macy blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig; 1631eda14cbcSMatt Macy ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE)); 1632eda14cbcSMatt Macy ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig)); 1633eda14cbcSMatt Macy ASSERT(zio->io_bp->blk_birth == zio->io_txg); 1634eda14cbcSMatt Macy ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa)); 1635eda14cbcSMatt Macy zio_free(zio->io_spa, zio->io_txg, zio->io_bp); 1636eda14cbcSMatt Macy } 1637eda14cbcSMatt Macy } 1638eda14cbcSMatt Macy 1639eda14cbcSMatt Macy dmu_tx_commit(dsa->dsa_tx); 1640eda14cbcSMatt Macy 1641eda14cbcSMatt Macy dsa->dsa_done(dsa->dsa_zgd, zio->io_error); 1642eda14cbcSMatt Macy 1643184c1b94SMartin Matuska abd_free(zio->io_abd); 1644eda14cbcSMatt Macy kmem_free(dsa, sizeof (*dsa)); 1645eda14cbcSMatt Macy } 1646eda14cbcSMatt Macy 1647eda14cbcSMatt Macy static int 1648eda14cbcSMatt Macy dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd, 1649eda14cbcSMatt Macy zio_prop_t *zp, zbookmark_phys_t *zb) 1650eda14cbcSMatt Macy { 1651eda14cbcSMatt Macy dmu_sync_arg_t *dsa; 1652eda14cbcSMatt Macy dmu_tx_t *tx; 1653eda14cbcSMatt Macy 1654eda14cbcSMatt Macy tx = dmu_tx_create(os); 1655eda14cbcSMatt Macy dmu_tx_hold_space(tx, zgd->zgd_db->db_size); 1656eda14cbcSMatt Macy if (dmu_tx_assign(tx, TXG_WAIT) != 0) { 1657eda14cbcSMatt Macy dmu_tx_abort(tx); 1658eda14cbcSMatt Macy /* Make zl_get_data do txg_waited_synced() */ 1659eda14cbcSMatt Macy return (SET_ERROR(EIO)); 1660eda14cbcSMatt Macy } 1661eda14cbcSMatt Macy 1662eda14cbcSMatt Macy /* 1663eda14cbcSMatt Macy * In order to prevent the zgd's lwb from being free'd prior to 1664eda14cbcSMatt Macy * dmu_sync_late_arrival_done() being called, we have to ensure 1665eda14cbcSMatt Macy * the lwb's "max txg" takes this tx's txg into account. 1666eda14cbcSMatt Macy */ 1667eda14cbcSMatt Macy zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx)); 1668eda14cbcSMatt Macy 1669eda14cbcSMatt Macy dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 1670eda14cbcSMatt Macy dsa->dsa_dr = NULL; 1671eda14cbcSMatt Macy dsa->dsa_done = done; 1672eda14cbcSMatt Macy dsa->dsa_zgd = zgd; 1673eda14cbcSMatt Macy dsa->dsa_tx = tx; 1674eda14cbcSMatt Macy 1675eda14cbcSMatt Macy /* 1676eda14cbcSMatt Macy * Since we are currently syncing this txg, it's nontrivial to 1677eda14cbcSMatt Macy * determine what BP to nopwrite against, so we disable nopwrite. 1678eda14cbcSMatt Macy * 1679eda14cbcSMatt Macy * When syncing, the db_blkptr is initially the BP of the previous 1680eda14cbcSMatt Macy * txg. We can not nopwrite against it because it will be changed 1681eda14cbcSMatt Macy * (this is similar to the non-late-arrival case where the dbuf is 1682eda14cbcSMatt Macy * dirty in a future txg). 1683eda14cbcSMatt Macy * 1684eda14cbcSMatt Macy * Then dbuf_write_ready() sets bp_blkptr to the location we will write. 1685eda14cbcSMatt Macy * We can not nopwrite against it because although the BP will not 1686eda14cbcSMatt Macy * (typically) be changed, the data has not yet been persisted to this 1687eda14cbcSMatt Macy * location. 1688eda14cbcSMatt Macy * 1689eda14cbcSMatt Macy * Finally, when dbuf_write_done() is called, it is theoretically 1690eda14cbcSMatt Macy * possible to always nopwrite, because the data that was written in 1691eda14cbcSMatt Macy * this txg is the same data that we are trying to write. 
However we 1692eda14cbcSMatt Macy * would need to check that this dbuf is not dirty in any future 1693eda14cbcSMatt Macy * txg's (as we do in the normal dmu_sync() path). For simplicity, we 1694eda14cbcSMatt Macy * don't nopwrite in this case. 1695eda14cbcSMatt Macy */ 1696eda14cbcSMatt Macy zp->zp_nopwrite = B_FALSE; 1697eda14cbcSMatt Macy 1698eda14cbcSMatt Macy zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp, 1699eda14cbcSMatt Macy abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size), 1700eda14cbcSMatt Macy zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp, 1701eda14cbcSMatt Macy dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done, 1702eda14cbcSMatt Macy dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb)); 1703eda14cbcSMatt Macy 1704eda14cbcSMatt Macy return (0); 1705eda14cbcSMatt Macy } 1706eda14cbcSMatt Macy 1707eda14cbcSMatt Macy /* 1708eda14cbcSMatt Macy * Intent log support: sync the block associated with db to disk. 1709eda14cbcSMatt Macy * N.B. and XXX: the caller is responsible for making sure that the 1710eda14cbcSMatt Macy * data isn't changing while dmu_sync() is writing it. 1711eda14cbcSMatt Macy * 1712eda14cbcSMatt Macy * Return values: 1713eda14cbcSMatt Macy * 1714eda14cbcSMatt Macy * EEXIST: this txg has already been synced, so there's nothing to do. 1715eda14cbcSMatt Macy * The caller should not log the write. 1716eda14cbcSMatt Macy * 1717eda14cbcSMatt Macy * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. 1718eda14cbcSMatt Macy * The caller should not log the write. 1719eda14cbcSMatt Macy * 1720eda14cbcSMatt Macy * EALREADY: this block is already in the process of being synced. 1721eda14cbcSMatt Macy * The caller should track its progress (somehow). 1722eda14cbcSMatt Macy * 1723eda14cbcSMatt Macy * EIO: could not do the I/O. 1724eda14cbcSMatt Macy * The caller should do a txg_wait_synced(). 1725eda14cbcSMatt Macy * 1726eda14cbcSMatt Macy * 0: the I/O has been initiated. 1727eda14cbcSMatt Macy * The caller should log this blkptr in the done callback. 1728eda14cbcSMatt Macy * It is possible that the I/O will fail, in which case 1729eda14cbcSMatt Macy * the error will be reported to the done callback and 1730eda14cbcSMatt Macy * propagated to pio from zio_done(). 1731eda14cbcSMatt Macy */ 1732eda14cbcSMatt Macy int 1733eda14cbcSMatt Macy dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) 1734eda14cbcSMatt Macy { 1735eda14cbcSMatt Macy dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db; 1736eda14cbcSMatt Macy objset_t *os = db->db_objset; 1737eda14cbcSMatt Macy dsl_dataset_t *ds = os->os_dsl_dataset; 1738eda14cbcSMatt Macy dbuf_dirty_record_t *dr, *dr_next; 1739eda14cbcSMatt Macy dmu_sync_arg_t *dsa; 1740eda14cbcSMatt Macy zbookmark_phys_t zb; 1741eda14cbcSMatt Macy zio_prop_t zp; 1742eda14cbcSMatt Macy dnode_t *dn; 1743eda14cbcSMatt Macy 1744eda14cbcSMatt Macy ASSERT(pio != NULL); 1745eda14cbcSMatt Macy ASSERT(txg != 0); 1746eda14cbcSMatt Macy 1747eda14cbcSMatt Macy SET_BOOKMARK(&zb, ds->ds_object, 1748eda14cbcSMatt Macy db->db.db_object, db->db_level, db->db_blkid); 1749eda14cbcSMatt Macy 1750eda14cbcSMatt Macy DB_DNODE_ENTER(db); 1751eda14cbcSMatt Macy dn = DB_DNODE(db); 1752eda14cbcSMatt Macy dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp); 1753eda14cbcSMatt Macy DB_DNODE_EXIT(db); 1754eda14cbcSMatt Macy 1755eda14cbcSMatt Macy /* 1756eda14cbcSMatt Macy * If we're frozen (running ziltest), we always need to generate a bp. 
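 *
 * For reference, a hedged, partial sketch of how an intent-log get_data
 * callback might dispatch on the return contract documented above (all
 * names here are placeholders; EALREADY additionally requires the caller
 * to track the in-flight sync):
 *
 *	error = dmu_sync(zio, txg, done_cb, zgd);
 *	if (error == EEXIST || error == ENOENT)
 *		error = 0;	... and do not log the write ...
 *	else if (error == EIO)
 *		txg_wait_synced(dmu_objset_pool(os), 0);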
1757eda14cbcSMatt Macy */ 1758eda14cbcSMatt Macy if (txg > spa_freeze_txg(os->os_spa)) 1759eda14cbcSMatt Macy return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 1760eda14cbcSMatt Macy 1761eda14cbcSMatt Macy /* 1762eda14cbcSMatt Macy * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf() 1763eda14cbcSMatt Macy * and us. If we determine that this txg is not yet syncing, 1764eda14cbcSMatt Macy * but it begins to sync a moment later, that's OK because the 1765eda14cbcSMatt Macy * sync thread will block in dbuf_sync_leaf() until we drop db_mtx. 1766eda14cbcSMatt Macy */ 1767eda14cbcSMatt Macy mutex_enter(&db->db_mtx); 1768eda14cbcSMatt Macy 1769eda14cbcSMatt Macy if (txg <= spa_last_synced_txg(os->os_spa)) { 1770eda14cbcSMatt Macy /* 1771eda14cbcSMatt Macy * This txg has already synced. There's nothing to do. 1772eda14cbcSMatt Macy */ 1773eda14cbcSMatt Macy mutex_exit(&db->db_mtx); 1774eda14cbcSMatt Macy return (SET_ERROR(EEXIST)); 1775eda14cbcSMatt Macy } 1776eda14cbcSMatt Macy 1777eda14cbcSMatt Macy if (txg <= spa_syncing_txg(os->os_spa)) { 1778eda14cbcSMatt Macy /* 1779eda14cbcSMatt Macy * This txg is currently syncing, so we can't mess with 1780eda14cbcSMatt Macy * the dirty record anymore; just write a new log block. 1781eda14cbcSMatt Macy */ 1782eda14cbcSMatt Macy mutex_exit(&db->db_mtx); 1783eda14cbcSMatt Macy return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb)); 1784eda14cbcSMatt Macy } 1785eda14cbcSMatt Macy 1786eda14cbcSMatt Macy dr = dbuf_find_dirty_eq(db, txg); 1787eda14cbcSMatt Macy 1788eda14cbcSMatt Macy if (dr == NULL) { 1789eda14cbcSMatt Macy /* 1790eda14cbcSMatt Macy * There's no dr for this dbuf, so it must have been freed. 1791eda14cbcSMatt Macy * There's no need to log writes to freed blocks, so we're done. 1792eda14cbcSMatt Macy */ 1793eda14cbcSMatt Macy mutex_exit(&db->db_mtx); 1794eda14cbcSMatt Macy return (SET_ERROR(ENOENT)); 1795eda14cbcSMatt Macy } 1796eda14cbcSMatt Macy 1797eda14cbcSMatt Macy dr_next = list_next(&db->db_dirty_records, dr); 1798eda14cbcSMatt Macy ASSERT(dr_next == NULL || dr_next->dr_txg < txg); 1799eda14cbcSMatt Macy 1800eda14cbcSMatt Macy if (db->db_blkptr != NULL) { 1801eda14cbcSMatt Macy /* 1802eda14cbcSMatt Macy * We need to fill in zgd_bp with the current blkptr so that 1803eda14cbcSMatt Macy * the nopwrite code can check if we're writing the same 1804eda14cbcSMatt Macy * data that's already on disk. We can only nopwrite if we 1805eda14cbcSMatt Macy * are sure that after making the copy, db_blkptr will not 1806eda14cbcSMatt Macy * change until our i/o completes. We ensure this by 1807eda14cbcSMatt Macy * holding the db_mtx, and only allowing nopwrite if the 1808eda14cbcSMatt Macy * block is not already dirty (see below). This is verified 1809eda14cbcSMatt Macy * by dmu_sync_done(), which VERIFYs that the db_blkptr has 1810eda14cbcSMatt Macy * not changed. 1811eda14cbcSMatt Macy */ 1812eda14cbcSMatt Macy *zgd->zgd_bp = *db->db_blkptr; 1813eda14cbcSMatt Macy } 1814eda14cbcSMatt Macy 1815eda14cbcSMatt Macy /* 1816eda14cbcSMatt Macy * Assume the on-disk data is X, the current syncing data (in 1817eda14cbcSMatt Macy * txg - 1) is Y, and the current in-memory data is Z (currently 1818eda14cbcSMatt Macy * in dmu_sync). 1819eda14cbcSMatt Macy * 1820eda14cbcSMatt Macy * We usually want to perform a nopwrite if X and Z are the 1821eda14cbcSMatt Macy * same. However, if Y is different (i.e. 
the BP is going to 1822eda14cbcSMatt Macy * change before this write takes effect), then a nopwrite will 1823eda14cbcSMatt Macy * be incorrect - we would override with X, which could have 1824eda14cbcSMatt Macy * been freed when Y was written. 1825eda14cbcSMatt Macy * 1826eda14cbcSMatt Macy * (Note that this is not a concern when we are nop-writing from 1827eda14cbcSMatt Macy * syncing context, because X and Y must be identical, because 1828eda14cbcSMatt Macy * all previous txgs have been synced.) 1829eda14cbcSMatt Macy * 1830eda14cbcSMatt Macy * Therefore, we disable nopwrite if the current BP could change 1831eda14cbcSMatt Macy * before this TXG. There are two ways it could change: by 1832eda14cbcSMatt Macy * being dirty (dr_next is non-NULL), or by being freed 1833eda14cbcSMatt Macy * (dnode_block_freed()). This behavior is verified by 1834eda14cbcSMatt Macy * zio_done(), which VERIFYs that the override BP is identical 1835eda14cbcSMatt Macy * to the on-disk BP. 1836eda14cbcSMatt Macy */ 1837eda14cbcSMatt Macy DB_DNODE_ENTER(db); 1838eda14cbcSMatt Macy dn = DB_DNODE(db); 1839eda14cbcSMatt Macy if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid)) 1840eda14cbcSMatt Macy zp.zp_nopwrite = B_FALSE; 1841eda14cbcSMatt Macy DB_DNODE_EXIT(db); 1842eda14cbcSMatt Macy 1843eda14cbcSMatt Macy ASSERT(dr->dr_txg == txg); 1844eda14cbcSMatt Macy if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC || 1845eda14cbcSMatt Macy dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { 1846eda14cbcSMatt Macy /* 1847eda14cbcSMatt Macy * We have already issued a sync write for this buffer, 1848eda14cbcSMatt Macy * or this buffer has already been synced. It could not 1849eda14cbcSMatt Macy * have been dirtied since, or we would have cleared the state. 1850eda14cbcSMatt Macy */ 1851eda14cbcSMatt Macy mutex_exit(&db->db_mtx); 1852eda14cbcSMatt Macy return (SET_ERROR(EALREADY)); 1853eda14cbcSMatt Macy } 1854eda14cbcSMatt Macy 1855eda14cbcSMatt Macy ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); 1856eda14cbcSMatt Macy dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC; 1857eda14cbcSMatt Macy mutex_exit(&db->db_mtx); 1858eda14cbcSMatt Macy 1859eda14cbcSMatt Macy dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP); 1860eda14cbcSMatt Macy dsa->dsa_dr = dr; 1861eda14cbcSMatt Macy dsa->dsa_done = done; 1862eda14cbcSMatt Macy dsa->dsa_zgd = zgd; 1863eda14cbcSMatt Macy dsa->dsa_tx = NULL; 1864eda14cbcSMatt Macy 186515f0b8c3SMartin Matuska zio_nowait(arc_write(pio, os->os_spa, txg, zgd->zgd_bp, 186615f0b8c3SMartin Matuska dr->dt.dl.dr_data, !DBUF_IS_CACHEABLE(db), dbuf_is_l2cacheable(db), 1867eda14cbcSMatt Macy &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa, 1868eda14cbcSMatt Macy ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb)); 1869eda14cbcSMatt Macy 1870eda14cbcSMatt Macy return (0); 1871eda14cbcSMatt Macy } 1872eda14cbcSMatt Macy 1873eda14cbcSMatt Macy int 1874eda14cbcSMatt Macy dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx) 1875eda14cbcSMatt Macy { 1876eda14cbcSMatt Macy dnode_t *dn; 1877eda14cbcSMatt Macy int err; 1878eda14cbcSMatt Macy 1879eda14cbcSMatt Macy err = dnode_hold(os, object, FTAG, &dn); 1880eda14cbcSMatt Macy if (err) 1881eda14cbcSMatt Macy return (err); 1882eda14cbcSMatt Macy err = dnode_set_nlevels(dn, nlevels, tx); 1883eda14cbcSMatt Macy dnode_rele(dn, FTAG); 1884eda14cbcSMatt Macy return (err); 1885eda14cbcSMatt Macy } 1886eda14cbcSMatt Macy 1887eda14cbcSMatt Macy int 1888eda14cbcSMatt Macy dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int 
ibs, 1889eda14cbcSMatt Macy dmu_tx_t *tx) 1890eda14cbcSMatt Macy { 1891eda14cbcSMatt Macy dnode_t *dn; 1892eda14cbcSMatt Macy int err; 1893eda14cbcSMatt Macy 1894eda14cbcSMatt Macy err = dnode_hold(os, object, FTAG, &dn); 1895eda14cbcSMatt Macy if (err) 1896eda14cbcSMatt Macy return (err); 1897eda14cbcSMatt Macy err = dnode_set_blksz(dn, size, ibs, tx); 1898eda14cbcSMatt Macy dnode_rele(dn, FTAG); 1899eda14cbcSMatt Macy return (err); 1900eda14cbcSMatt Macy } 1901eda14cbcSMatt Macy 1902eda14cbcSMatt Macy int 1903eda14cbcSMatt Macy dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid, 1904eda14cbcSMatt Macy dmu_tx_t *tx) 1905eda14cbcSMatt Macy { 1906eda14cbcSMatt Macy dnode_t *dn; 1907eda14cbcSMatt Macy int err; 1908eda14cbcSMatt Macy 1909eda14cbcSMatt Macy err = dnode_hold(os, object, FTAG, &dn); 1910eda14cbcSMatt Macy if (err) 1911eda14cbcSMatt Macy return (err); 1912eda14cbcSMatt Macy rw_enter(&dn->dn_struct_rwlock, RW_WRITER); 1913eda14cbcSMatt Macy dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE); 1914eda14cbcSMatt Macy rw_exit(&dn->dn_struct_rwlock); 1915eda14cbcSMatt Macy dnode_rele(dn, FTAG); 1916eda14cbcSMatt Macy return (0); 1917eda14cbcSMatt Macy } 1918eda14cbcSMatt Macy 1919eda14cbcSMatt Macy void 1920eda14cbcSMatt Macy dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum, 1921eda14cbcSMatt Macy dmu_tx_t *tx) 1922eda14cbcSMatt Macy { 1923eda14cbcSMatt Macy dnode_t *dn; 1924eda14cbcSMatt Macy 1925eda14cbcSMatt Macy /* 1926eda14cbcSMatt Macy * Send streams include each object's checksum function. This 1927eda14cbcSMatt Macy * check ensures that the receiving system can understand the 1928eda14cbcSMatt Macy * checksum function transmitted. 1929eda14cbcSMatt Macy */ 1930eda14cbcSMatt Macy ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS); 1931eda14cbcSMatt Macy 1932eda14cbcSMatt Macy VERIFY0(dnode_hold(os, object, FTAG, &dn)); 1933eda14cbcSMatt Macy ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS); 1934eda14cbcSMatt Macy dn->dn_checksum = checksum; 1935eda14cbcSMatt Macy dnode_setdirty(dn, tx); 1936eda14cbcSMatt Macy dnode_rele(dn, FTAG); 1937eda14cbcSMatt Macy } 1938eda14cbcSMatt Macy 1939eda14cbcSMatt Macy void 1940eda14cbcSMatt Macy dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress, 1941eda14cbcSMatt Macy dmu_tx_t *tx) 1942eda14cbcSMatt Macy { 1943eda14cbcSMatt Macy dnode_t *dn; 1944eda14cbcSMatt Macy 1945eda14cbcSMatt Macy /* 1946eda14cbcSMatt Macy * Send streams include each object's compression function. This 1947eda14cbcSMatt Macy * check ensures that the receiving system can understand the 1948eda14cbcSMatt Macy * compression function transmitted. 1949eda14cbcSMatt Macy */ 1950eda14cbcSMatt Macy ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS); 1951eda14cbcSMatt Macy 1952eda14cbcSMatt Macy VERIFY0(dnode_hold(os, object, FTAG, &dn)); 1953eda14cbcSMatt Macy dn->dn_compress = compress; 1954eda14cbcSMatt Macy dnode_setdirty(dn, tx); 1955eda14cbcSMatt Macy dnode_rele(dn, FTAG); 1956eda14cbcSMatt Macy } 1957eda14cbcSMatt Macy 1958eda14cbcSMatt Macy /* 1959eda14cbcSMatt Macy * When the "redundant_metadata" property is set to "most", only indirect 1960eda14cbcSMatt Macy * blocks of this level and higher will have an additional ditto block. 
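 *
 * For example, with the default value of 2 below, "most" gives the extra
 * copy to indirect blocks at level >= 2 (and, per the policy code that
 * follows, to metadata-type and spill blocks), while level-1 indirect
 * blocks of plain file data get only the usual number of copies.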
1961eda14cbcSMatt Macy */ 1962e92ffd9bSMartin Matuska static const int zfs_redundant_metadata_most_ditto_level = 2; 1963eda14cbcSMatt Macy 1964eda14cbcSMatt Macy void 1965eda14cbcSMatt Macy dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp) 1966eda14cbcSMatt Macy { 1967eda14cbcSMatt Macy dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET; 1968eda14cbcSMatt Macy boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) || 1969eda14cbcSMatt Macy (wp & WP_SPILL)); 1970eda14cbcSMatt Macy enum zio_checksum checksum = os->os_checksum; 1971eda14cbcSMatt Macy enum zio_compress compress = os->os_compress; 1972eda14cbcSMatt Macy uint8_t complevel = os->os_complevel; 1973eda14cbcSMatt Macy enum zio_checksum dedup_checksum = os->os_dedup_checksum; 1974eda14cbcSMatt Macy boolean_t dedup = B_FALSE; 1975eda14cbcSMatt Macy boolean_t nopwrite = B_FALSE; 1976eda14cbcSMatt Macy boolean_t dedup_verify = os->os_dedup_verify; 1977eda14cbcSMatt Macy boolean_t encrypt = B_FALSE; 1978eda14cbcSMatt Macy int copies = os->os_copies; 1979eda14cbcSMatt Macy 1980eda14cbcSMatt Macy /* 1981eda14cbcSMatt Macy * We maintain different write policies for each of the following 1982eda14cbcSMatt Macy * types of data: 1983eda14cbcSMatt Macy * 1. metadata 1984eda14cbcSMatt Macy * 2. preallocated blocks (i.e. level-0 blocks of a dump device) 1985eda14cbcSMatt Macy * 3. all other level 0 blocks 1986eda14cbcSMatt Macy */ 1987eda14cbcSMatt Macy if (ismd) { 1988eda14cbcSMatt Macy /* 1989eda14cbcSMatt Macy * XXX -- we should design a compression algorithm 1990eda14cbcSMatt Macy * that specializes in arrays of bps. 1991eda14cbcSMatt Macy */ 1992eda14cbcSMatt Macy compress = zio_compress_select(os->os_spa, 1993eda14cbcSMatt Macy ZIO_COMPRESS_ON, ZIO_COMPRESS_ON); 1994eda14cbcSMatt Macy 1995eda14cbcSMatt Macy /* 1996eda14cbcSMatt Macy * Metadata always gets checksummed. If the data 1997eda14cbcSMatt Macy * checksum is multi-bit correctable, and it's not a 1998eda14cbcSMatt Macy * ZBT-style checksum, then it's suitable for metadata 1999eda14cbcSMatt Macy * as well. Otherwise, the metadata checksum defaults 2000eda14cbcSMatt Macy * to fletcher4. 
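 *
 * (Illustrative example, hedged: a checksum such as sha256 carries the
 * ZCHECKSUM_FLAG_METADATA flag and is not embedded, so it is kept for
 * metadata too, whereas a ZBT-style embedded checksum falls back to
 * fletcher4 in the check below.)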
2001eda14cbcSMatt Macy */ 2002eda14cbcSMatt Macy if (!(zio_checksum_table[checksum].ci_flags & 2003eda14cbcSMatt Macy ZCHECKSUM_FLAG_METADATA) || 2004eda14cbcSMatt Macy (zio_checksum_table[checksum].ci_flags & 2005eda14cbcSMatt Macy ZCHECKSUM_FLAG_EMBEDDED)) 2006eda14cbcSMatt Macy checksum = ZIO_CHECKSUM_FLETCHER_4; 2007eda14cbcSMatt Macy 2008dbd5678dSMartin Matuska switch (os->os_redundant_metadata) { 2009dbd5678dSMartin Matuska case ZFS_REDUNDANT_METADATA_ALL: 2010eda14cbcSMatt Macy copies++; 2011dbd5678dSMartin Matuska break; 2012dbd5678dSMartin Matuska case ZFS_REDUNDANT_METADATA_MOST: 2013dbd5678dSMartin Matuska if (level >= zfs_redundant_metadata_most_ditto_level || 2014dbd5678dSMartin Matuska DMU_OT_IS_METADATA(type) || (wp & WP_SPILL)) 2015dbd5678dSMartin Matuska copies++; 2016dbd5678dSMartin Matuska break; 2017dbd5678dSMartin Matuska case ZFS_REDUNDANT_METADATA_SOME: 2018dbd5678dSMartin Matuska if (DMU_OT_IS_CRITICAL(type)) 2019dbd5678dSMartin Matuska copies++; 2020dbd5678dSMartin Matuska break; 2021dbd5678dSMartin Matuska case ZFS_REDUNDANT_METADATA_NONE: 2022dbd5678dSMartin Matuska break; 2023dbd5678dSMartin Matuska } 2024eda14cbcSMatt Macy } else if (wp & WP_NOFILL) { 2025eda14cbcSMatt Macy ASSERT(level == 0); 2026eda14cbcSMatt Macy 2027eda14cbcSMatt Macy /* 2028eda14cbcSMatt Macy * If we're writing preallocated blocks, we aren't actually 2029eda14cbcSMatt Macy * writing them so don't set any policy properties. These 2030eda14cbcSMatt Macy * blocks are currently only used by an external subsystem 2031eda14cbcSMatt Macy * outside of zfs (i.e. dump) and not written by the zio 2032eda14cbcSMatt Macy * pipeline. 2033eda14cbcSMatt Macy */ 2034eda14cbcSMatt Macy compress = ZIO_COMPRESS_OFF; 2035eda14cbcSMatt Macy checksum = ZIO_CHECKSUM_OFF; 2036eda14cbcSMatt Macy } else { 2037eda14cbcSMatt Macy compress = zio_compress_select(os->os_spa, dn->dn_compress, 2038eda14cbcSMatt Macy compress); 2039eda14cbcSMatt Macy complevel = zio_complevel_select(os->os_spa, compress, 2040eda14cbcSMatt Macy complevel, complevel); 2041eda14cbcSMatt Macy 2042eda14cbcSMatt Macy checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ? 2043eda14cbcSMatt Macy zio_checksum_select(dn->dn_checksum, checksum) : 2044eda14cbcSMatt Macy dedup_checksum; 2045eda14cbcSMatt Macy 2046eda14cbcSMatt Macy /* 2047eda14cbcSMatt Macy * Determine dedup setting. If we are in dmu_sync(), 2048eda14cbcSMatt Macy * we won't actually dedup now because that's all 2049eda14cbcSMatt Macy * done in syncing context; but we do want to use the 2050eda14cbcSMatt Macy * dedup checksum. If the checksum is not strong 2051eda14cbcSMatt Macy * enough to ensure unique signatures, force 2052eda14cbcSMatt Macy * dedup_verify. 2053eda14cbcSMatt Macy */ 2054eda14cbcSMatt Macy if (dedup_checksum != ZIO_CHECKSUM_OFF) { 2055eda14cbcSMatt Macy dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE; 2056eda14cbcSMatt Macy if (!(zio_checksum_table[checksum].ci_flags & 2057eda14cbcSMatt Macy ZCHECKSUM_FLAG_DEDUP)) 2058eda14cbcSMatt Macy dedup_verify = B_TRUE; 2059eda14cbcSMatt Macy } 2060eda14cbcSMatt Macy 2061eda14cbcSMatt Macy /* 2062eda14cbcSMatt Macy * Enable nopwrite if we have secure enough checksum 2063eda14cbcSMatt Macy * algorithm (see comment in zio_nop_write) and 2064eda14cbcSMatt Macy * compression is enabled. We don't enable nopwrite if 2065eda14cbcSMatt Macy * dedup is enabled as the two features are mutually 2066eda14cbcSMatt Macy * exclusive. 
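 *
 * (E.g., hedged: a dataset with checksum=sha256, compression=lz4, and
 * dedup=off satisfies every condition checked below, while any dedup'd
 * write can never be a nopwrite candidate.)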
2067eda14cbcSMatt Macy */ 2068eda14cbcSMatt Macy nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags & 2069eda14cbcSMatt Macy ZCHECKSUM_FLAG_NOPWRITE) && 2070eda14cbcSMatt Macy compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled); 2071eda14cbcSMatt Macy } 2072eda14cbcSMatt Macy 2073eda14cbcSMatt Macy /* 2074eda14cbcSMatt Macy * All objects in an encrypted objset are protected from modification 2075eda14cbcSMatt Macy * via a MAC. Encrypted objects store their IV and salt in the last DVA 2076eda14cbcSMatt Macy * in the bp, so we cannot use all copies. Encrypted objects are also 2077eda14cbcSMatt Macy * not subject to nopwrite since writing the same data will still 2078eda14cbcSMatt Macy * result in a new ciphertext. Only encrypted blocks can be dedup'd 2079eda14cbcSMatt Macy * to avoid ambiguity in the dedup code since the DDT does not store 2080eda14cbcSMatt Macy * object types. 2081eda14cbcSMatt Macy */ 2082eda14cbcSMatt Macy if (os->os_encrypted && (wp & WP_NOFILL) == 0) { 2083eda14cbcSMatt Macy encrypt = B_TRUE; 2084eda14cbcSMatt Macy 2085eda14cbcSMatt Macy if (DMU_OT_IS_ENCRYPTED(type)) { 2086eda14cbcSMatt Macy copies = MIN(copies, SPA_DVAS_PER_BP - 1); 2087eda14cbcSMatt Macy nopwrite = B_FALSE; 2088eda14cbcSMatt Macy } else { 2089eda14cbcSMatt Macy dedup = B_FALSE; 2090eda14cbcSMatt Macy } 2091eda14cbcSMatt Macy 2092eda14cbcSMatt Macy if (level <= 0 && 2093eda14cbcSMatt Macy (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) { 2094eda14cbcSMatt Macy compress = ZIO_COMPRESS_EMPTY; 2095eda14cbcSMatt Macy } 2096eda14cbcSMatt Macy } 2097eda14cbcSMatt Macy 2098eda14cbcSMatt Macy zp->zp_compress = compress; 2099eda14cbcSMatt Macy zp->zp_complevel = complevel; 2100eda14cbcSMatt Macy zp->zp_checksum = checksum; 2101eda14cbcSMatt Macy zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type; 2102eda14cbcSMatt Macy zp->zp_level = level; 2103eda14cbcSMatt Macy zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa)); 2104eda14cbcSMatt Macy zp->zp_dedup = dedup; 2105eda14cbcSMatt Macy zp->zp_dedup_verify = dedup && dedup_verify; 2106eda14cbcSMatt Macy zp->zp_nopwrite = nopwrite; 2107eda14cbcSMatt Macy zp->zp_encrypt = encrypt; 2108eda14cbcSMatt Macy zp->zp_byteorder = ZFS_HOST_BYTEORDER; 2109da5137abSMartin Matuska memset(zp->zp_salt, 0, ZIO_DATA_SALT_LEN); 2110da5137abSMartin Matuska memset(zp->zp_iv, 0, ZIO_DATA_IV_LEN); 2111da5137abSMartin Matuska memset(zp->zp_mac, 0, ZIO_DATA_MAC_LEN); 2112eda14cbcSMatt Macy zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ? 2113eda14cbcSMatt Macy os->os_zpl_special_smallblock : 0; 2114eda14cbcSMatt Macy 2115eda14cbcSMatt Macy ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT); 2116eda14cbcSMatt Macy } 2117eda14cbcSMatt Macy 2118eda14cbcSMatt Macy /* 21192a58b312SMartin Matuska * Reports the location of data and holes in an object. In order to 21202a58b312SMartin Matuska * accurately report holes all dirty data must be synced to disk. This 21212a58b312SMartin Matuska * causes extremely poor performance when seeking for holes in a dirty file. 21222a58b312SMartin Matuska * As a compromise, only provide hole data when the dnode is clean. When 21232a58b312SMartin Matuska * a dnode is dirty report the dnode as having no holes by returning EBUSY 21242a58b312SMartin Matuska * which is always safe to do. 
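 *
 * (Hedged usage note: lseek(SEEK_HOLE)-style callers typically treat the
 * EBUSY return as "no holes found", e.g. reporting the only hole at end
 * of file, which is why returning EBUSY for a dirty dnode is always safe.)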
2125eda14cbcSMatt Macy */ 2126eda14cbcSMatt Macy int 2127eda14cbcSMatt Macy dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off) 2128eda14cbcSMatt Macy { 2129eda14cbcSMatt Macy dnode_t *dn; 21302a58b312SMartin Matuska int restarted = 0, err; 2131eda14cbcSMatt Macy 213281b22a98SMartin Matuska restart: 2133eda14cbcSMatt Macy err = dnode_hold(os, object, FTAG, &dn); 2134eda14cbcSMatt Macy if (err) 2135eda14cbcSMatt Macy return (err); 2136eda14cbcSMatt Macy 213781b22a98SMartin Matuska rw_enter(&dn->dn_struct_rwlock, RW_READER); 2138eda14cbcSMatt Macy 213981b22a98SMartin Matuska if (dnode_is_dirty(dn)) { 2140eda14cbcSMatt Macy /* 214181b22a98SMartin Matuska * If the zfs_dmu_offset_next_sync module option is enabled 21422a58b312SMartin Matuska * then hole reporting has been requested. Dirty dnodes 21432a58b312SMartin Matuska * must be synced to disk to accurately report holes. 214481b22a98SMartin Matuska * 21452a58b312SMartin Matuska * Provided a RL_READER rangelock spanning 0-UINT64_MAX is 21462a58b312SMartin Matuska * held by the caller only a single restart will be required. 21472a58b312SMartin Matuska * We tolerate callers which do not hold the rangelock by 21482a58b312SMartin Matuska * returning EBUSY and not reporting holes after one restart. 2149eda14cbcSMatt Macy */ 215081b22a98SMartin Matuska if (zfs_dmu_offset_next_sync) { 215181b22a98SMartin Matuska rw_exit(&dn->dn_struct_rwlock); 2152eda14cbcSMatt Macy dnode_rele(dn, FTAG); 21532a58b312SMartin Matuska 21542a58b312SMartin Matuska if (restarted) 21552a58b312SMartin Matuska return (SET_ERROR(EBUSY)); 21562a58b312SMartin Matuska 2157eda14cbcSMatt Macy txg_wait_synced(dmu_objset_pool(os), 0); 21582a58b312SMartin Matuska restarted = 1; 215981b22a98SMartin Matuska goto restart; 2160eda14cbcSMatt Macy } 2161eda14cbcSMatt Macy 2162eda14cbcSMatt Macy err = SET_ERROR(EBUSY); 216381b22a98SMartin Matuska } else { 216481b22a98SMartin Matuska err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK | 216581b22a98SMartin Matuska (hole ? 
DNODE_FIND_HOLE : 0), off, 1, 1, 0); 216681b22a98SMartin Matuska } 2167eda14cbcSMatt Macy 216881b22a98SMartin Matuska rw_exit(&dn->dn_struct_rwlock); 2169eda14cbcSMatt Macy dnode_rele(dn, FTAG); 2170eda14cbcSMatt Macy 2171eda14cbcSMatt Macy return (err); 2172eda14cbcSMatt Macy } 2173eda14cbcSMatt Macy 21742a58b312SMartin Matuska int 21752a58b312SMartin Matuska dmu_read_l0_bps(objset_t *os, uint64_t object, uint64_t offset, uint64_t length, 2176*e639e0d2SMartin Matuska blkptr_t *bps, size_t *nbpsp) 21772a58b312SMartin Matuska { 21782a58b312SMartin Matuska dmu_buf_t **dbp, *dbuf; 21792a58b312SMartin Matuska dmu_buf_impl_t *db; 21802a58b312SMartin Matuska blkptr_t *bp; 21812a58b312SMartin Matuska int error, numbufs; 21822a58b312SMartin Matuska 21832a58b312SMartin Matuska error = dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG, 21842a58b312SMartin Matuska &numbufs, &dbp); 21852a58b312SMartin Matuska if (error != 0) { 21862a58b312SMartin Matuska if (error == ESRCH) { 21872a58b312SMartin Matuska error = SET_ERROR(ENXIO); 21882a58b312SMartin Matuska } 21892a58b312SMartin Matuska return (error); 21902a58b312SMartin Matuska } 21912a58b312SMartin Matuska 21922a58b312SMartin Matuska ASSERT3U(numbufs, <=, *nbpsp); 21932a58b312SMartin Matuska 21942a58b312SMartin Matuska for (int i = 0; i < numbufs; i++) { 21952a58b312SMartin Matuska dbuf = dbp[i]; 21962a58b312SMartin Matuska db = (dmu_buf_impl_t *)dbuf; 21972a58b312SMartin Matuska 21982a58b312SMartin Matuska mutex_enter(&db->db_mtx); 21992a58b312SMartin Matuska 22002a58b312SMartin Matuska if (!list_is_empty(&db->db_dirty_records)) { 22012a58b312SMartin Matuska dbuf_dirty_record_t *dr; 22022a58b312SMartin Matuska 22032a58b312SMartin Matuska dr = list_head(&db->db_dirty_records); 22042a58b312SMartin Matuska if (dr->dt.dl.dr_brtwrite) { 22052a58b312SMartin Matuska /* 22062a58b312SMartin Matuska * This is very special case where we clone a 22072a58b312SMartin Matuska * block and in the same transaction group we 22082a58b312SMartin Matuska * read its BP (most likely to clone the clone). 22092a58b312SMartin Matuska */ 22102a58b312SMartin Matuska bp = &dr->dt.dl.dr_overridden_by; 22112a58b312SMartin Matuska } else { 22122a58b312SMartin Matuska /* 22132a58b312SMartin Matuska * The block was modified in the same 22142a58b312SMartin Matuska * transaction group. 22152a58b312SMartin Matuska */ 22162a58b312SMartin Matuska mutex_exit(&db->db_mtx); 22172a58b312SMartin Matuska error = SET_ERROR(EAGAIN); 22182a58b312SMartin Matuska goto out; 22192a58b312SMartin Matuska } 22202a58b312SMartin Matuska } else { 22212a58b312SMartin Matuska bp = db->db_blkptr; 22222a58b312SMartin Matuska } 22232a58b312SMartin Matuska 22242a58b312SMartin Matuska mutex_exit(&db->db_mtx); 22252a58b312SMartin Matuska 22262a58b312SMartin Matuska if (bp == NULL) { 22272a58b312SMartin Matuska /* 22282a58b312SMartin Matuska * The block was created in this transaction group, 22292a58b312SMartin Matuska * so it has no BP yet. 22302a58b312SMartin Matuska */ 22312a58b312SMartin Matuska error = SET_ERROR(EAGAIN); 22322a58b312SMartin Matuska goto out; 22332a58b312SMartin Matuska } 22342a58b312SMartin Matuska /* 22352a58b312SMartin Matuska * Make sure we clone only data blocks. 
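		 *
		 * (A hole passes this check; anything for which
		 * BP_IS_METADATA() is true, e.g. an indirect or dnode
		 * block, is rejected with EINVAL just below.)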
22362a58b312SMartin Matuska 		 */
22372a58b312SMartin Matuska 		if (BP_IS_METADATA(bp) && !BP_IS_HOLE(bp)) {
22382a58b312SMartin Matuska 			error = SET_ERROR(EINVAL);
22392a58b312SMartin Matuska 			goto out;
22402a58b312SMartin Matuska 		}
22412a58b312SMartin Matuska 
22422a58b312SMartin Matuska 		bps[i] = *bp;
22432a58b312SMartin Matuska 	}
22442a58b312SMartin Matuska 
22452a58b312SMartin Matuska 	*nbpsp = numbufs;
22462a58b312SMartin Matuska out:
22472a58b312SMartin Matuska 	dmu_buf_rele_array(dbp, numbufs, FTAG);
22482a58b312SMartin Matuska 
22492a58b312SMartin Matuska 	return (error);
22502a58b312SMartin Matuska }
22512a58b312SMartin Matuska 
2252*e639e0d2SMartin Matuska int
22532a58b312SMartin Matuska dmu_brt_clone(objset_t *os, uint64_t object, uint64_t offset, uint64_t length,
22542a58b312SMartin Matuska     dmu_tx_t *tx, const blkptr_t *bps, size_t nbps, boolean_t replay)
22552a58b312SMartin Matuska {
22562a58b312SMartin Matuska 	spa_t *spa;
22572a58b312SMartin Matuska 	dmu_buf_t **dbp, *dbuf;
22582a58b312SMartin Matuska 	dmu_buf_impl_t *db;
22592a58b312SMartin Matuska 	struct dirty_leaf *dl;
22602a58b312SMartin Matuska 	dbuf_dirty_record_t *dr;
22612a58b312SMartin Matuska 	const blkptr_t *bp;
2262*e639e0d2SMartin Matuska 	int error = 0, i, numbufs;
22632a58b312SMartin Matuska 
22642a58b312SMartin Matuska 	spa = os->os_spa;
22652a58b312SMartin Matuska 
22662a58b312SMartin Matuska 	VERIFY0(dmu_buf_hold_array(os, object, offset, length, FALSE, FTAG,
22672a58b312SMartin Matuska 	    &numbufs, &dbp));
22682a58b312SMartin Matuska 	ASSERT3U(nbps, ==, numbufs);
22692a58b312SMartin Matuska 
2270*e639e0d2SMartin Matuska 	/*
2271*e639e0d2SMartin Matuska 	 * Before we start cloning, make sure that the dbuf sizes match the
2272*e639e0d2SMartin Matuska 	 * new BPs' sizes. If they don't, that's a no-go, as we are not able
2273*e639e0d2SMartin Matuska 	 * to shrink dbufs.
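	 *
	 * (Illustrative case: cloning a 4K source block over a 128K
	 * destination dbuf would require shrinking that dbuf, so the first
	 * pass below fails any such size mismatch with EXDEV before
	 * anything is dirtied.)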
2274*e639e0d2SMartin Matuska 	 */
2275*e639e0d2SMartin Matuska 	for (i = 0; i < numbufs; i++) {
22762a58b312SMartin Matuska 		dbuf = dbp[i];
22772a58b312SMartin Matuska 		db = (dmu_buf_impl_t *)dbuf;
22782a58b312SMartin Matuska 		bp = &bps[i];
22792a58b312SMartin Matuska 
22802a58b312SMartin Matuska 		ASSERT0(db->db_level);
22812a58b312SMartin Matuska 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2282*e639e0d2SMartin Matuska 		ASSERT(db->db_blkid != DMU_SPILL_BLKID);
22832a58b312SMartin Matuska 
2284*e639e0d2SMartin Matuska 		if (!BP_IS_HOLE(bp) && BP_GET_LSIZE(bp) != dbuf->db_size) {
2285*e639e0d2SMartin Matuska 			error = SET_ERROR(EXDEV);
2286*e639e0d2SMartin Matuska 			goto out;
2287*e639e0d2SMartin Matuska 		}
22882a58b312SMartin Matuska 	}
22892a58b312SMartin Matuska 
2290*e639e0d2SMartin Matuska 	for (i = 0; i < numbufs; i++) {
2291*e639e0d2SMartin Matuska 		dbuf = dbp[i];
2292*e639e0d2SMartin Matuska 		db = (dmu_buf_impl_t *)dbuf;
2293*e639e0d2SMartin Matuska 		bp = &bps[i];
22942a58b312SMartin Matuska 
2295*e639e0d2SMartin Matuska 		ASSERT0(db->db_level);
2296*e639e0d2SMartin Matuska 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2297*e639e0d2SMartin Matuska 		ASSERT(db->db_blkid != DMU_SPILL_BLKID);
2298*e639e0d2SMartin Matuska 		ASSERT(BP_IS_HOLE(bp) || dbuf->db_size == BP_GET_LSIZE(bp));
2299*e639e0d2SMartin Matuska 
2300*e639e0d2SMartin Matuska 		dmu_buf_will_clone(dbuf, tx);
23012a58b312SMartin Matuska 
23022a58b312SMartin Matuska 		mutex_enter(&db->db_mtx);
23032a58b312SMartin Matuska 
23042a58b312SMartin Matuska 		dr = list_head(&db->db_dirty_records);
23052a58b312SMartin Matuska 		VERIFY(dr != NULL);
23062a58b312SMartin Matuska 		ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
23072a58b312SMartin Matuska 		dl = &dr->dt.dl;
23082a58b312SMartin Matuska 		dl->dr_overridden_by = *bp;
23092a58b312SMartin Matuska 		dl->dr_brtwrite = B_TRUE;
23102a58b312SMartin Matuska 		dl->dr_override_state = DR_OVERRIDDEN;
23112a58b312SMartin Matuska 		if (BP_IS_HOLE(bp)) {
23122a58b312SMartin Matuska 			dl->dr_overridden_by.blk_birth = 0;
23132a58b312SMartin Matuska 			dl->dr_overridden_by.blk_phys_birth = 0;
23142a58b312SMartin Matuska 		} else {
23152a58b312SMartin Matuska 			dl->dr_overridden_by.blk_birth = dr->dr_txg;
2316e0bb1999SPawel Jakub Dawidek 			if (!BP_IS_EMBEDDED(bp)) {
23172a58b312SMartin Matuska 				dl->dr_overridden_by.blk_phys_birth =
23182a58b312SMartin Matuska 				    BP_PHYSICAL_BIRTH(bp);
23192a58b312SMartin Matuska 			}
2320e0bb1999SPawel Jakub Dawidek 		}
23212a58b312SMartin Matuska 
23222a58b312SMartin Matuska 		mutex_exit(&db->db_mtx);
23232a58b312SMartin Matuska 
23242a58b312SMartin Matuska 		/*
23252a58b312SMartin Matuska 		 * When data is embedded into the BP there is no need to create
23262a58b312SMartin Matuska 		 * a BRT entry as there is no data block. Just copy the BP as
23272a58b312SMartin Matuska 		 * it contains the data.
23282a58b312SMartin Matuska 		 * Also, when replaying ZIL we don't want to bump references
23292a58b312SMartin Matuska 		 * in the BRT as it was already done during ZIL claim.
23302a58b312SMartin Matuska */ 23312a58b312SMartin Matuska if (!replay && !BP_IS_HOLE(bp) && !BP_IS_EMBEDDED(bp)) { 23322a58b312SMartin Matuska brt_pending_add(spa, bp, tx); 23332a58b312SMartin Matuska } 23342a58b312SMartin Matuska } 2335*e639e0d2SMartin Matuska out: 23362a58b312SMartin Matuska dmu_buf_rele_array(dbp, numbufs, FTAG); 2337*e639e0d2SMartin Matuska 2338*e639e0d2SMartin Matuska return (error); 23392a58b312SMartin Matuska } 23402a58b312SMartin Matuska 2341eda14cbcSMatt Macy void 2342eda14cbcSMatt Macy __dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2343eda14cbcSMatt Macy { 2344eda14cbcSMatt Macy dnode_phys_t *dnp = dn->dn_phys; 2345eda14cbcSMatt Macy 2346eda14cbcSMatt Macy doi->doi_data_block_size = dn->dn_datablksz; 2347eda14cbcSMatt Macy doi->doi_metadata_block_size = dn->dn_indblkshift ? 2348eda14cbcSMatt Macy 1ULL << dn->dn_indblkshift : 0; 2349eda14cbcSMatt Macy doi->doi_type = dn->dn_type; 2350eda14cbcSMatt Macy doi->doi_bonus_type = dn->dn_bonustype; 2351eda14cbcSMatt Macy doi->doi_bonus_size = dn->dn_bonuslen; 2352eda14cbcSMatt Macy doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT; 2353eda14cbcSMatt Macy doi->doi_indirection = dn->dn_nlevels; 2354eda14cbcSMatt Macy doi->doi_checksum = dn->dn_checksum; 2355eda14cbcSMatt Macy doi->doi_compress = dn->dn_compress; 2356eda14cbcSMatt Macy doi->doi_nblkptr = dn->dn_nblkptr; 2357eda14cbcSMatt Macy doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9; 2358eda14cbcSMatt Macy doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz; 2359eda14cbcSMatt Macy doi->doi_fill_count = 0; 2360eda14cbcSMatt Macy for (int i = 0; i < dnp->dn_nblkptr; i++) 2361eda14cbcSMatt Macy doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]); 2362eda14cbcSMatt Macy } 2363eda14cbcSMatt Macy 2364eda14cbcSMatt Macy void 2365eda14cbcSMatt Macy dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi) 2366eda14cbcSMatt Macy { 2367eda14cbcSMatt Macy rw_enter(&dn->dn_struct_rwlock, RW_READER); 2368eda14cbcSMatt Macy mutex_enter(&dn->dn_mtx); 2369eda14cbcSMatt Macy 2370eda14cbcSMatt Macy __dmu_object_info_from_dnode(dn, doi); 2371eda14cbcSMatt Macy 2372eda14cbcSMatt Macy mutex_exit(&dn->dn_mtx); 2373eda14cbcSMatt Macy rw_exit(&dn->dn_struct_rwlock); 2374eda14cbcSMatt Macy } 2375eda14cbcSMatt Macy 2376eda14cbcSMatt Macy /* 2377eda14cbcSMatt Macy * Get information on a DMU object. 2378eda14cbcSMatt Macy * If doi is NULL, just indicates whether the object exists. 2379eda14cbcSMatt Macy */ 2380eda14cbcSMatt Macy int 2381eda14cbcSMatt Macy dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) 2382eda14cbcSMatt Macy { 2383eda14cbcSMatt Macy dnode_t *dn; 2384eda14cbcSMatt Macy int err = dnode_hold(os, object, FTAG, &dn); 2385eda14cbcSMatt Macy 2386eda14cbcSMatt Macy if (err) 2387eda14cbcSMatt Macy return (err); 2388eda14cbcSMatt Macy 2389eda14cbcSMatt Macy if (doi != NULL) 2390eda14cbcSMatt Macy dmu_object_info_from_dnode(dn, doi); 2391eda14cbcSMatt Macy 2392eda14cbcSMatt Macy dnode_rele(dn, FTAG); 2393eda14cbcSMatt Macy return (0); 2394eda14cbcSMatt Macy } 2395eda14cbcSMatt Macy 2396eda14cbcSMatt Macy /* 2397eda14cbcSMatt Macy * As above, but faster; can be used when you have a held dbuf in hand. 
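 *
 * Illustrative sketch (hedged; "db" is a placeholder for a dbuf the
 * caller already holds):
 *
 *	dmu_object_info_t doi;
 *	dmu_object_info_from_db(db, &doi);
 *	blksz = doi.doi_data_block_size;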

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}

/*
 * Faster still when you only care about the size.
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add in number of slots used for the dnode itself */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
	DB_DNODE_EXIT(db);
}

void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
	DB_DNODE_EXIT(db);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

void
byteswap_uint8_array(void *vbuf, size_t size)
{
	(void) vbuf, (void) size;
}
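
/*
 * The helpers above are referenced from the per-type byteswap table
 * (dmu_ot_byteswap[]) and run when blocks written by a pool of the
 * opposite endianness are read.  A minimal sketch of their behavior;
 * note that swapping twice is the identity:
 *
 *	uint64_t v[2] = { 0x1122334455667788ULL, 0 };
 *
 *	byteswap_uint64_array(v, sizeof (v));
 *	ASSERT3U(v[0], ==, 0x8877665544332211ULL);
 *	byteswap_uint64_array(v, sizeof (v));
 *	ASSERT3U(v[0], ==, 0x1122334455667788ULL);
 */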

void
dmu_init(void)
{
	abd_init();
	zfs_dbgmsg_init();
	sa_cache_init();
	dmu_objset_init();
	dnode_init();
	zfetch_init();
	dmu_tx_init();
	l2arc_init();
	arc_init();
	dbuf_init();
}

void
dmu_fini(void)
{
	arc_fini(); /* arc depends on l2arc, so arc must go first */
	l2arc_fini();
	dmu_tx_fini();
	zfetch_fini();
	dbuf_fini();
	dnode_fini();
	dmu_objset_fini();
	sa_cache_fini();
	zfs_dbgmsg_fini();
	abd_fini();
}

EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);

ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
	"Enable NOP writes");

ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, UINT, ZMOD_RW,
	"Percentage of dirtied blocks from frees in one TXG");

ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
	"Enable forcing txg sync to find holes");

/* CSTYLED */
ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, UINT, ZMOD_RW,
	"Limit one prefetch call to this size");
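
/*
 * Illustrative note: on Linux the parameters above surface as files under
 * /sys/module/zfs/parameters/ (on FreeBSD, as vfs.zfs.* sysctls), e.g.:
 *
 *	echo 0 > /sys/module/zfs/parameters/zfs_dmu_offset_next_sync
 *	echo 67108864 > /sys/module/zfs/parameters/dmu_prefetch_max
 *
 * The first disables forced txg syncs when checking for holes; the second
 * caps a single prefetch call at 64 MiB.
 */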