/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/vfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/byteorder.h>
#include <sys/policy.h>
#include <sys/stat.h>
#include <sys/mode.h>
#include <sys/acl.h>
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/zfs_fuid.h>
#include <sys/ddi.h>
#include <sys/dsl_dataset.h>

#define	ZFS_HANDLE_REPLAY(zilog, tx) \
	if (zilog->zl_replay) { \
		dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx); \
		zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] = \
		    zilog->zl_replaying_seq; \
		return; \
	}

/*
 * These zfs_log_* functions must be called within a dmu tx, in one
 * of two contexts depending on zilog->zl_replay:
 *
 * Non-replay mode
 * ---------------
 * We need to record the transaction so that if it is committed to
 * the Intent Log then it can be replayed.  An intent log transaction
 * structure (itx_t) is allocated and all the information necessary to
 * possibly replay the transaction is saved in it.  The itx is then assigned
 * a sequence number and inserted in the in-memory list anchored in the zilog.
 *
 * Replay mode
 * -----------
 * We need to mark the intent log record as replayed in the log header.
 * This is done in the same transaction as the replay so that they
 * commit atomically.
 */

int
zfs_log_create_txtype(zil_create_t type, vsecattr_t *vsecp, vattr_t *vap)
{
	int isxvattr = (vap->va_mask & AT_XVATTR);

	switch (type) {
	case Z_FILE:
		if (vsecp == NULL && !isxvattr)
			return (TX_CREATE);
		if (vsecp && isxvattr)
			return (TX_CREATE_ACL_ATTR);
		if (vsecp)
			return (TX_CREATE_ACL);
		else
			return (TX_CREATE_ATTR);
		/*NOTREACHED*/
	case Z_DIR:
		if (vsecp == NULL && !isxvattr)
			return (TX_MKDIR);
		if (vsecp && isxvattr)
			return (TX_MKDIR_ACL_ATTR);
		if (vsecp)
			return (TX_MKDIR_ACL);
		else
			return (TX_MKDIR_ATTR);
	case Z_XATTRDIR:
		return (TX_MKXATTR);
	}
	ASSERT(0);
	return (TX_MAX_TYPE);
}
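
/*
 * Illustrative sketch only (not part of this file's logic, and disabled
 * below): a simplified caller, loosely modeled on the namespace operations
 * in zfs_vnops.c, showing the non-replay contract described above.  The
 * function name and the elided namespace change are hypothetical; the
 * DMU/ZIL calls are the usual ones, but error handling is omitted.
 */
#if 0
static void
zfs_log_example_caller(zfsvfs_t *zfsvfs, znode_t *dzp, char *name)
{
	zilog_t *zilog = zfsvfs->z_log;
	dmu_tx_t *tx;

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);	/* directory entry */
	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
		dmu_tx_abort(tx);
		return;
	}

	/* ... perform the namespace change here, inside the open tx ... */

	/*
	 * Record the change in the intent log while the tx is still open.
	 * In replay mode ZFS_HANDLE_REPLAY() turns this into a no-op that
	 * only records the replayed sequence number.
	 */
	zfs_log_remove(zilog, tx, TX_REMOVE, dzp, name);

	dmu_tx_commit(tx);

	/* For synchronous semantics the caller would then commit the ZIL. */
	zil_commit(zilog, dzp->z_last_itx, dzp->z_id);
}
#endif
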
/*
 * Build up the log data necessary for logging an xvattr_t.
 * First the lr_attr_t is initialized.  Following the lr_attr_t
 * are the mapsize and attribute bitmap copied from the xvattr_t.
 * Following the bitmap and bitmapsize, two 64-bit words are reserved
 * for the create time, which may be set.  Following the create time
 * is a single 64-bit integer that records the attribute bits to set
 * on replay for the xvattr.
 */
static void
zfs_log_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
	uint32_t *bitmap;
	uint64_t *attrs;
	uint64_t *crtime;
	xoptattr_t *xoap;
	void *scanstamp;
	int i;

	xoap = xva_getxoptattr(xvap);
	ASSERT(xoap);

	lrattr->lr_attr_masksize = xvap->xva_mapsize;
	bitmap = &lrattr->lr_attr_bitmap;
	for (i = 0; i != xvap->xva_mapsize; i++, bitmap++) {
		*bitmap = xvap->xva_reqattrmap[i];
	}

	/* Now pack the attributes up in a single uint64_t */
	attrs = (uint64_t *)bitmap;
	crtime = attrs + 1;
	scanstamp = (caddr_t)(crtime + 2);
	*attrs = 0;
	if (XVA_ISSET_REQ(xvap, XAT_READONLY))
		*attrs |= (xoap->xoa_readonly == 0) ? 0 :
		    XAT0_READONLY;
	if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
		*attrs |= (xoap->xoa_hidden == 0) ? 0 :
		    XAT0_HIDDEN;
	if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
		*attrs |= (xoap->xoa_system == 0) ? 0 :
		    XAT0_SYSTEM;
	if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
		*attrs |= (xoap->xoa_archive == 0) ? 0 :
		    XAT0_ARCHIVE;
	if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
		*attrs |= (xoap->xoa_immutable == 0) ? 0 :
		    XAT0_IMMUTABLE;
	if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
		*attrs |= (xoap->xoa_nounlink == 0) ? 0 :
		    XAT0_NOUNLINK;
	if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
		*attrs |= (xoap->xoa_appendonly == 0) ? 0 :
		    XAT0_APPENDONLY;
	if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
		*attrs |= (xoap->xoa_opaque == 0) ? 0 :
		    XAT0_OPAQUE;
	if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
		*attrs |= (xoap->xoa_nodump == 0) ? 0 :
		    XAT0_NODUMP;
	if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
		*attrs |= (xoap->xoa_av_quarantined == 0) ? 0 :
		    XAT0_AV_QUARANTINED;
	if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
		*attrs |= (xoap->xoa_av_modified == 0) ? 0 :
		    XAT0_AV_MODIFIED;
	if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
		ZFS_TIME_ENCODE(&xoap->xoa_createtime, crtime);
	if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
		bcopy(xoap->xoa_av_scanstamp, scanstamp, AV_SCANSTAMP_SZ);
}

static void *
zfs_log_fuid_ids(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_t *zfuid;
	uint64_t *fuidloc = start;

	/* First copy in the ACE FUIDs */
	for (zfuid = list_head(&fuidp->z_fuids); zfuid;
	    zfuid = list_next(&fuidp->z_fuids, zfuid)) {
		*fuidloc++ = zfuid->z_logfuid;
	}
	return (fuidloc);
}

static void *
zfs_log_fuid_domains(zfs_fuid_info_t *fuidp, void *start)
{
	zfs_fuid_domain_t *zdomain;

	/* Now copy in the domain info, if any */
	if (fuidp->z_domain_str_sz != 0) {
		for (zdomain = list_head(&fuidp->z_domains); zdomain;
		    zdomain = list_next(&fuidp->z_domains, zdomain)) {
			bcopy((void *)zdomain->z_domain, start,
			    strlen(zdomain->z_domain) + 1);
			start = (caddr_t)start +
			    strlen(zdomain->z_domain) + 1;
		}
	}
	return (start);
}
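
/*
 * Reference sketch (derived from zfs_log_xvattr() above; see ZIL_XVAT_SIZE
 * in sys/zil.h for the definitive sizing) of the region it fills in,
 * placed immediately after the fixed lr_*_t portion of the record:
 *
 *	uint32_t lr_attr_masksize;		  xva_mapsize
 *	uint32_t lr_attr_bitmap[xva_mapsize];	  requested-attribute bitmap
 *	uint64_t attrs;				  packed XAT0_* value bits
 *	uint64_t crtime[2];			  create time, if requested
 *	uint8_t  scanstamp[AV_SCANSTAMP_SZ];	  AV scanstamp, if requested
 *
 * Assuming xva_mapsize == XVA_MAPSIZE (3) and AV_SCANSTAMP_SZ of 32 bytes,
 * that is 4 + 12 + 8 + 16 + 32 = 72 bytes, which ZIL_XVAT_SIZE(3) must cover.
 */
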
/*
 * zfs_log_create() is used to handle TX_CREATE, TX_CREATE_ATTR, TX_MKDIR,
 * TX_MKDIR_ATTR and TX_MKXATTR transactions.
 *
 * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID
 * domain information appended prior to the name.  In this case the
 * uid/gid in the log record will be a log centric FUID.
 *
 * TX_CREATE_ACL_ATTR and TX_MKDIR_ACL_ATTR handle special creates that
 * may contain attributes, an ACL and optional FUID information.
 *
 * TX_CREATE_ACL and TX_MKDIR_ACL handle special creates that specify
 * an ACL and normal users/groups in the ACEs.
 *
 * There may be optional xvattr attribute information, similar
 * to zfs_log_setattr.
 *
 * FUID ids and "domain" strings, if any, precede the file name, which
 * is placed last in the record.
 */
void
zfs_log_create(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name, vsecattr_t *vsecp,
    zfs_fuid_info_t *fuidp, vattr_t *vap)
{
	itx_t *itx;
	uint64_t seq;
	lr_create_t *lr;
	lr_acl_create_t *lracl;
	size_t aclsize;
	size_t xvatsize = 0;
	size_t txsize;
	xvattr_t *xvap = (xvattr_t *)vap;
	void *end;
	size_t lrsize;
	size_t namesize = strlen(name) + 1;
	size_t fuidsz = 0;

	if (zilog == NULL)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	/*
	 * If we have FUIDs present then add in space for
	 * domains and ACE FUIDs, if any.
	 */
	if (fuidp) {
		fuidsz += fuidp->z_domain_str_sz;
		fuidsz += fuidp->z_fuid_cnt * sizeof (uint64_t);
	}

	if (vap->va_mask & AT_XVATTR)
		xvatsize = ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if ((int)txtype == TX_CREATE_ATTR || (int)txtype == TX_MKDIR_ATTR ||
	    (int)txtype == TX_CREATE || (int)txtype == TX_MKDIR ||
	    (int)txtype == TX_MKXATTR) {
		txsize = sizeof (*lr) + namesize + fuidsz + xvatsize;
		lrsize = sizeof (*lr);
	} else {
		aclsize = (vsecp) ? vsecp->vsa_aclentsz : 0;
		txsize =
		    sizeof (lr_acl_create_t) + namesize + fuidsz +
		    ZIL_ACE_LENGTH(aclsize) + xvatsize;
		lrsize = sizeof (lr_acl_create_t);
	}

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	lr->lr_mode = zp->z_phys->zp_mode;
	if (!IS_EPHEMERAL(zp->z_phys->zp_uid)) {
		lr->lr_uid = (uint64_t)zp->z_phys->zp_uid;
	} else {
		lr->lr_uid = fuidp->z_fuid_owner;
	}
	if (!IS_EPHEMERAL(zp->z_phys->zp_gid)) {
		lr->lr_gid = (uint64_t)zp->z_phys->zp_gid;
	} else {
		lr->lr_gid = fuidp->z_fuid_group;
	}
	lr->lr_gen = zp->z_phys->zp_gen;
	lr->lr_crtime[0] = zp->z_phys->zp_crtime[0];
	lr->lr_crtime[1] = zp->z_phys->zp_crtime[1];
	lr->lr_rdev = zp->z_phys->zp_rdev;

	/*
	 * Fill in xvattr info, if any
	 */
	if (vap->va_mask & AT_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), xvap);
		end = (caddr_t)lr + lrsize + xvatsize;
	} else {
		end = (caddr_t)lr + lrsize;
	}

	/* Now fill in any ACL info */

	if (vsecp) {
		lracl = (lr_acl_create_t *)&itx->itx_lr;
		lracl->lr_aclcnt = vsecp->vsa_aclcnt;
		lracl->lr_acl_bytes = aclsize;
		lracl->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lracl->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_aclflags & VSA_ACE_ACLFLAGS)
			lracl->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lracl->lr_acl_flags = 0;

		bcopy(vsecp->vsa_aclentp, end, aclsize);
		end = (caddr_t)end + ZIL_ACE_LENGTH(aclsize);
	}

	/* Drop in FUID info */
	if (fuidp) {
		end = zfs_log_fuid_ids(fuidp, end);
		end = zfs_log_fuid_domains(fuidp, end);
	}

	/*
	 * Now place the file name in the log record
	 */
	bcopy(name, end, namesize);

	seq = zil_itx_assign(zilog, itx, tx);
	dzp->z_last_itx = seq;
	zp->z_last_itx = seq;
}
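
/*
 * Illustrative layout sketch (derived from zfs_log_create() above, not from
 * any separate format specification) of a fully populated
 * TX_CREATE_ACL_ATTR / TX_MKDIR_ACL_ATTR record:
 *
 *	lr_acl_create_t		fixed portion (lrsize)
 *	xvattr data		ZIL_XVAT_SIZE(xva_mapsize) bytes
 *	ACEs			ZIL_ACE_LENGTH(aclsize) bytes
 *	FUID ids		z_fuid_cnt * sizeof (uint64_t) bytes
 *	FUID domain strings	z_domain_str_sz bytes, NUL-terminated strings
 *	file name		namesize bytes, including the NUL
 *
 * Plain TX_CREATE/TX_MKDIR/TX_MKXATTR records use lr_create_t for the fixed
 * portion and simply omit the pieces that are not present.
 */
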
/*
 * zfs_log_remove() handles both TX_REMOVE and TX_RMDIR transactions.
 */
void
zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, char *name)
{
	itx_t *itx;
	uint64_t seq;
	lr_remove_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zilog == NULL)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_remove_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	bcopy(name, (char *)(lr + 1), namesize);

	seq = zil_itx_assign(zilog, itx, tx);
	dzp->z_last_itx = seq;
}

/*
 * zfs_log_link() handles TX_LINK transactions.
 */
void
zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name)
{
	itx_t *itx;
	uint64_t seq;
	lr_link_t *lr;
	size_t namesize = strlen(name) + 1;

	if (zilog == NULL)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize);
	lr = (lr_link_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_link_obj = zp->z_id;
	bcopy(name, (char *)(lr + 1), namesize);

	seq = zil_itx_assign(zilog, itx, tx);
	dzp->z_last_itx = seq;
	zp->z_last_itx = seq;
}

/*
 * zfs_log_symlink() handles TX_SYMLINK transactions.
 */
void
zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *dzp, znode_t *zp, char *name, char *link)
{
	itx_t *itx;
	uint64_t seq;
	lr_create_t *lr;
	size_t namesize = strlen(name) + 1;
	size_t linksize = strlen(link) + 1;

	if (zilog == NULL)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	itx = zil_itx_create(txtype, sizeof (*lr) + namesize + linksize);
	lr = (lr_create_t *)&itx->itx_lr;
	lr->lr_doid = dzp->z_id;
	lr->lr_foid = zp->z_id;
	lr->lr_mode = zp->z_phys->zp_mode;
	lr->lr_uid = zp->z_phys->zp_uid;
	lr->lr_gid = zp->z_phys->zp_gid;
	lr->lr_gen = zp->z_phys->zp_gen;
	lr->lr_crtime[0] = zp->z_phys->zp_crtime[0];
	lr->lr_crtime[1] = zp->z_phys->zp_crtime[1];
	bcopy(name, (char *)(lr + 1), namesize);
	bcopy(link, (char *)(lr + 1) + namesize, linksize);

	seq = zil_itx_assign(zilog, itx, tx);
	dzp->z_last_itx = seq;
	zp->z_last_itx = seq;
}

/*
 * zfs_log_rename() handles TX_RENAME transactions.
 */
void
zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype,
    znode_t *sdzp, char *sname, znode_t *tdzp, char *dname, znode_t *szp)
{
	itx_t *itx;
	uint64_t seq;
	lr_rename_t *lr;
	size_t snamesize = strlen(sname) + 1;
	size_t dnamesize = strlen(dname) + 1;

	if (zilog == NULL)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	itx = zil_itx_create(txtype, sizeof (*lr) + snamesize + dnamesize);
	lr = (lr_rename_t *)&itx->itx_lr;
	lr->lr_sdoid = sdzp->z_id;
	lr->lr_tdoid = tdzp->z_id;
	bcopy(sname, (char *)(lr + 1), snamesize);
	bcopy(dname, (char *)(lr + 1) + snamesize, dnamesize);

	seq = zil_itx_assign(zilog, itx, tx);
	sdzp->z_last_itx = seq;
	tdzp->z_last_itx = seq;
	szp->z_last_itx = seq;
}

/*
 * zfs_log_write() handles TX_WRITE transactions.
 */
ssize_t zfs_immediate_write_sz = 32768;

#define	ZIL_MAX_LOG_DATA (SPA_MAXBLOCKSIZE - sizeof (zil_trailer_t) - \
    sizeof (lr_write_t))

void
zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, offset_t off, ssize_t resid, int ioflag)
{
	itx_wr_state_t write_state;
	boolean_t slogging;
	uintptr_t fsync_cnt;

	if (zilog == NULL || zp->z_unlinked)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	/*
	 * Writes are handled in three different ways:
	 *
	 * WR_INDIRECT:
	 *    In this mode, if we need to commit the write later, then the
	 *    block is immediately written into the file system (using
	 *    dmu_sync), and a pointer to the block is put into the log record.
	 *    When the txg commits the block is linked in.
	 *    This saves additionally writing the data into the log record.
	 *    There are a few requirements for this to occur:
	 *	- the write is greater than zfs_immediate_write_sz
	 *	- not using slogs (as slogs are assumed to always be faster
	 *	  than writing into the main pool)
	 *	- the write occupies only one block
	 * WR_COPIED:
	 *    If we know we'll immediately be committing the
	 *    transaction (FSYNC or FDSYNC), then we allocate a larger
	 *    log record here for the data and copy the data in.
	 * WR_NEED_COPY:
	 *    Otherwise we don't allocate a buffer, and *if* we need to
	 *    flush the write later then a buffer is allocated and
	 *    we retrieve the data using the dmu.
	 */
	slogging = spa_has_slogs(zilog->zl_spa);
	if (resid > zfs_immediate_write_sz && !slogging && resid <= zp->z_blksz)
		write_state = WR_INDIRECT;
	else if (ioflag & (FSYNC | FDSYNC))
		write_state = WR_COPIED;
	else
		write_state = WR_NEED_COPY;

	if ((fsync_cnt = (uintptr_t)tsd_get(zfs_fsyncer_key)) != 0) {
		(void) tsd_set(zfs_fsyncer_key, (void *)(fsync_cnt - 1));
	}

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;

		/*
		 * If the write would overflow the largest block then split it.
		 */
		if (write_state != WR_INDIRECT && resid > ZIL_MAX_LOG_DATA)
			len = SPA_MAXBLOCKSIZE >> 1;
		else
			len = resid;

		itx = zil_itx_create(txtype, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zp->z_zfsvfs->z_os,
		    zp->z_id, off, len, lr + 1) != 0) {
			kmem_free(itx, offsetof(itx_t, itx_lr) +
			    itx->itx_lr.lrc_reclen);
			itx = zil_itx_create(txtype, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = zp->z_id;
		lr->lr_offset = off;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zp->z_zfsvfs;

		if ((zp->z_sync_cnt != 0) || (fsync_cnt != 0) ||
		    (ioflag & (FSYNC | FDSYNC)))
			itx->itx_sync = B_TRUE;
		else
			itx->itx_sync = B_FALSE;

		zp->z_last_itx = zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}
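
/*
 * Summary sketch of the write_state decision in zfs_log_write() above
 * (a restatement, not an additional code path):
 *
 *	resid > zfs_immediate_write_sz (32K), no separate log devices,
 *	and the write fits in one block (resid <= z_blksz)	-> WR_INDIRECT
 *	else, caller indicated FSYNC/FDSYNC			-> WR_COPIED
 *	else							-> WR_NEED_COPY
 *
 * A WR_COPIED itx falls back to WR_NEED_COPY if the dmu_read() of the data
 * fails.  Large non-indirect writes are split so no single lr_write_t
 * payload exceeds ZIL_MAX_LOG_DATA; e.g. assuming SPA_MAXBLOCKSIZE of 128K,
 * a 1MB asynchronous write is logged as 16 chunks of 64K
 * (SPA_MAXBLOCKSIZE >> 1) each.
 */
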
/*
 * zfs_log_truncate() handles TX_TRUNCATE transactions.
 */
void
zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, uint64_t off, uint64_t len)
{
	itx_t *itx;
	uint64_t seq;
	lr_truncate_t *lr;

	if (zilog == NULL || zp->z_unlinked)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	itx = zil_itx_create(txtype, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = (zp->z_sync_cnt != 0);
	seq = zil_itx_assign(zilog, itx, tx);
	zp->z_last_itx = seq;
}

/*
 * zfs_log_setattr() handles TX_SETATTR transactions.
 */
void
zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype,
    znode_t *zp, vattr_t *vap, uint_t mask_applied, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	uint64_t seq;
	lr_setattr_t *lr;
	xvattr_t *xvap = (xvattr_t *)vap;
	size_t recsize = sizeof (lr_setattr_t);
	void *start;

	if (zilog == NULL || zp->z_unlinked)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	/*
	 * If XVATTR is set, then the log record size needs to allow
	 * for an lr_attr_t + the xvattr mask, mapsize and create time,
	 * plus the actual attribute values.
	 */
	if (vap->va_mask & AT_XVATTR)
		recsize = sizeof (*lr) + ZIL_XVAT_SIZE(xvap->xva_mapsize);

	if (fuidp)
		recsize += fuidp->z_domain_str_sz;

	itx = zil_itx_create(txtype, recsize);
	lr = (lr_setattr_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	lr->lr_mask = (uint64_t)mask_applied;
	lr->lr_mode = (uint64_t)vap->va_mode;
	if ((mask_applied & AT_UID) && IS_EPHEMERAL(vap->va_uid))
		lr->lr_uid = fuidp->z_fuid_owner;
	else
		lr->lr_uid = (uint64_t)vap->va_uid;

	if ((mask_applied & AT_GID) && IS_EPHEMERAL(vap->va_gid))
		lr->lr_gid = fuidp->z_fuid_group;
	else
		lr->lr_gid = (uint64_t)vap->va_gid;

	lr->lr_size = (uint64_t)vap->va_size;
	ZFS_TIME_ENCODE(&vap->va_atime, lr->lr_atime);
	ZFS_TIME_ENCODE(&vap->va_mtime, lr->lr_mtime);
	start = (lr_setattr_t *)(lr + 1);
	if (vap->va_mask & AT_XVATTR) {
		zfs_log_xvattr((lr_attr_t *)start, xvap);
		start = (caddr_t)start + ZIL_XVAT_SIZE(xvap->xva_mapsize);
	}

	/*
	 * Now stick on the domain information, if any, at the end
	 */
	if (fuidp)
		(void) zfs_log_fuid_domains(fuidp, start);

	itx->itx_sync = (zp->z_sync_cnt != 0);
	seq = zil_itx_assign(zilog, itx, tx);
	zp->z_last_itx = seq;
}
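
/*
 * Worked sizing note for zfs_log_setattr() above (illustrative; the exact
 * byte counts depend on the structure sizes of the build): a plain
 * chmod/chown with no xvattr and no FUID domain strings logs just
 * sizeof (lr_setattr_t); adding an xvattr with xva_mapsize == XVA_MAPSIZE
 * grows the record by ZIL_XVAT_SIZE(XVA_MAPSIZE), and an ephemeral (SMB)
 * owner or group additionally appends z_domain_str_sz bytes of domain
 * strings via zfs_log_fuid_domains().
 */
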
/*
 * zfs_log_acl() handles TX_ACL transactions.
 */
void
zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp,
    vsecattr_t *vsecp, zfs_fuid_info_t *fuidp)
{
	itx_t *itx;
	uint64_t seq;
	lr_acl_v0_t *lrv0;
	lr_acl_t *lr;
	int txtype;
	int lrsize;
	size_t txsize;
	size_t aclbytes = vsecp->vsa_aclentsz;

	if (zilog == NULL || zp->z_unlinked)
		return;

	ZFS_HANDLE_REPLAY(zilog, tx); /* exits if replay */

	txtype = (zp->z_zfsvfs->z_version < ZPL_VERSION_FUID) ?
	    TX_ACL_V0 : TX_ACL;

	if (txtype == TX_ACL)
		lrsize = sizeof (*lr);
	else
		lrsize = sizeof (*lrv0);

	txsize = lrsize +
	    ((txtype == TX_ACL) ? ZIL_ACE_LENGTH(aclbytes) : aclbytes) +
	    (fuidp ? fuidp->z_domain_str_sz : 0) +
	    sizeof (uint64_t) * (fuidp ? fuidp->z_fuid_cnt : 0);

	itx = zil_itx_create(txtype, txsize);

	lr = (lr_acl_t *)&itx->itx_lr;
	lr->lr_foid = zp->z_id;
	if (txtype == TX_ACL) {
		lr->lr_acl_bytes = aclbytes;
		lr->lr_domcnt = fuidp ? fuidp->z_domain_cnt : 0;
		lr->lr_fuidcnt = fuidp ? fuidp->z_fuid_cnt : 0;
		if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS)
			lr->lr_acl_flags = (uint64_t)vsecp->vsa_aclflags;
		else
			lr->lr_acl_flags = 0;
	}
	lr->lr_aclcnt = (uint64_t)vsecp->vsa_aclcnt;

	if (txtype == TX_ACL_V0) {
		lrv0 = (lr_acl_v0_t *)lr;
		bcopy(vsecp->vsa_aclentp, (ace_t *)(lrv0 + 1), aclbytes);
	} else {
		void *start = (ace_t *)(lr + 1);

		bcopy(vsecp->vsa_aclentp, start, aclbytes);

		start = (caddr_t)start + ZIL_ACE_LENGTH(aclbytes);

		if (fuidp) {
			start = zfs_log_fuid_ids(fuidp, start);
			(void) zfs_log_fuid_domains(fuidp, start);
		}
	}

	itx->itx_sync = (zp->z_sync_cnt != 0);
	seq = zil_itx_assign(zilog, itx, tx);
	zp->z_last_itx = seq;
}
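
/*
 * Worked sizing example for zfs_log_acl() above (illustrative only): on a
 * ZPL_VERSION_FUID or later file system, an ACL occupying aclbytes bytes
 * whose ACEs reference two ephemeral (SID-based) ids produces
 *
 *	txsize = sizeof (lr_acl_t) + ZIL_ACE_LENGTH(aclbytes) +
 *	    z_domain_str_sz + 2 * sizeof (uint64_t)
 *
 * whereas a pre-FUID file system logs the smaller TX_ACL_V0 form: just
 * sizeof (lr_acl_v0_t) plus the raw aclbytes, with no FUID payload.
 */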