// SPDX-License-Identifier: CDDL-1.0
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.
 * Copyright (c) 2025, Klara, Inc.
 */

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/cred.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>
#include <linux/mm_compat.h>

/*
 * Programming rules.
 *
 * Each vnode op performs some logical unit of work. To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory. The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *	This is done, while avoiding races, by calling zfs_enter(zfsvfs).
 *	A zfs_exit(zfsvfs) is needed before all returns. Any znodes
 *	must be checked with zfs_verify_zp(zp). Both of these macros
 *	can return EIO from the calling function.
 *
 *  (2) zrele() should always be the last thing except for zil_commit() (if
 *	necessary) and zfs_exit(). This is for 3 reasons: First, if it's the
 *	last reference, the vnode/znode can be freed, so the zp may point to
 *	freed memory. Second, the last reference will call zfs_zinactive(),
 *	which may induce a lot of work -- pushing cached pages (which acquires
 *	range locks) and syncing out cached atime changes. Third,
 *	zfs_zinactive() may require a new tx, which could deadlock the system
 *	if you were already holding one. This deadlock occurs because the tx
 *	currently being operated on prevents a txg from syncing, which
 *	prevents the new tx from progressing, resulting in a deadlock. If you
 *	must call zrele() within a tx, use zfs_zrele_async(). Note that iput()
 *	is a synonym for zrele().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *	as they can span dmu_tx_assign() calls.
 *
 *  (4) If ZPL locks are held, pass DMU_TX_NOWAIT as the second argument to
 *	dmu_tx_assign(). This is critical because we don't want to block
 *	while holding locks.
 *
 *	If no ZPL locks are held (aside from zfs_enter()), use DMU_TX_WAIT.
 *	This reduces lock contention and CPU usage when we must wait (note
 *	that if throughput is constrained by the storage, nearly every
 *	transaction must wait).
 *
 *	Note, in particular, that if a lock is sometimes acquired before
 *	the tx assigns, and sometimes after (e.g. z_lock), then failing
 *	to use a non-blocking assign can deadlock the system. The scenario:
 *
 *	Thread A has grabbed a lock before calling dmu_tx_assign().
 *	Thread B is in an already-assigned tx, and blocks for this lock.
 *	Thread A calls dmu_tx_assign(DMU_TX_WAIT) and blocks in
 *	txg_wait_open() forever, because the previous txg can't quiesce
 *	until B's tx commits.
 *
 *	If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
 *	DMU_TX_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
 *	again. On subsequent calls to dmu_tx_assign(), pass
 *	DMU_TX_NOTHROTTLE in addition to DMU_TX_NOWAIT, to indicate that
 *	this operation has already called dmu_tx_wait(). This will ensure
 *	that we don't retry forever, waiting a short bit each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *	before dropping locks. This ensures that the ordering of events
 *	in the intent log matches the order in which they actually occurred.
 *	During ZIL replay the zfs_log_* functions will update the sequence
 *	number to indicate the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *	regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *	to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *	zfs_enter(zfsvfs);		// exit if unmounted
 * top:
 *	zfs_dirent_lock(&dl, ...)	// lock directory entry (may igrab())
 *	rw_enter(...);			// grab any other locks you need
 *	tx = dmu_tx_create(...);	// get DMU tx
 *	dmu_tx_hold_*();		// hold each object you might modify
 *	error = dmu_tx_assign(tx,
 *	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
 *	if (error) {
 *		rw_exit(...);		// drop locks
 *		zfs_dirent_unlock(dl);	// unlock directory entry
 *		zrele(...);		// release held znodes
 *		if (error == ERESTART) {
 *			waited = B_TRUE;
 *			dmu_tx_wait(tx);
 *			dmu_tx_abort(tx);
 *			goto top;
 *		}
 *		dmu_tx_abort(tx);	// abort DMU tx
 *		zfs_exit(zfsvfs);	// finished in zfs
 *		return (error);		// really out of space
 *	}
 *	error = do_real_work();		// do whatever this VOP does
 *	if (error == 0)
 *		zfs_log_*(...);		// on success, make ZIL entry
 *	dmu_tx_commit(tx);		// commit DMU tx -- error or not
 *	rw_exit(...);			// drop locks
 *	zfs_dirent_unlock(dl);		// unlock directory entry
 *	zrele(...);			// release held znodes
 *	zil_commit(zilog, foid);	// synchronous when necessary
 *	zfs_exit(zfsvfs);		// finished in zfs
 *	return (error);			// done, report error
 */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
	(void) cr;
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	/* Honor ZFS_APPENDONLY file attribute */
	if (blk_mode_is_open_write(mode) && (zp->z_pflags & ZFS_APPENDONLY) &&
	    ((flag & O_APPEND) == 0)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EPERM));
	}

	/*
	 * Keep a count of the synchronous opens in the znode. On first
	 * synchronous open we must convert all previous async transactions
	 * into sync to keep correct ordering.
	 */
	if (flag & O_SYNC) {
		if (atomic_inc_32_nv(&zp->z_sync_cnt) == 1)
			zil_async_to_sync(zfsvfs->z_log, zp->z_id);
	}

	zfs_exit(zfsvfs, FTAG);
	return (0);
}

int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
	(void) cr;
	znode_t	*zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	/* Decrement the synchronous opens in the znode */
	if (flag & O_SYNC)
		atomic_dec_32(&zp->z_sync_cnt);

	zfs_exit(zfsvfs, FTAG);
	return (0);
}

#if defined(_KERNEL)

static int zfs_fillpage(struct inode *ip, struct page *pp);

/*
 * When a file is memory mapped, we must keep the I/O data synchronized
 * between the DMU cache and the memory mapped pages. Update all mapped
 * pages with the contents of the corresponding DMU buffer.
 */
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
	struct address_space *mp = ZTOI(zp)->i_mapping;
	int64_t	off = start & (PAGE_SIZE - 1);

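	/*
	 * Walk the range one page at a time: 'off' is the byte offset
	 * within the first page, and 'start' is rounded down to a page
	 * boundary so each iteration covers at most one full page.
	 */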
	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		uint64_t nbytes = MIN(PAGE_SIZE - off, len);

		struct page *pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {
			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			void *pb = kmap(pp);
			int error = dmu_read(os, zp->z_id, start + off,
			    nbytes, pb + off, DMU_READ_PREFETCH);
			kunmap(pp);

			if (error) {
				SetPageError(pp);
				ClearPageUptodate(pp);
			} else {
				ClearPageError(pp);
				SetPageUptodate(pp);

				if (mapping_writably_mapped(mp))
					flush_dcache_page(pp);

				mark_page_accessed(pp);
			}

			unlock_page(pp);
			put_page(pp);
		}

		len -= nbytes;
		off = 0;
	}
}

/*
 * When a file is memory mapped, we must keep the I/O data synchronized
 * between the DMU cache and the memory mapped pages. Preferentially read
 * from memory mapped pages, otherwise fall back to reading through the dmu.
 */
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
	struct inode *ip = ZTOI(zp);
	struct address_space *mp = ip->i_mapping;
	int64_t	start = uio->uio_loffset;
	int64_t	off = start & (PAGE_SIZE - 1);
	int len = nbytes;
	int error = 0;

	for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
		uint64_t bytes = MIN(PAGE_SIZE - off, len);

		struct page *pp = find_lock_page(mp, start >> PAGE_SHIFT);
		if (pp) {

			/*
			 * If filemap_fault() retries, there exists a window
			 * where the page will be unlocked and not up to date.
			 * In this case we must try and fill the page.
			 */
			if (unlikely(!PageUptodate(pp))) {
				error = zfs_fillpage(ip, pp);
				if (error) {
					unlock_page(pp);
					put_page(pp);
					return (error);
				}
			}

			ASSERT(PageUptodate(pp) || PageDirty(pp));

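			/*
			 * Drop the page lock before copying: zfs_uiomove()
			 * may fault on a userspace page, and faulting while
			 * this page is still locked could deadlock against
			 * filemap_fault() trying to lock the same page.
			 */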
			unlock_page(pp);

			void *pb = kmap(pp);
			error = zfs_uiomove(pb + off, bytes, UIO_READ, uio);
			kunmap(pp);

			if (mapping_writably_mapped(mp))
				flush_dcache_page(pp);

			mark_page_accessed(pp);
			put_page(pp);
		} else {
			error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
			    uio, bytes, DMU_READ_PREFETCH);
		}

		len -= bytes;
		off = 0;

		if (error)
			break;
	}

	return (error);
}
#endif /* _KERNEL */

static unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;

/*
 * Write the bytes to a file.
 *
 *	IN:	zp	- znode of file to be written to
 *		data	- bytes to write
 *		len	- number of bytes to write
 *		pos	- offset to start writing at
 *
 *	OUT:	residp	- remaining bytes to write
 *
 *	RETURN:	0 if success
 *		positive error code if failure. EIO is returned
 *		for a short write when residp isn't provided.
 *
 * Timestamps:
 *	zp - ctime|mtime updated if byte count > 0
 */
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
    loff_t pos, size_t *residp)
{
	fstrans_cookie_t cookie;
	int error;

	struct iovec iov;
	iov.iov_base = (void *)data;
	iov.iov_len = len;

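	/* Wrap the caller's buffer in a single-segment, kernel-space uio. */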
	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, &iov, 1, pos, UIO_SYSSPACE, len, 0);

	cookie = spl_fstrans_mark();
	error = zfs_write(zp, &uio, 0, kcred);
	spl_fstrans_unmark(cookie);

	if (error == 0) {
		if (residp != NULL)
			*residp = zfs_uio_resid(&uio);
		else if (zfs_uio_resid(&uio) != 0)
			error = SET_ERROR(EIO);
	}

	return (error);
}

static void
zfs_rele_async_task(void *arg)
{
	iput(arg);
}

void
zfs_zrele_async(znode_t *zp)
{
	struct inode *ip = ZTOI(zp);
	objset_t *os = ITOZSB(ip)->z_os;

	ASSERT(atomic_read(&ip->i_count) > 0);
	ASSERT(os != NULL);

	/*
	 * If decrementing the count would put us at 0, we can't do it inline
	 * here, because that would be synchronous. Instead, dispatch an iput
	 * to run later.
	 *
	 * For more information on the dangers of a synchronous iput, see the
	 * header comment of this file.
	 */
	if (!atomic_add_unless(&ip->i_count, -1, 1)) {
		VERIFY(taskq_dispatch(dsl_pool_zrele_taskq(dmu_objset_pool(os)),
		    zfs_rele_async_task, ip, TQ_SLEEP) != TASKQID_INVALID);
	}
}

/*
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *	IN:	zdp	- znode of directory to search.
 *		nm	- name of entry to lookup.
 *		flags	- LOOKUP_XATTR set if looking for an attribute.
 *		cr	- credentials of caller.
 *		direntflags - directory lookup flags
 *		realpnp	- returned pathname.
 *
 *	OUT:	zpp	- znode of located entry, NULL if not found.
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	NA
 */
int
zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
    int *direntflags, pathname_t *realpnp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(zdp);
	int error = 0;

	/*
	 * Fast path lookup, however we must skip DNLC lookup
	 * for case folding or normalizing lookups because the
	 * DNLC code only stores the passed-in name. This means
	 * creating 'a' and removing 'A' on a case insensitive
	 * file system would work, but DNLC still thinks 'a'
	 * exists and won't let you create it again on the next
	 * pass through fast path.
	 */
	if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {

		if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
			return (SET_ERROR(ENOTDIR));
		} else if (zdp->z_sa_hdl == NULL) {
			return (SET_ERROR(EIO));
		}

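		/* An empty name or "." refers to the directory itself. */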
		if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
			error = zfs_fastaccesschk_execute(zdp, cr);
			if (!error) {
				*zpp = zdp;
				zhold(*zpp);
				return (0);
			}
			return (error);
		}
	}

	if ((error = zfs_enter_verify_zp(zfsvfs, zdp, FTAG)) != 0)
		return (error);

	*zpp = NULL;

	if (flags & LOOKUP_XATTR) {
		/*
		 * We don't allow recursive attributes.
		 * Maybe someday we will.
		 */
		if (zdp->z_pflags & ZFS_XATTR) {
			zfs_exit(zfsvfs, FTAG);
			return (SET_ERROR(EINVAL));
		}

		if ((error = zfs_get_xattrdir(zdp, zpp, cr, flags))) {
			zfs_exit(zfsvfs, FTAG);
			return (error);
		}

		/*
		 * Do we have permission to get into attribute directory?
		 */
		if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
		    B_TRUE, cr, zfs_init_idmap))) {
			zrele(*zpp);
			*zpp = NULL;
		}

		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if (!S_ISDIR(ZTOI(zdp)->i_mode)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(ENOTDIR));
	}

	/*
	 * Check accessibility of directory.
	 */
	if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
	    zfs_init_idmap))) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EILSEQ));
	}

	error = zfs_dirlook(zdp, nm, zpp, flags, direntflags, realpnp);
	if ((error == 0) && (*zpp))
		zfs_znode_update_vfs(*zpp);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

538
539 /*
540 * Perform a linear search in directory for the name of specific inode.
541 * Note we don't pass in the buffer size of name because it's hardcoded to
542 * NAME_MAX+1(256) in Linux.
543 *
544 * IN: dzp - znode of directory to search.
545 * zp - znode of the target
546 *
547 * OUT: name - dentry name of the target
548 *
549 * RETURN: 0 on success, error code on failure.
550 */
int
zfs_get_name(znode_t *dzp, char *name, znode_t *zp)
{
	zfsvfs_t *zfsvfs = ZTOZSB(dzp);
	int error = 0;

	if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
		return (error);

	if ((error = zfs_verify_zp(zp)) != 0) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	/* ctldir entries should have gotten their names in zfs_vget */
	if (dzp->z_is_ctldir || zp->z_is_ctldir) {
		zfs_exit(zfsvfs, FTAG);
		return (ENOENT);
	}

	/* buffer len is hardcoded to 256 in Linux kernel */
	error = zap_value_search(zfsvfs->z_os, dzp->z_id, zp->z_id,
	    ZFS_DIRENT_OBJ(-1ULL), name, ZAP_MAXNAMELEN);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

578
579 /*
580 * Attempt to create a new entry in a directory. If the entry
581 * already exists, truncate the file if permissible, else return
582 * an error. Return the ip of the created or trunc'd file.
583 *
584 * IN: dzp - znode of directory to put new file entry in.
585 * name - name of new file entry.
586 * vap - attributes of new file.
587 * excl - flag indicating exclusive or non-exclusive mode.
588 * mode - mode to open file with.
589 * cr - credentials of caller.
590 * flag - file flag.
591 * vsecp - ACL to be set
592 * mnt_ns - user namespace of the mount
593 *
594 * OUT: zpp - znode of created or trunc'd entry.
595 *
596 * RETURN: 0 on success, error code on failure.
597 *
598 * Timestamps:
599 * dzp - ctime|mtime updated if new entry created
600 * zp - ctime|mtime always, atime if new
601 */
int
zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
    int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp,
    zidmap_t *mnt_ns)
{
	znode_t		*zp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	objset_t	*os;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;
	boolean_t	skip_acl = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure the file system is at the proper version.
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
		return (error);
	os = zfsvfs->z_os;
	zilog = zfsvfs->z_log;

	if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
	    NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EILSEQ));
	}

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			zfs_exit(zfsvfs, FTAG);
			return (error);
		}
	}

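	/*
	 * Retry point if dmu_tx_assign() returns ERESTART below; all
	 * locks are dropped before jumping back here (see rule (4) in
	 * the Big Rules comment at the top of this file).
	 */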
top:
	*zpp = NULL;
	if (*name == '\0') {
		/*
		 * Null component name refers to the directory itself.
		 */
		zhold(dzp);
		zp = dzp;
		dl = NULL;
		error = 0;
	} else {
		/* possible igrab(zp) */
		int zflg = 0;

		if (flag & FIGNORECASE)
			zflg |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
		    NULL, NULL);
		if (error) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			if (strcmp(name, "..") == 0)
				error = SET_ERROR(EISDIR);
			zfs_exit(zfsvfs, FTAG);
			return (error);
		}
	}

	if (zp == NULL) {
		uint64_t txtype;
		uint64_t projid = ZFS_DEFAULT_PROJID;

		/*
		 * Create a new file object and update the directory
		 * to reference it.
		 */
		if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, skip_acl, cr,
		    mnt_ns))) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			goto out;
		}

		/*
		 * We only support the creation of regular files in
		 * extended attribute directories.
		 */
		if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
			if (have_acl)
				zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EINVAL);
			goto out;
		}

		if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
		    cr, vsecp, &acl_ids, mnt_ns)) != 0)
			goto out;
		have_acl = B_TRUE;

		if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
			projid = zfs_inherit_projid(dzp);
		if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
			zfs_acl_ids_free(&acl_ids);
			error = SET_ERROR(EDQUOT);
			goto out;
		}

		tx = dmu_tx_create(os);

		dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
		    ZFS_SA_BASE_ATTR_SIZE);

		fuid_dirtied = zfsvfs->z_fuid_dirty;
		if (fuid_dirtied)
			zfs_fuid_txhold(zfsvfs, tx);
		dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
		dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
		if (!zfsvfs->z_use_sa &&
		    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
			dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
			    0, acl_ids.z_aclp->z_acl_bytes);
		}

		error = dmu_tx_assign(tx,
		    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
		if (error) {
			zfs_dirent_unlock(dl);
			if (error == ERESTART) {
				waited = B_TRUE;
				dmu_tx_wait(tx);
				dmu_tx_abort(tx);
				goto top;
			}
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_abort(tx);
			zfs_exit(zfsvfs, FTAG);
			return (error);
		}
		zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

		error = zfs_link_create(dl, zp, tx, ZNEW);
		if (error != 0) {
			/*
			 * Since we failed to add the directory entry for it,
			 * delete the newly created dnode.
			 */
			zfs_znode_delete(zp, tx);
			remove_inode_hash(ZTOI(zp));
			zfs_acl_ids_free(&acl_ids);
			dmu_tx_commit(tx);
			goto out;
		}

		if (fuid_dirtied)
			zfs_fuid_sync(zfsvfs, tx);

		txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
		if (flag & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_create(zilog, tx, txtype, dzp, zp, name,
		    vsecp, acl_ids.z_fuidp, vap);
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_commit(tx);
	} else {
		int aflags = (flag & O_APPEND) ? V_APPEND : 0;

		if (have_acl)
			zfs_acl_ids_free(&acl_ids);

		/*
		 * A directory entry already exists for this name.
		 */
		/*
		 * Can't truncate an existing file if in exclusive mode.
		 */
		if (excl) {
			error = SET_ERROR(EEXIST);
			goto out;
		}
		/*
		 * Can't open a directory for writing.
		 */
		if (S_ISDIR(ZTOI(zp)->i_mode)) {
			error = SET_ERROR(EISDIR);
			goto out;
		}
		/*
		 * Verify requested access to file.
		 */
		if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr,
		    mnt_ns))) {
			goto out;
		}

		mutex_enter(&dzp->z_lock);
		dzp->z_seq++;
		mutex_exit(&dzp->z_lock);

		/*
		 * Truncate regular files if requested.
		 */
		if (S_ISREG(ZTOI(zp)->i_mode) &&
		    (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
			/* we can't hold any locks when calling zfs_freesp() */
			if (dl) {
				zfs_dirent_unlock(dl);
				dl = NULL;
			}
			error = zfs_freesp(zp, 0, 0, mode, TRUE);
		}
	}
out:

	if (dl)
		zfs_dirent_unlock(dl);

	if (error) {
		if (zp)
			zrele(zp);
	} else {
		zfs_znode_update_vfs(dzp);
		zfs_znode_update_vfs(zp);
		*zpp = zp;
	}

	if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		error = zil_commit(zilog, 0);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp,
    zidmap_t *mnt_ns)
{
	(void) excl, (void) mode, (void) flag;
	znode_t		*zp = NULL, *dzp = ITOZ(dip);
	zfsvfs_t	*zfsvfs = ITOZSB(dip);
	objset_t	*os;
	dmu_tx_t	*tx;
	int		error;
	uid_t		uid;
	gid_t		gid;
	zfs_acl_ids_t	acl_ids;
	uint64_t	projid = ZFS_DEFAULT_PROJID;
	boolean_t	fuid_dirtied;
	boolean_t	have_acl = B_FALSE;
	boolean_t	waited = B_FALSE;

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure the file system is at the proper version.
	 */
	gid = crgetgid(cr);
	uid = crgetuid(cr);

	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
		return (error);
	os = zfsvfs->z_os;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			zfs_exit(zfsvfs, FTAG);
			return (error);
		}
	}

top:
	*ipp = NULL;

	/*
	 * Create a new file object and update the directory
	 * to reference it.
	 */
	if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
		if (have_acl)
			zfs_acl_ids_free(&acl_ids);
		goto out;
	}

	if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
	    cr, vsecp, &acl_ids, mnt_ns)) != 0)
		goto out;
	have_acl = B_TRUE;

	if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
		projid = zfs_inherit_projid(dzp);
	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
		zfs_acl_ids_free(&acl_ids);
		error = SET_ERROR(EDQUOT);
		goto out;
	}

	tx = dmu_tx_create(os);

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa &&
	    acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
		    0, acl_ids.z_aclp->z_acl_bytes);
	}
	error = dmu_tx_assign(tx,
	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
	if (error) {
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}
	zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	/* Add to unlinked set */
	zp->z_unlinked = B_TRUE;
	zfs_unlinked_add(zp, tx);
	zfs_acl_ids_free(&acl_ids);
	dmu_tx_commit(tx);
out:

	if (error) {
		if (zp)
			zrele(zp);
	} else {
		zfs_znode_update_vfs(dzp);
		zfs_znode_update_vfs(zp);
		*ipp = ZTOI(zp);
	}

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

/*
 * Remove an entry from a directory.
 *
 *	IN:	dzp	- znode of directory to remove entry from.
 *		name	- name of entry to remove.
 *		cr	- credentials of caller.
 *		flags	- case flags.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dzp - ctime|mtime
 *	 ip - ctime (if nlink > 0)
 */

static uint64_t null_xattr = 0;

int
zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
{
	znode_t		*zp;
	znode_t		*xzp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	uint64_t	acl_obj, xattr_obj;
	uint64_t	xattr_obj_unlinked = 0;
	uint64_t	obj = 0;
	uint64_t	links;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	boolean_t	may_delete_now, delete_now = FALSE;
	boolean_t	unlinked, toobig = FALSE;
	uint64_t	txtype;
	pathname_t	*realnmp = NULL;
	pathname_t	realnm;
	int		error;
	int		zflg = ZEXISTS;
	boolean_t	waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
		return (error);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE) {
		zflg |= ZCILOOK;
		pn_alloc(&realnm);
		realnmp = &realnm;
	}

top:
	xattr_obj = 0;
	xzp = NULL;
	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, realnmp))) {
		if (realnmp)
			pn_free(realnmp);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if ((error = zfs_zaccess_delete(dzp, zp, cr, zfs_init_idmap))) {
		goto out;
	}

	/*
	 * Need to use rmdir for removing directories.
	 */
	if (S_ISDIR(ZTOI(zp)->i_mode)) {
		error = SET_ERROR(EPERM);
		goto out;
	}

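	/*
	 * Snapshot, under z_lock, whether the inode could be freed inline:
	 * we must hold the only reference and there must be no cached
	 * pages. The decision is re-validated under z_lock after the tx
	 * assigns (see the delete_now computation below).
	 */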
	mutex_enter(&zp->z_lock);
	may_delete_now = atomic_read(&ZTOI(zp)->i_count) == 1 &&
	    !zn_has_cached_data(zp, 0, LLONG_MAX);
	mutex_exit(&zp->z_lock);

	/*
	 * We may delete the znode now, or we may put it in the unlinked set;
	 * it depends on whether we're the last link, and on whether there are
	 * other holds on the inode. So we dmu_tx_hold() the right things to
	 * allow for either case.
	 */
	obj = zp->z_id;
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	if (may_delete_now) {
		toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
		/* if the file is too big, only hold_free a token amount */
		dmu_tx_hold_free(tx, zp->z_id, 0,
		    (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
	}

	/* are there any extended attributes? */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
	    &xattr_obj, sizeof (xattr_obj));
	if (error == 0 && xattr_obj) {
		error = zfs_zget(zfsvfs, xattr_obj, &xzp);
		ASSERT0(error);
		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
	}

	mutex_enter(&zp->z_lock);
	if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
		dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
	mutex_exit(&zp->z_lock);

	/* charge as an update -- would be nice not to charge at all */
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

	/*
	 * Mark this transaction as typically resulting in a net free of space
	 */
	dmu_tx_mark_netfree(tx);

	error = dmu_tx_assign(tx,
	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			zrele(zp);
			if (xzp)
				zrele(xzp);
			goto top;
		}
		if (realnmp)
			pn_free(realnmp);
		dmu_tx_abort(tx);
		zrele(zp);
		if (xzp)
			zrele(xzp);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	/*
	 * Remove the directory entry.
	 */
	error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

	if (error) {
		dmu_tx_commit(tx);
		goto out;
	}

	if (unlinked) {
		/*
		 * Hold z_lock so that we can make sure that the ACL obj
		 * hasn't changed. Could have been deleted due to
		 * zfs_sa_upgrade().
		 */
		mutex_enter(&zp->z_lock);
		(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
		    &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
		delete_now = may_delete_now && !toobig &&
		    atomic_read(&ZTOI(zp)->i_count) == 1 &&
		    !zn_has_cached_data(zp, 0, LLONG_MAX) &&
		    xattr_obj == xattr_obj_unlinked &&
		    zfs_external_acl(zp) == acl_obj;
		VERIFY_IMPLY(xattr_obj_unlinked, xzp);
	}

	if (delete_now) {
		if (xattr_obj_unlinked) {
			ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
			mutex_enter(&xzp->z_lock);
			xzp->z_unlinked = B_TRUE;
			clear_nlink(ZTOI(xzp));
			links = 0;
			error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
			    &links, sizeof (links), tx);
			ASSERT3U(error, ==, 0);
			mutex_exit(&xzp->z_lock);
			zfs_unlinked_add(xzp, tx);

			if (zp->z_is_sa)
				error = sa_remove(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), tx);
			else
				error = sa_update(zp->z_sa_hdl,
				    SA_ZPL_XATTR(zfsvfs), &null_xattr,
				    sizeof (uint64_t), tx);
			ASSERT0(error);
		}
		/*
		 * Add to the unlinked set because a new reference could be
		 * taken concurrently, resulting in a deferred destruction.
		 */
		zfs_unlinked_add(zp, tx);
		mutex_exit(&zp->z_lock);
	} else if (unlinked) {
		mutex_exit(&zp->z_lock);
		zfs_unlinked_add(zp, tx);
	}

	txtype = TX_REMOVE;
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);

	dmu_tx_commit(tx);
out:
	if (realnmp)
		pn_free(realnmp);

	zfs_dirent_unlock(dl);
	zfs_znode_update_vfs(dzp);
	zfs_znode_update_vfs(zp);

	if (delete_now)
		zrele(zp);
	else
		zfs_zrele_async(zp);

	if (xzp) {
		zfs_znode_update_vfs(xzp);
		zfs_zrele_async(xzp);
	}

	if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		error = zil_commit(zilog, 0);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

/*
 * Create a new directory and insert it into dzp using the name
 * provided. Return a pointer to the inserted directory.
 *
 *	IN:	dzp	- znode of directory to add subdir to.
 *		dirname	- name of new directory.
 *		vap	- attributes of new directory.
 *		cr	- credentials of caller.
 *		flags	- case flags.
 *		vsecp	- ACL to be set
 *		mnt_ns	- user namespace of the mount
 *
 *	OUT:	zpp	- znode of created directory.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	dzp - ctime|mtime updated
 *	zpp - ctime|mtime|atime updated
 */
int
zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
    cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{
	znode_t		*zp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	uint64_t	txtype;
	dmu_tx_t	*tx;
	int		error;
	int		zf = ZNEW;
	uid_t		uid;
	gid_t		gid = crgetgid(cr);
	zfs_acl_ids_t	acl_ids;
	boolean_t	fuid_dirtied;
	boolean_t	waited = B_FALSE;

	ASSERT(S_ISDIR(vap->va_mode));

	/*
	 * If we have an ephemeral id, ACL, or XVATTR then
	 * make sure the file system is at the proper version.
	 */
	uid = crgetuid(cr);
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
		return (SET_ERROR(EINVAL));

	if (dirname == NULL)
		return (SET_ERROR(EINVAL));

	if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
		return (error);
	zilog = zfsvfs->z_log;

	if (dzp->z_pflags & ZFS_XATTR) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	if (zfsvfs->z_utf8 && u8_validate(dirname,
	    strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EILSEQ));
	}
	if (flags & FIGNORECASE)
		zf |= ZCILOOK;

	if (vap->va_mask & ATTR_XVATTR) {
		if ((error = secpolicy_xvattr((xvattr_t *)vap,
		    crgetuid(cr), cr, vap->va_mode)) != 0) {
			zfs_exit(zfsvfs, FTAG);
			return (error);
		}
	}

	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
	    vsecp, &acl_ids, mnt_ns)) != 0) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}
	/*
	 * First make sure the new directory doesn't exist.
	 *
	 * Existence is checked first to make sure we don't return
	 * EACCES instead of EEXIST, which can cause some applications
	 * to fail.
	 */
top:
	*zpp = NULL;

	if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
	    NULL, NULL))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr,
	    mnt_ns))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
		zfs_acl_ids_free(&acl_ids);
		zfs_dirent_unlock(dl);
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EDQUOT));
	}

	/*
	 * Add a new entry to the directory.
	 */
	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
	dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
	fuid_dirtied = zfsvfs->z_fuid_dirty;
	if (fuid_dirtied)
		zfs_fuid_txhold(zfsvfs, tx);
	if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
		dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
		    acl_ids.z_aclp->z_acl_bytes);
	}

	dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
	    ZFS_SA_BASE_ATTR_SIZE);

	error = dmu_tx_assign(tx,
	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
	if (error) {
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			goto top;
		}
		zfs_acl_ids_free(&acl_ids);
		dmu_tx_abort(tx);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	/*
	 * Create new node.
	 */
	zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

	/*
	 * Now put new name in parent dir.
	 */
	error = zfs_link_create(dl, zp, tx, ZNEW);
	if (error != 0) {
		zfs_znode_delete(zp, tx);
		remove_inode_hash(ZTOI(zp));
		goto out;
	}

	if (fuid_dirtied)
		zfs_fuid_sync(zfsvfs, tx);

	*zpp = zp;

	txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
	if (flags & FIGNORECASE)
		txtype |= TX_CI;
	zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
	    acl_ids.z_fuidp, vap);

out:
	zfs_acl_ids_free(&acl_ids);

	dmu_tx_commit(tx);

	zfs_dirent_unlock(dl);

	if (error != 0) {
		zrele(zp);
	} else {
		zfs_znode_update_vfs(dzp);
		zfs_znode_update_vfs(zp);

		if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
			error = zil_commit(zilog, 0);
	}
	zfs_exit(zfsvfs, FTAG);
	return (error);
}

/*
 * Remove a directory subdir entry. If the current working
 * directory is the same as the subdir to be removed, the
 * remove will fail.
 *
 *	IN:	dzp	- znode of directory to remove from.
 *		name	- name of directory to be removed.
 *		cwd	- inode of current working directory.
 *		cr	- credentials of caller.
 *		flags	- case flags
 *
 *	RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	dzp - ctime|mtime updated
 */
int
zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
    int flags)
{
	znode_t		*zp;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	zilog_t		*zilog;
	zfs_dirlock_t	*dl;
	dmu_tx_t	*tx;
	int		error;
	int		zflg = ZEXISTS;
	boolean_t	waited = B_FALSE;

	if (name == NULL)
		return (SET_ERROR(EINVAL));

	if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
		return (error);
	zilog = zfsvfs->z_log;

	if (flags & FIGNORECASE)
		zflg |= ZCILOOK;
top:
	zp = NULL;

	/*
	 * Attempt to lock directory; fail if entry doesn't exist.
	 */
	if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
	    NULL, NULL))) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if ((error = zfs_zaccess_delete(dzp, zp, cr, zfs_init_idmap))) {
		goto out;
	}

	if (!S_ISDIR(ZTOI(zp)->i_mode)) {
		error = SET_ERROR(ENOTDIR);
		goto out;
	}

	if (zp == cwd) {
		error = SET_ERROR(EINVAL);
		goto out;
	}

	/*
	 * Grab a lock on the directory to make sure that no one is
	 * trying to add (or lookup) entries while we are removing it.
	 */
	rw_enter(&zp->z_name_lock, RW_WRITER);

	/*
	 * Grab a lock on the parent pointer to make sure we play well
	 * with the treewalk and directory rename code.
	 */
	rw_enter(&zp->z_parent_lock, RW_WRITER);

	tx = dmu_tx_create(zfsvfs->z_os);
	dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
	zfs_sa_upgrade_txholds(tx, zp);
	zfs_sa_upgrade_txholds(tx, dzp);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx,
	    (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
	if (error) {
		rw_exit(&zp->z_parent_lock);
		rw_exit(&zp->z_name_lock);
		zfs_dirent_unlock(dl);
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);
			dmu_tx_abort(tx);
			zrele(zp);
			goto top;
		}
		dmu_tx_abort(tx);
		zrele(zp);
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

	if (error == 0) {
		uint64_t txtype = TX_RMDIR;
		if (flags & FIGNORECASE)
			txtype |= TX_CI;
		zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT,
		    B_FALSE);
	}

	dmu_tx_commit(tx);

	rw_exit(&zp->z_parent_lock);
	rw_exit(&zp->z_name_lock);
out:
	zfs_dirent_unlock(dl);

	zfs_znode_update_vfs(dzp);
	zfs_znode_update_vfs(zp);
	zrele(zp);

	if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		error = zil_commit(zilog, 0);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}

/*
 * Read directory entries from the given directory cursor position and emit
 * name and position for each entry.
 *
 *	IN:	ip	- inode of directory to read.
 *		ctx	- directory entry context.
 *		cr	- credentials of caller.
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap are always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
 */
int
zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr)
{
	(void) cr;
	znode_t		*zp = ITOZ(ip);
	zfsvfs_t	*zfsvfs = ITOZSB(ip);
	objset_t	*os;
	zap_cursor_t	zc;
	zap_attribute_t	*zap;
	int		error;
	uint8_t		prefetch;
	uint8_t		type;
	int		done = 0;
	uint64_t	parent;
	uint64_t	offset; /* must be unsigned; checks for < 1 */

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
	    &parent, sizeof (parent))) != 0)
		goto out;

	/*
	 * Quit if directory has been removed (posix)
	 */
	if (zp->z_unlinked)
		goto out;

	error = 0;
	os = zfsvfs->z_os;
	offset = ctx->pos;
	prefetch = zp->z_zn_prefetch;
	zap = zap_attribute_long_alloc();

	/*
	 * Initialize the iterator cursor.
	 */
	if (offset <= 3) {
		/*
		 * Start iteration from the beginning of the directory.
		 */
		zap_cursor_init(&zc, os, zp->z_id);
	} else {
		/*
		 * The offset is a serialized cursor.
		 */
		zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
	}

	/*
	 * Transform to file-system independent format
	 */
	while (!done) {
		uint64_t objnum;
		/*
		 * Special case `.', `..', and `.zfs'.
		 */
		if (offset == 0) {
			(void) strcpy(zap->za_name, ".");
			zap->za_normalization_conflict = 0;
			objnum = zp->z_id;
			type = DT_DIR;
		} else if (offset == 1) {
			(void) strcpy(zap->za_name, "..");
			zap->za_normalization_conflict = 0;
			objnum = parent;
			type = DT_DIR;
		} else if (offset == 2 && zfs_show_ctldir(zp)) {
			(void) strcpy(zap->za_name, ZFS_CTLDIR_NAME);
			zap->za_normalization_conflict = 0;
			objnum = ZFSCTL_INO_ROOT;
			type = DT_DIR;
		} else {
			/*
			 * Grab next entry.
			 */
			if ((error = zap_cursor_retrieve(&zc, zap))) {
				if (error == ENOENT)
					break;
				else
					goto update;
			}

			/*
			 * Allow multiple entries provided the first entry is
			 * the object id. Non-zpl consumers may safely make
			 * use of the additional space.
			 *
			 * XXX: This should be a feature flag for compatibility
			 */
			if (zap->za_integer_length != 8 ||
			    zap->za_num_integers == 0) {
				cmn_err(CE_WARN, "zap_readdir: bad directory "
				    "entry, obj = %lld, offset = %lld, "
				    "length = %d, num = %lld\n",
				    (u_longlong_t)zp->z_id,
				    (u_longlong_t)offset,
				    zap->za_integer_length,
				    (u_longlong_t)zap->za_num_integers);
				error = SET_ERROR(ENXIO);
				goto update;
			}

			objnum = ZFS_DIRENT_OBJ(zap->za_first_integer);
			type = ZFS_DIRENT_TYPE(zap->za_first_integer);
		}

		done = !dir_emit(ctx, zap->za_name, strlen(zap->za_name),
		    objnum, type);
		if (done)
			break;

		if (prefetch)
			dmu_prefetch_dnode(os, objnum, ZIO_PRIORITY_SYNC_READ);

		/*
		 * Move to the next entry, fill in the previous offset.
		 */
		if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
			zap_cursor_advance(&zc);
			offset = zap_cursor_serialize(&zc);
		} else {
			offset += 1;
		}
		ctx->pos = offset;
	}
	zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

update:
	zap_cursor_fini(&zc);
	zap_attribute_free(zap);
	if (error == ENOENT)
		error = 0;
out:
	zfs_exit(zfsvfs, FTAG);

	return (error);
}

/*
 * Get the basic file attributes and place them in the provided kstat
 * structure. The inode is assumed to be the authoritative source
 * for most of the attributes. However, the znode currently has the
 * authoritative atime, blksize, and block count.
 *
 *	IN:	ip	- inode of file.
 *
 *	OUT:	sp	- kstat values.
 *
 *	RETURN:	0 (always succeeds)
 */
int
#ifdef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK
zfs_getattr_fast(zidmap_t *user_ns, u32 request_mask, struct inode *ip,
    struct kstat *sp)
#else
zfs_getattr_fast(zidmap_t *user_ns, struct inode *ip, struct kstat *sp)
#endif
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint32_t blksize;
	u_longlong_t nblocks;
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	mutex_enter(&zp->z_lock);

#ifdef HAVE_GENERIC_FILLATTR_IDMAP_REQMASK
	zpl_generic_fillattr(user_ns, request_mask, ip, sp);
#else
	zpl_generic_fillattr(user_ns, ip, sp);
#endif
	/*
	 * +1 link count for root inode with visible '.zfs' directory.
	 */
	if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
		if (sp->nlink < ZFS_LINK_MAX)
			sp->nlink++;

	sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
	sp->blksize = blksize;
	sp->blocks = nblocks;

	if (unlikely(zp->z_blksz == 0)) {
		/*
		 * Block size hasn't been set; suggest maximal I/O transfers.
		 */
		sp->blksize = zfsvfs->z_max_blksz;
	}

	mutex_exit(&zp->z_lock);

	/*
	 * Required to prevent NFS client from detecting different inode
	 * numbers of snapshot root dentry before and after snapshot mount.
	 */
	if (zfsvfs->z_issnap) {
		if (ip->i_sb->s_root->d_inode == ip)
			sp->ino = ZFSCTL_INO_SNAPDIRS -
			    dmu_objset_id(zfsvfs->z_os);
	}

	zfs_exit(zfsvfs, FTAG);

	return (0);
}

/*
 * For the operation of changing a file's user/group/project, we need to
 * handle not only the main object that is assigned to the file directly,
 * but also the ones that are used by the file via the hidden xattr directory.
 *
 * Because the xattr directory may contain many EA entries, it may be
 * impossible to change all of them within the one transaction that changes
 * the main object's user/group/project attributes. Instead, we change them
 * one by one via separate, independent transactions. This may not be an
 * ideal solution, but we have no better idea yet.
 */
static int
zfs_setattr_dir(znode_t *dzp)
{
	struct inode	*dxip = ZTOI(dzp);
	struct inode	*xip = NULL;
	zfsvfs_t	*zfsvfs = ZTOZSB(dzp);
	objset_t	*os = zfsvfs->z_os;
	zap_cursor_t	zc;
	zap_attribute_t	*zap;
	zfs_dirlock_t	*dl;
	znode_t		*zp = NULL;
	dmu_tx_t	*tx = NULL;
	uint64_t	uid, gid;
	sa_bulk_attr_t	bulk[4];
	int		count;
	int		err;

	zap = zap_attribute_alloc();
	zap_cursor_init(&zc, os, dzp->z_id);
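	/*
	 * Walk every entry in the xattr directory, syncing each xattr
	 * object's uid/gid/projid to the parent's values in its own
	 * small transaction.
	 */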
	while ((err = zap_cursor_retrieve(&zc, zap)) == 0) {
		count = 0;
		if (zap->za_integer_length != 8 || zap->za_num_integers != 1) {
			err = ENXIO;
			break;
		}

		err = zfs_dirent_lock(&dl, dzp, (char *)zap->za_name, &zp,
		    ZEXISTS, NULL, NULL);
		if (err == ENOENT)
			goto next;
		if (err)
			break;

		xip = ZTOI(zp);
		if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
		    KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
		    zp->z_projid == dzp->z_projid)
			goto next;

		tx = dmu_tx_create(os);
		if (!(zp->z_pflags & ZFS_PROJID))
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
		else
			dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);

		err = dmu_tx_assign(tx, DMU_TX_WAIT);
		if (err)
			break;

		mutex_enter(&dzp->z_lock);

		if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
			xip->i_uid = dxip->i_uid;
			uid = zfs_uid_read(dxip);
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
			    &uid, sizeof (uid));
		}

		if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
			xip->i_gid = dxip->i_gid;
			gid = zfs_gid_read(dxip);
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
			    &gid, sizeof (gid));
		}

		uint64_t projid = dzp->z_projid;
		if (zp->z_projid != projid) {
			if (!(zp->z_pflags & ZFS_PROJID)) {
				err = sa_add_projid(zp->z_sa_hdl, tx, projid);
				if (unlikely(err == EEXIST)) {
					err = 0;
				} else if (err != 0) {
					goto sa_add_projid_err;
				} else {
					projid = ZFS_INVALID_PROJID;
				}
			}

			if (projid != ZFS_INVALID_PROJID) {
				zp->z_projid = projid;
				SA_ADD_BULK_ATTR(bulk, count,
				    SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
				    sizeof (zp->z_projid));
			}
		}

sa_add_projid_err:
		mutex_exit(&dzp->z_lock);

		if (likely(count > 0)) {
			err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
			dmu_tx_commit(tx);
		} else if (projid == ZFS_INVALID_PROJID) {
			dmu_tx_commit(tx);
		} else {
			dmu_tx_abort(tx);
		}
		tx = NULL;
		if (err != 0 && err != ENOENT)
			break;

next:
		if (zp) {
			zrele(zp);
			zp = NULL;
			zfs_dirent_unlock(dl);
		}
		zap_cursor_advance(&zc);
	}

	if (tx)
		dmu_tx_abort(tx);
	if (zp) {
		zrele(zp);
		zfs_dirent_unlock(dl);
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(zap);

	return (err == ENOENT ? 0 : err);
}

/*
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 *	IN:	zp	- znode of file to be modified.
 *		vap	- new attribute values.
 *			  If ATTR_XVATTR set, then optional attrs are being set
 *		flags	- ATTR_UTIME set if non-default time values provided.
 *			- ATTR_NOACLCHECK (CIFS context only).
 *		cr	- credentials of caller.
 *		mnt_ns	- user namespace of the mount
 *
 *	RETURN:	0 if success
 *		error code if failure
 *
 * Timestamps:
 *	ip - ctime updated, mtime updated if size changed.
 */
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
{
	struct inode	*ip;
	zfsvfs_t	*zfsvfs = ZTOZSB(zp);
	objset_t	*os;
	zilog_t		*zilog;
	dmu_tx_t	*tx;
	vattr_t		oldva;
	xvattr_t	*tmpxvattr;
	uint_t		mask = vap->va_mask;
	uint_t		saved_mask = 0;
	int		trim_mask = 0;
	uint64_t	new_mode;
	uint64_t	new_kuid = 0, new_kgid = 0, new_uid, new_gid;
	uint64_t	xattr_obj;
	uint64_t	mtime[2], ctime[2], atime[2];
	uint64_t	projid = ZFS_INVALID_PROJID;
	znode_t		*attrzp;
	int		need_policy = FALSE;
	int		err, err2 = 0;
	zfs_fuid_info_t *fuidp = NULL;
	xvattr_t	*xvap = (xvattr_t *)vap;	/* vap may be an xvattr_t * */
	xoptattr_t	*xoap;
	zfs_acl_t	*aclp;
	boolean_t	skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
	boolean_t	fuid_dirtied = B_FALSE;
	boolean_t	handle_eadir = B_FALSE;
	sa_bulk_attr_t	*bulk, *xattr_bulk;
	int		count = 0, xattr_count = 0, bulks = 8;

	if (mask == 0)
		return (0);

	if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (err);
	ip = ZTOI(zp);
	os = zfsvfs->z_os;

	/*
	 * If this is an xvattr_t, then get a pointer to the structure of
	 * optional attributes. If this is NULL, then we have a vattr_t.
	 */
	xoap = xva_getxoptattr(xvap);
	if (xoap != NULL && (mask & ATTR_XVATTR)) {
		if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
			if (!dmu_objset_projectquota_enabled(os) ||
			    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
				zfs_exit(zfsvfs, FTAG);
				return (SET_ERROR(ENOTSUP));
			}

			projid = xoap->xoa_projid;
			if (unlikely(projid == ZFS_INVALID_PROJID)) {
				zfs_exit(zfsvfs, FTAG);
				return (SET_ERROR(EINVAL));
			}

			if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
				projid = ZFS_INVALID_PROJID;
			else
				need_policy = TRUE;
		}

		if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
		    (xoap->xoa_projinherit !=
		    ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
		    (!dmu_objset_projectquota_enabled(os) ||
		    (!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
			zfs_exit(zfsvfs, FTAG);
			return (SET_ERROR(ENOTSUP));
		}
	}

	zilog = zfsvfs->z_log;

	/*
	 * Make sure that if we have an ephemeral uid/gid or xvattr specified,
	 * the file system is at the proper version level.
	 */
	if (zfsvfs->z_use_fuids == B_FALSE &&
	    (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
	    ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
	    (mask & ATTR_XVATTR))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EISDIR));
	}

	if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

2019 tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
2020 xva_init(tmpxvattr);
2021
2022 bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2023 xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
2024
2025 /*
2026 * On immutable files, only the immutable bit and atime may be altered.
2027 */
2028 if ((zp->z_pflags & ZFS_IMMUTABLE) &&
2029 ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
2030 ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
2031 err = SET_ERROR(EPERM);
2032 goto out3;
2033 }
2034
2035 /* ZFS_READONLY will be handled in zfs_zaccess() */
2036
2037 /*
2038 * Verify the timestamps don't overflow 32 bits.
2039 * ZFS can handle large timestamps, but 32-bit syscalls can't
2040 * handle times past January 2038. This check should be removed
2041 * once large timestamps are fully supported.
2042 */
2043 if (mask & (ATTR_ATIME | ATTR_MTIME)) {
2044 if (((mask & ATTR_ATIME) &&
2045 TIMESPEC_OVERFLOW(&vap->va_atime)) ||
2046 ((mask & ATTR_MTIME) &&
2047 TIMESPEC_OVERFLOW(&vap->va_mtime))) {
2048 err = SET_ERROR(EOVERFLOW);
2049 goto out3;
2050 }
2051 }
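
/*
 * For reference: the 32-bit signed time_t range ends at 0x7fffffff,
 * i.e. 2038-01-19T03:14:07Z. TIMESPEC_OVERFLOW() is, in sketch form
 * (the exact macro definition is an assumption), a bounds check like:
 *
 *	(ts)->tv_sec < INT32_MIN || (ts)->tv_sec > INT32_MAX
 */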
2052
2053 top:
2054 attrzp = NULL;
2055 aclp = NULL;
2056
2057 /* Can this be moved to before the top label? */
2058 if (zfs_is_readonly(zfsvfs)) {
2059 err = SET_ERROR(EROFS);
2060 goto out3;
2061 }
2062
2063 /*
2064 * First validate permissions
2065 */
2066
2067 if (mask & ATTR_SIZE) {
2068 err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr,
2069 mnt_ns);
2070 if (err)
2071 goto out3;
2072
2073 /*
2074 * XXX - Note, we are not providing any open
2075 * mode flags here (like FNDELAY), so we may
2076 * block if there are locks present... this
2077 * should be addressed in openat().
2078 */
2079 /* XXX - would it be OK to generate a log record here? */
2080 err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
2081 if (err)
2082 goto out3;
2083 }
2084
2085 if (mask & (ATTR_ATIME|ATTR_MTIME) ||
2086 ((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
2087 XVA_ISSET_REQ(xvap, XAT_READONLY) ||
2088 XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
2089 XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
2090 XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
2091 XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
2092 XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
2093 need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
2094 skipaclchk, cr, mnt_ns);
2095 }
2096
2097 if (mask & (ATTR_UID|ATTR_GID)) {
2098 int idmask = (mask & (ATTR_UID|ATTR_GID));
2099 int take_owner;
2100 int take_group;
2101 uid_t uid;
2102 gid_t gid;
2103
2104 /*
2105 * NOTE: even if a new mode is being set,
2106 * we may clear S_ISUID/S_ISGID bits.
2107 */
2108
2109 if (!(mask & ATTR_MODE))
2110 vap->va_mode = zp->z_mode;
2111
2112 /*
2113 * Take ownership or chgrp to group we are a member of
2114 */
2115
2116 uid = zfs_uid_to_vfsuid(mnt_ns, zfs_i_user_ns(ip),
2117 vap->va_uid);
2118 gid = zfs_gid_to_vfsgid(mnt_ns, zfs_i_user_ns(ip),
2119 vap->va_gid);
2120 take_owner = (mask & ATTR_UID) && (uid == crgetuid(cr));
2121 take_group = (mask & ATTR_GID) &&
2122 zfs_groupmember(zfsvfs, gid, cr);
2123
2124 /*
2125 * If both ATTR_UID and ATTR_GID are set then take_owner and
2126 * take_group must both be set in order to allow taking
2127 * ownership.
2128 *
2129 * Otherwise, send the check through secpolicy_vnode_setattr()
2130 *
2131 */
2132
2133 if (((idmask == (ATTR_UID|ATTR_GID)) &&
2134 take_owner && take_group) ||
2135 ((idmask == ATTR_UID) && take_owner) ||
2136 ((idmask == ATTR_GID) && take_group)) {
2137 if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
2138 skipaclchk, cr, mnt_ns) == 0) {
2139 /*
2140 * Remove setuid/setgid for non-privileged users
2141 */
2142 (void) secpolicy_setid_clear(vap, cr);
2143 trim_mask = (mask & (ATTR_UID|ATTR_GID));
2144 } else {
2145 need_policy = TRUE;
2146 }
2147 } else {
2148 need_policy = TRUE;
2149 }
2150 }
2151
2152 mutex_enter(&zp->z_lock);
2153 oldva.va_mode = zp->z_mode;
2154 zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
2155 if (mask & ATTR_XVATTR) {
2156 /*
2157 * Update xvattr mask to include only those attributes
2158 * that are actually changing.
2159 *
2160 * the bits will be restored prior to actually setting
2161 * the attributes so the caller thinks they were set.
2162 */
2163 if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
2164 if (xoap->xoa_appendonly !=
2165 ((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
2166 need_policy = TRUE;
2167 } else {
2168 XVA_CLR_REQ(xvap, XAT_APPENDONLY);
2169 XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
2170 }
2171 }
2172
2173 if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
2174 if (xoap->xoa_projinherit !=
2175 ((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
2176 need_policy = TRUE;
2177 } else {
2178 XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
2179 XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
2180 }
2181 }
2182
2183 if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
2184 if (xoap->xoa_nounlink !=
2185 ((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
2186 need_policy = TRUE;
2187 } else {
2188 XVA_CLR_REQ(xvap, XAT_NOUNLINK);
2189 XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
2190 }
2191 }
2192
2193 if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
2194 if (xoap->xoa_immutable !=
2195 ((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
2196 need_policy = TRUE;
2197 } else {
2198 XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
2199 XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
2200 }
2201 }
2202
2203 if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
2204 if (xoap->xoa_nodump !=
2205 ((zp->z_pflags & ZFS_NODUMP) != 0)) {
2206 need_policy = TRUE;
2207 } else {
2208 XVA_CLR_REQ(xvap, XAT_NODUMP);
2209 XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
2210 }
2211 }
2212
2213 if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
2214 if (xoap->xoa_av_modified !=
2215 ((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
2216 need_policy = TRUE;
2217 } else {
2218 XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
2219 XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
2220 }
2221 }
2222
2223 if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
2224 if ((!S_ISREG(ip->i_mode) &&
2225 xoap->xoa_av_quarantined) ||
2226 xoap->xoa_av_quarantined !=
2227 ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
2228 need_policy = TRUE;
2229 } else {
2230 XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
2231 XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
2232 }
2233 }
2234
2235 if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
2236 mutex_exit(&zp->z_lock);
2237 err = SET_ERROR(EPERM);
2238 goto out3;
2239 }
2240
2241 if (need_policy == FALSE &&
2242 (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
2243 XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
2244 need_policy = TRUE;
2245 }
2246 }
2247
2248 mutex_exit(&zp->z_lock);
2249
2250 if (mask & ATTR_MODE) {
2251 if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
2252 mnt_ns) == 0) {
2253 err = secpolicy_setid_setsticky_clear(ip, vap,
2254 &oldva, cr, mnt_ns, zfs_i_user_ns(ip));
2255 if (err)
2256 goto out3;
2257 trim_mask |= ATTR_MODE;
2258 } else {
2259 need_policy = TRUE;
2260 }
2261 }
2262
2263 if (need_policy) {
2264 /*
2265 * If trim_mask is set then taking ownership
2266 * has been granted or write_acl is present and the user
2267 * has the ability to modify the mode. In that case remove
2268 * UID|GID and/or MODE from the mask so that
2269 * secpolicy_vnode_setattr() doesn't revoke it.
2270 */
2271
2272 if (trim_mask) {
2273 saved_mask = vap->va_mask;
2274 vap->va_mask &= ~trim_mask;
2275 }
2276 err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
2277 zfs_zaccess_unix, zp);
2278 if (err)
2279 goto out3;
2280
2281 if (trim_mask)
2282 vap->va_mask |= saved_mask;
2283 }
2284
2285 /*
2286 * secpolicy_vnode_setattr() or taking ownership may have
2287 * changed va_mask.
2288 */
2289 mask = vap->va_mask;
2290
2291 if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
2292 handle_eadir = B_TRUE;
2293 err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
2294 &xattr_obj, sizeof (xattr_obj));
2295
2296 if (err == 0 && xattr_obj) {
2297 err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
2298 if (err)
2299 goto out2;
2300 }
2301 if (mask & ATTR_UID) {
2302 new_kuid = zfs_fuid_create(zfsvfs,
2303 (uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
2304 if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
2305 zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
2306 new_kuid)) {
2307 if (attrzp)
2308 zrele(attrzp);
2309 err = SET_ERROR(EDQUOT);
2310 goto out2;
2311 }
2312 }
2313
2314 if (mask & ATTR_GID) {
2315 new_kgid = zfs_fuid_create(zfsvfs,
2316 (uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
2317 if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
2318 zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
2319 new_kgid)) {
2320 if (attrzp)
2321 zrele(attrzp);
2322 err = SET_ERROR(EDQUOT);
2323 goto out2;
2324 }
2325 }
2326
2327 if (projid != ZFS_INVALID_PROJID &&
2328 zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
2329 if (attrzp)
2330 zrele(attrzp);
2331 err = EDQUOT;
2332 goto out2;
2333 }
2334 }
2335 tx = dmu_tx_create(os);
2336
2337 if (mask & ATTR_MODE) {
2338 uint64_t pmode = zp->z_mode;
2339 uint64_t acl_obj;
2340 new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
2341
2342 if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_RESTRICTED &&
2343 !(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
2344 err = EPERM;
2345 goto out;
2346 }
2347
2348 if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
2349 goto out;
2350
2351 mutex_enter(&zp->z_lock);
2352 if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
2353 /*
2354 * Are we upgrading ACL from old V0 format
2355 * to V1 format?
2356 */
2357 if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
2358 zfs_znode_acl_version(zp) ==
2359 ZFS_ACL_VERSION_INITIAL) {
2360 dmu_tx_hold_free(tx, acl_obj, 0,
2361 DMU_OBJECT_END);
2362 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2363 0, aclp->z_acl_bytes);
2364 } else {
2365 dmu_tx_hold_write(tx, acl_obj, 0,
2366 aclp->z_acl_bytes);
2367 }
2368 } else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
2369 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
2370 0, aclp->z_acl_bytes);
2371 }
2372 mutex_exit(&zp->z_lock);
2373 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2374 } else {
2375 if (((mask & ATTR_XVATTR) &&
2376 XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
2377 (projid != ZFS_INVALID_PROJID &&
2378 !(zp->z_pflags & ZFS_PROJID)))
2379 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
2380 else
2381 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
2382 }
2383
2384 if (attrzp) {
2385 dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
2386 }
2387
2388 fuid_dirtied = zfsvfs->z_fuid_dirty;
2389 if (fuid_dirtied)
2390 zfs_fuid_txhold(zfsvfs, tx);
2391
2392 zfs_sa_upgrade_txholds(tx, zp);
2393
2394 err = dmu_tx_assign(tx, DMU_TX_WAIT);
2395 if (err)
2396 goto out;
2397
2398 count = 0;
2399 /*
2400 * Set each attribute requested.
2401 * We group settings according to the locks they need to acquire.
2402 *
2403 * Note: you cannot set ctime directly, although it will be
2404 * updated as a side-effect of calling this function.
2405 */
2406
2407 if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
2408 /*
2409 * For an existing object upgraded from an older system, the
2410 * on-disk layout has no slot for the project ID attribute.
2411 * But the quota accounting logic needs to access related slots
2412 * by offset directly. So we need to adjust such old objects'
2413 * layout to move the project ID to a unified, fixed offset.
2414 */
2415 if (attrzp)
2416 err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
2417 if (err == 0)
2418 err = sa_add_projid(zp->z_sa_hdl, tx, projid);
2419
2420 if (unlikely(err == EEXIST))
2421 err = 0;
2422 else if (err != 0)
2423 goto out;
2424 else
2425 projid = ZFS_INVALID_PROJID;
2426 }
2427
2428 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2429 mutex_enter(&zp->z_acl_lock);
2430 mutex_enter(&zp->z_lock);
2431
2432 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
2433 &zp->z_pflags, sizeof (zp->z_pflags));
2434
2435 if (attrzp) {
2436 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2437 mutex_enter(&attrzp->z_acl_lock);
2438 mutex_enter(&attrzp->z_lock);
2439 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2440 SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
2441 sizeof (attrzp->z_pflags));
2442 if (projid != ZFS_INVALID_PROJID) {
2443 attrzp->z_projid = projid;
2444 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2445 SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
2446 sizeof (attrzp->z_projid));
2447 }
2448 }
2449
2450 if (mask & (ATTR_UID|ATTR_GID)) {
2451
2452 if (mask & ATTR_UID) {
2453 ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
2454 new_uid = zfs_uid_read(ZTOI(zp));
2455 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
2456 &new_uid, sizeof (new_uid));
2457 if (attrzp) {
2458 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2459 SA_ZPL_UID(zfsvfs), NULL, &new_uid,
2460 sizeof (new_uid));
2461 ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
2462 }
2463 }
2464
2465 if (mask & ATTR_GID) {
2466 ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
2467 new_gid = zfs_gid_read(ZTOI(zp));
2468 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
2469 NULL, &new_gid, sizeof (new_gid));
2470 if (attrzp) {
2471 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2472 SA_ZPL_GID(zfsvfs), NULL, &new_gid,
2473 sizeof (new_gid));
2474 ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
2475 }
2476 }
2477 if (!(mask & ATTR_MODE)) {
2478 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
2479 NULL, &new_mode, sizeof (new_mode));
2480 new_mode = zp->z_mode;
2481 }
2482 err = zfs_acl_chown_setattr(zp);
2483 ASSERT0(err);
2484 if (attrzp) {
2485 err = zfs_acl_chown_setattr(attrzp);
2486 ASSERT0(err);
2487 }
2488 }
2489
2490 if (mask & ATTR_MODE) {
2491 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
2492 &new_mode, sizeof (new_mode));
2493 zp->z_mode = ZTOI(zp)->i_mode = new_mode;
2494 ASSERT3P(aclp, !=, NULL);
2495 err = zfs_aclset_common(zp, aclp, cr, tx);
2496 ASSERT0(err);
2497 if (zp->z_acl_cached)
2498 zfs_acl_free(zp->z_acl_cached);
2499 zp->z_acl_cached = aclp;
2500 aclp = NULL;
2501 }
2502
2503 if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
2504 zp->z_atime_dirty = B_FALSE;
2505 inode_timespec_t tmp_atime = zpl_inode_get_atime(ip);
2506 ZFS_TIME_ENCODE(&tmp_atime, atime);
2507 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
2508 &atime, sizeof (atime));
2509 }
2510
2511 if (mask & (ATTR_MTIME | ATTR_SIZE)) {
2512 ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
2513 zpl_inode_set_mtime_to_ts(ZTOI(zp),
2514 zpl_inode_timestamp_truncate(vap->va_mtime, ZTOI(zp)));
2515
2516 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
2517 mtime, sizeof (mtime));
2518 }
2519
2520 if (mask & (ATTR_CTIME | ATTR_SIZE)) {
2521 ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
2522 zpl_inode_set_ctime_to_ts(ZTOI(zp),
2523 zpl_inode_timestamp_truncate(vap->va_ctime, ZTOI(zp)));
2524 SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
2525 ctime, sizeof (ctime));
2526 }
2527
2528 if (projid != ZFS_INVALID_PROJID) {
2529 zp->z_projid = projid;
2530 SA_ADD_BULK_ATTR(bulk, count,
2531 SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
2532 sizeof (zp->z_projid));
2533 }
2534
2535 if (attrzp && mask) {
2536 SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
2537 SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
2538 sizeof (ctime));
2539 }
2540
2541 /*
2542 * Do this after setting timestamps to prevent the timestamp
2543 * update from toggling the bit.
2544 */
2545
2546 if (xoap && (mask & ATTR_XVATTR)) {
2547
2548 /*
2549 * Restore the trimmed-off mask bits
2550 * so that the return masks can be set for the caller.
2551 */
2552
2553 if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
2554 XVA_SET_REQ(xvap, XAT_APPENDONLY);
2555 }
2556 if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
2557 XVA_SET_REQ(xvap, XAT_NOUNLINK);
2558 }
2559 if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
2560 XVA_SET_REQ(xvap, XAT_IMMUTABLE);
2561 }
2562 if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
2563 XVA_SET_REQ(xvap, XAT_NODUMP);
2564 }
2565 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
2566 XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
2567 }
2568 if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
2569 XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
2570 }
2571 if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
2572 XVA_SET_REQ(xvap, XAT_PROJINHERIT);
2573 }
2574
2575 if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
2576 ASSERT(S_ISREG(ip->i_mode));
2577
2578 zfs_xvattr_set(zp, xvap, tx);
2579 }
2580
2581 if (fuid_dirtied)
2582 zfs_fuid_sync(zfsvfs, tx);
2583
2584 if (mask != 0)
2585 zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
2586
2587 mutex_exit(&zp->z_lock);
2588 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2589 mutex_exit(&zp->z_acl_lock);
2590
2591 if (attrzp) {
2592 if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
2593 mutex_exit(&attrzp->z_acl_lock);
2594 mutex_exit(&attrzp->z_lock);
2595 }
2596 out:
2597 if (err == 0 && xattr_count > 0) {
2598 err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
2599 xattr_count, tx);
2600 ASSERT0(err2);
2601 }
2602
2603 if (aclp)
2604 zfs_acl_free(aclp);
2605
2606 if (fuidp) {
2607 zfs_fuid_info_free(fuidp);
2608 fuidp = NULL;
2609 }
2610
2611 if (err) {
2612 dmu_tx_abort(tx);
2613 if (attrzp)
2614 zrele(attrzp);
2615 if (err == ERESTART)
2616 goto top;
2617 } else {
2618 if (count > 0)
2619 err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
2620 dmu_tx_commit(tx);
2621 if (attrzp) {
2622 if (err2 == 0 && handle_eadir)
2623 err = zfs_setattr_dir(attrzp);
2624 zrele(attrzp);
2625 }
2626 zfs_znode_update_vfs(zp);
2627 }
2628
2629 out2:
2630 if (err == 0 && os->os_sync == ZFS_SYNC_ALWAYS)
2631 err = zil_commit(zilog, 0);
2632
2633 out3:
2634 kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
2635 kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
2636 kmem_free(tmpxvattr, sizeof (xvattr_t));
2637 zfs_exit(zfsvfs, FTAG);
2638 return (err);
2639 }
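
/*
 * Illustrative sketch (not built): a minimal zfs_setattr() caller that
 * requests a chmod plus an explicit mtime. The helper name is
 * hypothetical; the vattr_t fields and the ATTR_UTIME flag are the ones
 * documented and consumed above.
 */
#if 0
static int
example_chmod_touch(znode_t *zp, mode_t mode, cred_t *cr)
{
	vattr_t va = { 0 };

	va.va_mask = ATTR_MODE | ATTR_MTIME;	/* only these are applied */
	va.va_mode = mode;
	gethrestime(&va.va_mtime);		/* explicit time value */

	/* ATTR_UTIME: a non-default time value is being provided */
	return (zfs_setattr(zp, &va, ATTR_UTIME, cr, zfs_init_idmap));
}
#endif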
2640
2641 typedef struct zfs_zlock {
2642 krwlock_t *zl_rwlock; /* lock we acquired */
2643 znode_t *zl_znode; /* znode we held */
2644 struct zfs_zlock *zl_next; /* next in list */
2645 } zfs_zlock_t;
2646
2647 /*
2648 * Drop locks and release vnodes that were held by zfs_rename_lock().
2649 */
2650 static void
2651 zfs_rename_unlock(zfs_zlock_t **zlpp)
2652 {
2653 zfs_zlock_t *zl;
2654
2655 while ((zl = *zlpp) != NULL) {
2656 if (zl->zl_znode != NULL)
2657 zfs_zrele_async(zl->zl_znode);
2658 rw_exit(zl->zl_rwlock);
2659 *zlpp = zl->zl_next;
2660 kmem_free(zl, sizeof (*zl));
2661 }
2662 }
2663
2664 /*
2665 * Search back through the directory tree, using the ".." entries.
2666 * Lock each directory in the chain to prevent concurrent renames.
2667 * Fail any attempt to move a directory into one of its own descendants.
2668 * XXX - z_parent_lock can overlap with map or grow locks
2669 */
2670 static int
2671 zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
2672 {
2673 zfs_zlock_t *zl;
2674 znode_t *zp = tdzp;
2675 uint64_t rootid = ZTOZSB(zp)->z_root;
2676 uint64_t oidp = zp->z_id;
2677 krwlock_t *rwlp = &szp->z_parent_lock;
2678 krw_t rw = RW_WRITER;
2679
2680 /*
2681 * First pass write-locks szp and compares to zp->z_id.
2682 * Later passes read-lock zp and compare to zp->z_parent.
2683 */
2684 do {
2685 if (!rw_tryenter(rwlp, rw)) {
2686 /*
2687 * Another thread is renaming in this path.
2688 * Note that if we are a WRITER, we don't have any
2689 * parent_locks held yet.
2690 */
2691 if (rw == RW_READER && zp->z_id > szp->z_id) {
2692 /*
2693 * Drop our locks and restart
2694 */
2695 zfs_rename_unlock(&zl);
2696 *zlpp = NULL;
2697 zp = tdzp;
2698 oidp = zp->z_id;
2699 rwlp = &szp->z_parent_lock;
2700 rw = RW_WRITER;
2701 continue;
2702 } else {
2703 /*
2704 * Wait for other thread to drop its locks
2705 */
2706 rw_enter(rwlp, rw);
2707 }
2708 }
2709
2710 zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
2711 zl->zl_rwlock = rwlp;
2712 zl->zl_znode = NULL;
2713 zl->zl_next = *zlpp;
2714 *zlpp = zl;
2715
2716 if (oidp == szp->z_id) /* We're a descendant of szp */
2717 return (SET_ERROR(EINVAL));
2718
2719 if (oidp == rootid) /* We've hit the top */
2720 return (0);
2721
2722 if (rw == RW_READER) { /* i.e. not the first pass */
2723 int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
2724 if (error)
2725 return (error);
2726 zl->zl_znode = zp;
2727 }
2728 (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
2729 &oidp, sizeof (oidp));
2730 rwlp = &zp->z_parent_lock;
2731 rw = RW_READER;
2732
2733 } while (zp->z_id != sdzp->z_id);
2734
2735 return (0);
2736 }
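
/*
 * Worked example for the descendant check above, using the case cited
 * in zfs_rename(): moving /usr/a/b to /usr/a/b/c/d. Here szp is b and
 * tdzp is c. The walk starts at c and write-locks b's z_parent_lock;
 * c is neither szp nor the root, so ".." is followed upward. c's parent
 * is b, so oidp becomes szp->z_id on the next pass and EINVAL is
 * returned before the tree can be linked under itself.
 */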
2737
2738 /*
2739 * Move an entry from the provided source directory to the target
2740 * directory. Change the entry name as indicated.
2741 *
2742 * IN: sdzp - Source directory containing the "old entry".
2743 * snm - Old entry name.
2744 * tdzp - Target directory to contain the "new entry".
2745 * tnm - New entry name.
2746 * cr - credentials of caller.
2747 * flags - case flags
2748 * rflags - RENAME_* flags
2749 * wo_vap - attributes for RENAME_WHITEOUT (must be a char device 0:0).
2750 * mnt_ns - user namespace of the mount
2751 *
2752 * RETURN: 0 on success, error code on failure.
2753 *
2754 * Timestamps:
2755 * sdzp,tdzp - ctime|mtime updated
2756 */
2757 int
2758 zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
2759 cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zidmap_t *mnt_ns)
2760 {
2761 znode_t *szp, *tzp;
2762 zfsvfs_t *zfsvfs = ZTOZSB(sdzp);
2763 zilog_t *zilog;
2764 zfs_dirlock_t *sdl, *tdl;
2765 dmu_tx_t *tx;
2766 zfs_zlock_t *zl;
2767 int cmp, serr, terr;
2768 int error = 0;
2769 int zflg = 0;
2770 boolean_t waited = B_FALSE;
2771 /* Needed for whiteout inode creation. */
2772 boolean_t fuid_dirtied;
2773 zfs_acl_ids_t acl_ids;
2774 boolean_t have_acl = B_FALSE;
2775 znode_t *wzp = NULL;
2776
2777
2778 if (snm == NULL || tnm == NULL)
2779 return (SET_ERROR(EINVAL));
2780
2781 if (rflags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
2782 return (SET_ERROR(EINVAL));
2783
2784 /* Already checked by Linux VFS, but just to make sure. */
2785 if (rflags & RENAME_EXCHANGE &&
2786 (rflags & (RENAME_NOREPLACE | RENAME_WHITEOUT)))
2787 return (SET_ERROR(EINVAL));
2788
2789 /*
2790 * Make sure we get wo_vap if and only if RENAME_WHITEOUT is set, and
2791 * that it's the right kind of vattr_t for the whiteout file. These are
2792 * set internally by ZFS so should never be incorrect.
2793 */
2794 VERIFY_EQUIV(rflags & RENAME_WHITEOUT, wo_vap != NULL);
2795 VERIFY_IMPLY(wo_vap, wo_vap->va_mode == S_IFCHR);
2796 VERIFY_IMPLY(wo_vap, wo_vap->va_rdev == makedevice(0, 0));
2797
2798 if ((error = zfs_enter_verify_zp(zfsvfs, sdzp, FTAG)) != 0)
2799 return (error);
2800 zilog = zfsvfs->z_log;
2801
2802 if ((error = zfs_verify_zp(tdzp)) != 0) {
2803 zfs_exit(zfsvfs, FTAG);
2804 return (error);
2805 }
2806
2807 /*
2808 * We check i_sb because snapshots and the ctldir must have different
2809 * super blocks.
2810 */
2811 if (ZTOI(tdzp)->i_sb != ZTOI(sdzp)->i_sb ||
2812 zfsctl_is_node(ZTOI(tdzp))) {
2813 zfs_exit(zfsvfs, FTAG);
2814 return (SET_ERROR(EXDEV));
2815 }
2816
2817 if (zfsvfs->z_utf8 && u8_validate(tnm,
2818 strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
2819 zfs_exit(zfsvfs, FTAG);
2820 return (SET_ERROR(EILSEQ));
2821 }
2822
2823 if (flags & FIGNORECASE)
2824 zflg |= ZCILOOK;
2825
2826 top:
2827 szp = NULL;
2828 tzp = NULL;
2829 zl = NULL;
2830
2831 /*
2832 * This is to prevent the creation of links into attribute space
2833 * by renaming a linked file into/out of an attribute directory.
2834 * See the comment in zfs_link() for why this is considered bad.
2835 */
2836 if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
2837 zfs_exit(zfsvfs, FTAG);
2838 return (SET_ERROR(EINVAL));
2839 }
2840
2841 /*
2842 * Lock source and target directory entries. To prevent deadlock,
2843 * a lock ordering must be defined. We lock the directory with
2844 * the smallest object id first, or if it's a tie, the one with
2845 * the lexically first name.
2846 */
2847 if (sdzp->z_id < tdzp->z_id) {
2848 cmp = -1;
2849 } else if (sdzp->z_id > tdzp->z_id) {
2850 cmp = 1;
2851 } else {
2852 /*
2853 * First compare the two name arguments without
2854 * considering any case folding.
2855 */
2856 int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
2857
2858 cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
2859 ASSERT(error == 0 || !zfsvfs->z_utf8);
2860 if (cmp == 0) {
2861 /*
2862 * POSIX: "If the old argument and the new argument
2863 * both refer to links to the same existing file,
2864 * the rename() function shall return successfully
2865 * and perform no other action."
2866 */
2867 zfs_exit(zfsvfs, FTAG);
2868 return (0);
2869 }
2870 /*
2871 * If the file system is case-folding, then we may
2872 * have some more checking to do. A case-folding file
2873 * system is either supporting mixed case sensitivity
2874 * access or is completely case-insensitive. Note
2875 * that the file system is always case preserving.
2876 *
2877 * In mixed sensitivity mode case sensitive behavior
2878 * is the default. FIGNORECASE must be used to
2879 * explicitly request case insensitive behavior.
2880 *
2881 * If the source and target names provided differ only
2882 * by case (e.g., a request to rename 'tim' to 'Tim'),
2883 * we will treat this as a special case in the
2884 * case-insensitive mode: as long as the source name
2885 * is an exact match, we will allow this to proceed as
2886 * a name-change request.
2887 */
2888 if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
2889 (zfsvfs->z_case == ZFS_CASE_MIXED &&
2890 flags & FIGNORECASE)) &&
2891 u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
2892 &error) == 0) {
2893 /*
2894 * Case-preserving rename request: require exact
2895 * name matches.
2896 */
2897 zflg |= ZCIEXACT;
2898 zflg &= ~ZCILOOK;
2899 }
2900 }
2901
2902 /*
2903 * If the source and destination directories are the same, we should
2904 * grab the z_name_lock of that directory only once.
2905 */
2906 if (sdzp == tdzp) {
2907 zflg |= ZHAVELOCK;
2908 rw_enter(&sdzp->z_name_lock, RW_READER);
2909 }
2910
2911 if (cmp < 0) {
2912 serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
2913 ZEXISTS | zflg, NULL, NULL);
2914 terr = zfs_dirent_lock(&tdl,
2915 tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
2916 } else {
2917 terr = zfs_dirent_lock(&tdl,
2918 tdzp, tnm, &tzp, zflg, NULL, NULL);
2919 serr = zfs_dirent_lock(&sdl,
2920 sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
2921 NULL, NULL);
2922 }
2923
2924 if (serr) {
2925 /*
2926 * Source entry invalid or not there.
2927 */
2928 if (!terr) {
2929 zfs_dirent_unlock(tdl);
2930 if (tzp)
2931 zrele(tzp);
2932 }
2933
2934 if (sdzp == tdzp)
2935 rw_exit(&sdzp->z_name_lock);
2936
2937 if (strcmp(snm, "..") == 0)
2938 serr = EINVAL;
2939 zfs_exit(zfsvfs, FTAG);
2940 return (serr);
2941 }
2942 if (terr) {
2943 zfs_dirent_unlock(sdl);
2944 zrele(szp);
2945
2946 if (sdzp == tdzp)
2947 rw_exit(&sdzp->z_name_lock);
2948
2949 if (strcmp(tnm, "..") == 0)
2950 terr = EINVAL;
2951 zfs_exit(zfsvfs, FTAG);
2952 return (terr);
2953 }
2954
2955 /*
2956 * If we are using project inheritance, meaning the directory has
2957 * ZFS_PROJINHERIT set, then its descendant directories will inherit
2958 * not only the project ID but also the ZFS_PROJINHERIT flag. In
2959 * that case, we only allow renames into our tree when the project
2960 * IDs are the same.
2961 */
2962 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
2963 tdzp->z_projid != szp->z_projid) {
2964 error = SET_ERROR(EXDEV);
2965 goto out;
2966 }
2967
2968 /*
2969 * Must have write access at the source to remove the old entry
2970 * and write access at the target to create the new entry.
2971 * Note that if target and source are the same, this can be
2972 * done in a single check.
2973 */
2974 if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr, mnt_ns)))
2975 goto out;
2976
2977 if (S_ISDIR(ZTOI(szp)->i_mode)) {
2978 /*
2979 * Check to make sure rename is valid.
2980 * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
2981 */
2982 if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
2983 goto out;
2984 }
2985
2986 /*
2987 * Does target exist?
2988 */
2989 if (tzp) {
2990 if (rflags & RENAME_NOREPLACE) {
2991 error = SET_ERROR(EEXIST);
2992 goto out;
2993 }
2994 /*
2995 * Source and target must be the same type (unless exchanging).
2996 */
2997 if (!(rflags & RENAME_EXCHANGE)) {
2998 boolean_t s_is_dir = S_ISDIR(ZTOI(szp)->i_mode) != 0;
2999 boolean_t t_is_dir = S_ISDIR(ZTOI(tzp)->i_mode) != 0;
3000
3001 if (s_is_dir != t_is_dir) {
3002 error = SET_ERROR(s_is_dir ? ENOTDIR : EISDIR);
3003 goto out;
3004 }
3005 }
3006 /*
3007 * POSIX dictates that when the source and target
3008 * entries refer to the same file object, rename
3009 * must do nothing and exit without error.
3010 */
3011 if (szp->z_id == tzp->z_id) {
3012 error = 0;
3013 goto out;
3014 }
3015 } else if (rflags & RENAME_EXCHANGE) {
3016 /* Target must exist for RENAME_EXCHANGE. */
3017 error = SET_ERROR(ENOENT);
3018 goto out;
3019 }
3020
3021 /* Set up inode creation for RENAME_WHITEOUT. */
3022 if (rflags & RENAME_WHITEOUT) {
3023 /*
3024 * Whiteout files are not regular files or directories, so to
3025 * match zfs_create() we do not inherit the project id.
3026 */
3027 uint64_t wo_projid = ZFS_DEFAULT_PROJID;
3028
3029 error = zfs_zaccess(sdzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns);
3030 if (error)
3031 goto out;
3032
3033 if (!have_acl) {
3034 error = zfs_acl_ids_create(sdzp, 0, wo_vap, cr, NULL,
3035 &acl_ids, mnt_ns);
3036 if (error)
3037 goto out;
3038 have_acl = B_TRUE;
3039 }
3040
3041 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, wo_projid)) {
3042 error = SET_ERROR(EDQUOT);
3043 goto out;
3044 }
3045 }
3046
3047 tx = dmu_tx_create(zfsvfs->z_os);
3048 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3049 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3050 dmu_tx_hold_zap(tx, sdzp->z_id,
3051 (rflags & RENAME_EXCHANGE) ? TRUE : FALSE, snm);
3052 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
3053 if (sdzp != tdzp) {
3054 dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
3055 zfs_sa_upgrade_txholds(tx, tdzp);
3056 }
3057 if (tzp) {
3058 dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
3059 zfs_sa_upgrade_txholds(tx, tzp);
3060 }
3061 if (rflags & RENAME_WHITEOUT) {
3062 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3063 ZFS_SA_BASE_ATTR_SIZE);
3064
3065 dmu_tx_hold_zap(tx, sdzp->z_id, TRUE, snm);
3066 dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
3067 if (!zfsvfs->z_use_sa &&
3068 acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3069 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
3070 0, acl_ids.z_aclp->z_acl_bytes);
3071 }
3072 }
3073 fuid_dirtied = zfsvfs->z_fuid_dirty;
3074 if (fuid_dirtied)
3075 zfs_fuid_txhold(zfsvfs, tx);
3076 zfs_sa_upgrade_txholds(tx, szp);
3077 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3078 error = dmu_tx_assign(tx,
3079 (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
3080 if (error) {
3081 if (zl != NULL)
3082 zfs_rename_unlock(&zl);
3083 zfs_dirent_unlock(sdl);
3084 zfs_dirent_unlock(tdl);
3085
3086 if (sdzp == tdzp)
3087 rw_exit(&sdzp->z_name_lock);
3088
3089 if (error == ERESTART) {
3090 waited = B_TRUE;
3091 dmu_tx_wait(tx);
3092 dmu_tx_abort(tx);
3093 zrele(szp);
3094 if (tzp)
3095 zrele(tzp);
3096 goto top;
3097 }
3098 dmu_tx_abort(tx);
3099 zrele(szp);
3100 if (tzp)
3101 zrele(tzp);
3102 zfs_exit(zfsvfs, FTAG);
3103 return (error);
3104 }
3105
3106 /*
3107 * Unlink the source.
3108 */
3109 szp->z_pflags |= ZFS_AV_MODIFIED;
3110 if (tdzp->z_pflags & ZFS_PROJINHERIT)
3111 szp->z_pflags |= ZFS_PROJINHERIT;
3112
3113 error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3114 (void *)&szp->z_pflags, sizeof (uint64_t), tx);
3115 VERIFY0(error);
3116
3117 error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
3118 if (error)
3119 goto commit;
3120
3121 /*
3122 * Unlink the target.
3123 */
3124 if (tzp) {
3125 int tzflg = zflg;
3126
3127 if (rflags & RENAME_EXCHANGE) {
3128 /* This inode will be re-linked soon. */
3129 tzflg |= ZRENAMING;
3130
3131 tzp->z_pflags |= ZFS_AV_MODIFIED;
3132 if (sdzp->z_pflags & ZFS_PROJINHERIT)
3133 tzp->z_pflags |= ZFS_PROJINHERIT;
3134
3135 error = sa_update(tzp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
3136 (void *)&tzp->z_pflags, sizeof (uint64_t), tx);
3137 ASSERT0(error);
3138 }
3139 error = zfs_link_destroy(tdl, tzp, tx, tzflg, NULL);
3140 if (error)
3141 goto commit_link_szp;
3142 }
3143
3144 /*
3145 * Create the new target links:
3146 * * We always link the target.
3147 * * RENAME_EXCHANGE: Link the old target to the source.
3148 * * RENAME_WHITEOUT: Create a whiteout inode in-place of the source.
3149 */
3150 error = zfs_link_create(tdl, szp, tx, ZRENAMING);
3151 if (error) {
3152 /*
3153 * If we have removed the existing target, a subsequent call to
3154 * zfs_link_create() to add back the same entry, but with a new
3155 * dnode (szp), should not fail.
3156 */
3157 ASSERT0P(tzp);
3158 goto commit_link_tzp;
3159 }
3160
3161 switch (rflags & (RENAME_EXCHANGE | RENAME_WHITEOUT)) {
3162 case RENAME_EXCHANGE:
3163 error = zfs_link_create(sdl, tzp, tx, ZRENAMING);
3164 /*
3165 * The same argument as zfs_link_create() failing for
3166 * szp applies here, since the source directory must
3167 * have had an entry we are replacing.
3168 */
3169 ASSERT0(error);
3170 if (error)
3171 goto commit_unlink_td_szp;
3172 break;
3173 case RENAME_WHITEOUT:
3174 zfs_mknode(sdzp, wo_vap, tx, cr, 0, &wzp, &acl_ids);
3175 error = zfs_link_create(sdl, wzp, tx, ZNEW);
3176 if (error) {
3177 zfs_znode_delete(wzp, tx);
3178 remove_inode_hash(ZTOI(wzp));
3179 goto commit_unlink_td_szp;
3180 }
3181 break;
3182 }
3183
3184 if (fuid_dirtied)
3185 zfs_fuid_sync(zfsvfs, tx);
3186
3187 switch (rflags & (RENAME_EXCHANGE | RENAME_WHITEOUT)) {
3188 case RENAME_EXCHANGE:
3189 zfs_log_rename_exchange(zilog, tx,
3190 (flags & FIGNORECASE ? TX_CI : 0), sdzp, sdl->dl_name,
3191 tdzp, tdl->dl_name, szp);
3192 break;
3193 case RENAME_WHITEOUT:
3194 zfs_log_rename_whiteout(zilog, tx,
3195 (flags & FIGNORECASE ? TX_CI : 0), sdzp, sdl->dl_name,
3196 tdzp, tdl->dl_name, szp, wzp);
3197 break;
3198 default:
3199 ASSERT0(rflags & ~RENAME_NOREPLACE);
3200 zfs_log_rename(zilog, tx, (flags & FIGNORECASE ? TX_CI : 0),
3201 sdzp, sdl->dl_name, tdzp, tdl->dl_name, szp);
3202 break;
3203 }
3204
3205 commit:
3206 dmu_tx_commit(tx);
3207 out:
3208 if (have_acl)
3209 zfs_acl_ids_free(&acl_ids);
3210
3211 zfs_znode_update_vfs(sdzp);
3212 if (sdzp == tdzp)
3213 rw_exit(&sdzp->z_name_lock);
3214
3215 if (sdzp != tdzp)
3216 zfs_znode_update_vfs(tdzp);
3217
3218 zfs_znode_update_vfs(szp);
3219 zrele(szp);
3220 if (wzp) {
3221 zfs_znode_update_vfs(wzp);
3222 zrele(wzp);
3223 }
3224 if (tzp) {
3225 zfs_znode_update_vfs(tzp);
3226 zrele(tzp);
3227 }
3228
3229 if (zl != NULL)
3230 zfs_rename_unlock(&zl);
3231
3232 zfs_dirent_unlock(sdl);
3233 zfs_dirent_unlock(tdl);
3234
3235 if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3236 error = zil_commit(zilog, 0);
3237
3238 zfs_exit(zfsvfs, FTAG);
3239 return (error);
3240
3241 /*
3242 * Clean-up path for broken link state.
3243 *
3244 * At this point we are in a (very) bad state, so we need to do our
3245 * best to correct the state. In particular, all of the nlinks are
3246 * wrong because we were destroying and creating links with ZRENAMING.
3247 *
3248 * In some form, all of these operations have to resolve the state:
3249 *
3250 * * link_destroy() *must* succeed. Fortunately, this is very likely
3251 * since we only just created it.
3252 *
3253 * * link_create()s are allowed to fail (though they shouldn't because
3254 * we only just unlinked them and are putting the entries back
3255 * during clean-up). But if they fail, we can just forcefully drop
3256 * the nlink value to (at the very least) avoid broken nlink values
3257 * -- though in the case of non-empty directories we will have to
3258 * panic (otherwise we'd have a leaked directory with a broken ..).
3259 */
3260 commit_unlink_td_szp:
3261 VERIFY0(zfs_link_destroy(tdl, szp, tx, ZRENAMING, NULL));
3262 commit_link_tzp:
3263 if (tzp) {
3264 if (zfs_link_create(tdl, tzp, tx, ZRENAMING))
3265 VERIFY0(zfs_drop_nlink(tzp, tx, NULL));
3266 }
3267 commit_link_szp:
3268 if (zfs_link_create(sdl, szp, tx, ZRENAMING))
3269 VERIFY0(zfs_drop_nlink(szp, tx, NULL));
3270 goto commit;
3271 }
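
/*
 * Illustrative sketch (not built): the wo_vap a caller is expected to
 * pass for RENAME_WHITEOUT, matching the VERIFY_IMPLY() checks at the
 * top of zfs_rename(). The local variable names are assumptions.
 */
#if 0
	vattr_t wo_va = { 0 };

	wo_va.va_mask = ATTR_MODE;
	wo_va.va_mode = S_IFCHR;		/* whiteout: char device... */
	wo_va.va_rdev = makedevice(0, 0);	/* ...with device number 0:0 */

	error = zfs_rename(sdzp, snm, tdzp, tnm, cr, 0, RENAME_WHITEOUT,
	    &wo_va, mnt_ns);
#endif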
3272
3273 /*
3274 * Insert the indicated symbolic reference entry into the directory.
3275 *
3276 * IN: dzp - Directory to contain new symbolic link.
3277 * name - Name of directory entry in dzp.
3278 * vap - Attributes of new entry.
3279 * link - Name for new symlink entry.
3280 * cr - credentials of caller.
3281 * flags - case flags
3282 * mnt_ns - user namespace of the mount
3283 *
3284 * OUT: zpp - Znode for new symbolic link.
3285 *
3286 * RETURN: 0 on success, error code on failure.
3287 *
3288 * Timestamps:
3289 * dip - ctime|mtime updated
3290 */
3291 int
3292 zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link,
3293 znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns)
3294 {
3295 znode_t *zp;
3296 zfs_dirlock_t *dl;
3297 dmu_tx_t *tx;
3298 zfsvfs_t *zfsvfs = ZTOZSB(dzp);
3299 zilog_t *zilog;
3300 uint64_t len = strlen(link);
3301 int error;
3302 int zflg = ZNEW;
3303 zfs_acl_ids_t acl_ids;
3304 boolean_t fuid_dirtied;
3305 uint64_t txtype = TX_SYMLINK;
3306 boolean_t waited = B_FALSE;
3307
3308 ASSERT(S_ISLNK(vap->va_mode));
3309
3310 if (name == NULL)
3311 return (SET_ERROR(EINVAL));
3312
3313 if ((error = zfs_enter_verify_zp(zfsvfs, dzp, FTAG)) != 0)
3314 return (error);
3315 zilog = zfsvfs->z_log;
3316
3317 if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
3318 NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3319 zfs_exit(zfsvfs, FTAG);
3320 return (SET_ERROR(EILSEQ));
3321 }
3322 if (flags & FIGNORECASE)
3323 zflg |= ZCILOOK;
3324
3325 if (len > MAXPATHLEN) {
3326 zfs_exit(zfsvfs, FTAG);
3327 return (SET_ERROR(ENAMETOOLONG));
3328 }
3329
3330 if ((error = zfs_acl_ids_create(dzp, 0,
3331 vap, cr, NULL, &acl_ids, mnt_ns)) != 0) {
3332 zfs_exit(zfsvfs, FTAG);
3333 return (error);
3334 }
3335 top:
3336 *zpp = NULL;
3337
3338 /*
3339 * Attempt to lock directory; fail if entry already exists.
3340 */
3341 error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
3342 if (error) {
3343 zfs_acl_ids_free(&acl_ids);
3344 zfs_exit(zfsvfs, FTAG);
3345 return (error);
3346 }
3347
3348 if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr, mnt_ns))) {
3349 zfs_acl_ids_free(&acl_ids);
3350 zfs_dirent_unlock(dl);
3351 zfs_exit(zfsvfs, FTAG);
3352 return (error);
3353 }
3354
3355 if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
3356 zfs_acl_ids_free(&acl_ids);
3357 zfs_dirent_unlock(dl);
3358 zfs_exit(zfsvfs, FTAG);
3359 return (SET_ERROR(EDQUOT));
3360 }
3361 tx = dmu_tx_create(zfsvfs->z_os);
3362 fuid_dirtied = zfsvfs->z_fuid_dirty;
3363 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
3364 dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
3365 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
3366 ZFS_SA_BASE_ATTR_SIZE + len);
3367 dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
3368 if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
3369 dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
3370 acl_ids.z_aclp->z_acl_bytes);
3371 }
3372 if (fuid_dirtied)
3373 zfs_fuid_txhold(zfsvfs, tx);
3374 error = dmu_tx_assign(tx,
3375 (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
3376 if (error) {
3377 zfs_dirent_unlock(dl);
3378 if (error == ERESTART) {
3379 waited = B_TRUE;
3380 dmu_tx_wait(tx);
3381 dmu_tx_abort(tx);
3382 goto top;
3383 }
3384 zfs_acl_ids_free(&acl_ids);
3385 dmu_tx_abort(tx);
3386 zfs_exit(zfsvfs, FTAG);
3387 return (error);
3388 }
3389
3390 /*
3391 * Create a new object for the symlink.
3392 * For version 4 ZPL datasets the symlink will be an SA attribute.
3393 */
3394 zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
3395
3396 if (fuid_dirtied)
3397 zfs_fuid_sync(zfsvfs, tx);
3398
3399 mutex_enter(&zp->z_lock);
3400 if (zp->z_is_sa)
3401 error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
3402 link, len, tx);
3403 else
3404 zfs_sa_symlink(zp, link, len, tx);
3405 mutex_exit(&zp->z_lock);
3406
3407 zp->z_size = len;
3408 (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
3409 &zp->z_size, sizeof (zp->z_size), tx);
3410 /*
3411 * Insert the new object into the directory.
3412 */
3413 error = zfs_link_create(dl, zp, tx, ZNEW);
3414 if (error != 0) {
3415 zfs_znode_delete(zp, tx);
3416 remove_inode_hash(ZTOI(zp));
3417 } else {
3418 if (flags & FIGNORECASE)
3419 txtype |= TX_CI;
3420 zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
3421
3422 zfs_znode_update_vfs(dzp);
3423 zfs_znode_update_vfs(zp);
3424 }
3425
3426 zfs_acl_ids_free(&acl_ids);
3427
3428 dmu_tx_commit(tx);
3429
3430 zfs_dirent_unlock(dl);
3431
3432 if (error == 0) {
3433 *zpp = zp;
3434
3435 if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3436 error = zil_commit(zilog, 0);
3437 } else {
3438 zrele(zp);
3439 }
3440
3441 zfs_exit(zfsvfs, FTAG);
3442 return (error);
3443 }
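
/*
 * Storage note: on SA-capable (version >= 4) ZPL datasets the target
 * string written above lives in the SA_ZPL_SYMLINK system attribute and
 * is read back by zfs_readlink() below via sa_lookup_uio(). On older
 * datasets zfs_sa_symlink()/zfs_sa_readlink() keep it (roughly: this is
 * a summary, not a spec) in the znode's bonus area, spilling long
 * targets into the object's first data block.
 */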
3444
3445 /*
3446 * Return, in the buffer contained in the provided uio structure,
3447 * the symbolic path referred to by ip.
3448 *
3449 * IN: ip - inode of symbolic link
3450 * uio - structure to contain the link path.
3451 * cr - credentials of caller.
3452 *
3453 * RETURN: 0 if success
3454 * error code if failure
3455 *
3456 * Timestamps:
3457 * ip - atime updated
3458 */
3459 int
3460 zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr)
3461 {
3462 (void) cr;
3463 znode_t *zp = ITOZ(ip);
3464 zfsvfs_t *zfsvfs = ITOZSB(ip);
3465 int error;
3466
3467 if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
3468 return (error);
3469
3470 mutex_enter(&zp->z_lock);
3471 if (zp->z_is_sa)
3472 error = sa_lookup_uio(zp->z_sa_hdl,
3473 SA_ZPL_SYMLINK(zfsvfs), uio);
3474 else
3475 error = zfs_sa_readlink(zp, uio);
3476 mutex_exit(&zp->z_lock);
3477
3478 zfs_exit(zfsvfs, FTAG);
3479 return (error);
3480 }
3481
3482 /*
3483 * Insert a new entry into directory tdzp referencing szp.
3484 *
3485 * IN: tdzp - Directory to contain new entry.
3486 * szp - znode of new entry.
3487 * name - name of new entry.
3488 * cr - credentials of caller.
3489 * flags - case flags.
3490 *
3491 * RETURN: 0 if success
3492 * error code if failure
3493 *
3494 * Timestamps:
3495 * tdzp - ctime|mtime updated
3496 * szp - ctime updated
3497 */
3498 int
3499 zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
3500 int flags)
3501 {
3502 struct inode *sip = ZTOI(szp);
3503 znode_t *tzp;
3504 zfsvfs_t *zfsvfs = ZTOZSB(tdzp);
3505 zilog_t *zilog;
3506 zfs_dirlock_t *dl;
3507 dmu_tx_t *tx;
3508 int error;
3509 int zf = ZNEW;
3510 uint64_t parent;
3511 uid_t owner;
3512 boolean_t waited = B_FALSE;
3513 boolean_t is_tmpfile = 0;
3514 uint64_t txg;
3515
3516 is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
3517
3518 ASSERT(S_ISDIR(ZTOI(tdzp)->i_mode));
3519
3520 if (name == NULL)
3521 return (SET_ERROR(EINVAL));
3522
3523 if ((error = zfs_enter_verify_zp(zfsvfs, tdzp, FTAG)) != 0)
3524 return (error);
3525 zilog = zfsvfs->z_log;
3526
3527 /*
3528 * POSIX dictates that we return EPERM here.
3529 * Better choices include ENOTSUP or EISDIR.
3530 */
3531 if (S_ISDIR(sip->i_mode)) {
3532 zfs_exit(zfsvfs, FTAG);
3533 return (SET_ERROR(EPERM));
3534 }
3535
3536 if ((error = zfs_verify_zp(szp)) != 0) {
3537 zfs_exit(zfsvfs, FTAG);
3538 return (error);
3539 }
3540
3541 /*
3542 * If we are using project inheritance, meaning the directory has
3543 * ZFS_PROJINHERIT set, then its descendant directories will inherit
3544 * not only the project ID but also the ZFS_PROJINHERIT flag. In
3545 * that case, we only allow hard link creation in our tree when the
3546 * project IDs are the same.
3547 */
3548 if (tdzp->z_pflags & ZFS_PROJINHERIT &&
3549 tdzp->z_projid != szp->z_projid) {
3550 zfs_exit(zfsvfs, FTAG);
3551 return (SET_ERROR(EXDEV));
3552 }
3553
3554 /*
3555 * We check i_sb because snapshots and the ctldir must have different
3556 * super blocks.
3557 */
3558 if (sip->i_sb != ZTOI(tdzp)->i_sb || zfsctl_is_node(sip)) {
3559 zfs_exit(zfsvfs, FTAG);
3560 return (SET_ERROR(EXDEV));
3561 }
3562
3563 /* Prevent links to .zfs/shares files */
3564
3565 if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
3566 &parent, sizeof (uint64_t))) != 0) {
3567 zfs_exit(zfsvfs, FTAG);
3568 return (error);
3569 }
3570 if (parent == zfsvfs->z_shares_dir) {
3571 zfs_exit(zfsvfs, FTAG);
3572 return (SET_ERROR(EPERM));
3573 }
3574
3575 if (zfsvfs->z_utf8 && u8_validate(name,
3576 strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
3577 zfs_exit(zfsvfs, FTAG);
3578 return (SET_ERROR(EILSEQ));
3579 }
3580 if (flags & FIGNORECASE)
3581 zf |= ZCILOOK;
3582
3583 /*
3584 * We do not support links between attributes and non-attributes
3585 * because of the potential security risk of creating links
3586 * into "normal" file space in order to circumvent restrictions
3587 * imposed in attribute space.
3588 */
3589 if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
3590 zfs_exit(zfsvfs, FTAG);
3591 return (SET_ERROR(EINVAL));
3592 }
3593
3594 owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
3595 cr, ZFS_OWNER);
3596 if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
3597 zfs_exit(zfsvfs, FTAG);
3598 return (SET_ERROR(EPERM));
3599 }
3600
3601 if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr,
3602 zfs_init_idmap))) {
3603 zfs_exit(zfsvfs, FTAG);
3604 return (error);
3605 }
3606
3607 top:
3608 /*
3609 * Attempt to lock directory; fail if entry already exists.
3610 */
3611 error = zfs_dirent_lock(&dl, tdzp, name, &tzp, zf, NULL, NULL);
3612 if (error) {
3613 zfs_exit(zfsvfs, FTAG);
3614 return (error);
3615 }
3616
3617 tx = dmu_tx_create(zfsvfs->z_os);
3618 dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
3619 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
3620 if (is_tmpfile)
3621 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
3622
3623 zfs_sa_upgrade_txholds(tx, szp);
3624 zfs_sa_upgrade_txholds(tx, tdzp);
3625 error = dmu_tx_assign(tx,
3626 (waited ? DMU_TX_NOTHROTTLE : 0) | DMU_TX_NOWAIT);
3627 if (error) {
3628 zfs_dirent_unlock(dl);
3629 if (error == ERESTART) {
3630 waited = B_TRUE;
3631 dmu_tx_wait(tx);
3632 dmu_tx_abort(tx);
3633 goto top;
3634 }
3635 dmu_tx_abort(tx);
3636 zfs_exit(zfsvfs, FTAG);
3637 return (error);
3638 }
3639 /* unmark z_unlinked so zfs_link_create will not reject */
3640 if (is_tmpfile)
3641 szp->z_unlinked = B_FALSE;
3642 error = zfs_link_create(dl, szp, tx, 0);
3643
3644 if (error == 0) {
3645 uint64_t txtype = TX_LINK;
3646 /*
3647 * A tmpfile is created in z_unlinkedobj, so remove it.
3648 * Also, we don't log to the ZIL, because all previous file
3649 * operations on the tmpfile are ignored by the ZIL. Instead we
3650 * always wait for the txg to sync to make sure all previous
3651 * operations are sync safe.
3652 */
3653 if (is_tmpfile) {
3654 VERIFY0(zap_remove_int(zfsvfs->z_os,
3655 zfsvfs->z_unlinkedobj, szp->z_id, tx));
3656 } else {
3657 if (flags & FIGNORECASE)
3658 txtype |= TX_CI;
3659 zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
3660 }
3661 } else if (is_tmpfile) {
3662 /* restore z_unlinked since linking failed */
3663 szp->z_unlinked = B_TRUE;
3664 }
3665 txg = dmu_tx_get_txg(tx);
3666 dmu_tx_commit(tx);
3667
3668 zfs_dirent_unlock(dl);
3669
3670 if (error == 0) {
3671 if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
3672 error = zil_commit(zilog, 0);
3673
3674 if (is_tmpfile && zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
3675 txg_wait_flag_t wait_flags =
3676 spa_get_failmode(dmu_objset_spa(zfsvfs->z_os)) ==
3677 ZIO_FAILURE_MODE_CONTINUE ? TXG_WAIT_SUSPEND : 0;
3678 error = txg_wait_synced_flags(
3679 dmu_objset_pool(zfsvfs->z_os), txg, wait_flags);
3680 if (error != 0) {
3681 ASSERT3U(error, ==, ESHUTDOWN);
3682 error = SET_ERROR(EIO);
3683 }
3684 }
3685 }
3686
3687 zfs_znode_update_vfs(tdzp);
3688 zfs_znode_update_vfs(szp);
3689 zfs_exit(zfsvfs, FTAG);
3690 return (error);
3691 }
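
/*
 * The is_tmpfile path above is what makes the common O_TMPFILE idiom
 * durable. Illustrative userspace sketch (not part of this file):
 *
 *	fd = open("/tank/dir", O_TMPFILE | O_WRONLY, 0600);
 *	write(fd, buf, len);
 *	snprintf(path, sizeof (path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/tank/dir/name",
 *	    AT_SYMLINK_FOLLOW);
 *
 * linkat() arrives here with sip->i_state & I_LINKABLE set: the znode
 * is removed from z_unlinkedobj and, unless sync is disabled, the txg
 * is waited on instead of logging to the ZIL.
 */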
3692
3693 /* Finish page writeback. */
3694 static inline void
3695 zfs_page_writeback_done(struct page *pp, int err)
3696 {
3697 if (err != 0) {
3698 /*
3699 * Writeback failed. Re-dirty the page. It was undirtied before
3700 * the IO was issued (in zfs_putpage() or write_cache_pages()).
3701 * The kernel only considers writeback for dirty pages; if we
3702 * don't do this, it is eligible for eviction without being
3703 * written out, which we definitely don't want.
3704 */
3705 #ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
3706 filemap_dirty_folio(page_mapping(pp), page_folio(pp));
3707 #else
3708 __set_page_dirty_nobuffers(pp);
3709 #endif
3710 }
3711
3712 ClearPageError(pp);
3713 end_page_writeback(pp);
3714 }
3715
3716 /*
3717 * ZIL callback for page writeback. Passes to zfs_log_write() in zfs_putpage()
3718 * for syncing writes. Called when the ZIL itx has been written to the log or
3719 * the whole txg syncs, or if the ZIL crashes or the pool suspends. Any failure
3720 * is passed as `err`.
3721 */
3722 static void
3723 zfs_putpage_commit_cb(void *arg, int err)
3724 {
3725 zfs_page_writeback_done(arg, err);
3726 }
3727
3728 /*
3729 * Push a page out to disk. Once the page is on stable storage the
3730 * registered commit callback will be run as notification of completion.
3731 *
3732 * IN: ip - page mapped for inode.
3733 * pp - page to push (page is locked)
3734 * wbc - writeback control data
3735 * for_sync - does the caller intend to wait synchronously for the
3736 * page writeback to complete?
3737 *
3738 * RETURN: 0 if success
3739 * error code if failure
3740 *
3741 * Timestamps:
3742 * ip - ctime|mtime updated
3743 */
3744 int
3745 zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc,
3746 boolean_t for_sync)
3747 {
3748 znode_t *zp = ITOZ(ip);
3749 zfsvfs_t *zfsvfs = ITOZSB(ip);
3750 loff_t offset;
3751 loff_t pgoff;
3752 unsigned int pglen;
3753 dmu_tx_t *tx;
3754 caddr_t va;
3755 int err = 0;
3756 uint64_t mtime[2], ctime[2];
3757 inode_timespec_t tmp_ts;
3758 sa_bulk_attr_t bulk[3];
3759 int cnt = 0;
3760 struct address_space *mapping;
3761
3762 if ((err = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
3763 return (err);
3764
3765 ASSERT(PageLocked(pp));
3766
3767 pgoff = page_offset(pp); /* Page byte-offset in file */
3768 offset = i_size_read(ip); /* File length in bytes */
3769 pglen = MIN(PAGE_SIZE, /* Page length in bytes */
3770 P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
3771
3772 /* Page is beyond end of file */
3773 if (pgoff >= offset) {
3774 unlock_page(pp);
3775 zfs_exit(zfsvfs, FTAG);
3776 return (0);
3777 }
3778
3779 /* Truncate page length to end of file */
3780 if (pgoff + pglen > offset)
3781 pglen = offset - pgoff;
3782
3783 #if 0
3784 /*
3785 * FIXME: Allow mmap writes past its quota. The correct fix
3786 * is to register a page_mkwrite() handler to count the page
3787 * against its quota when it is about to be dirtied.
3788 */
3789 if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
3790 KUID_TO_SUID(ip->i_uid)) ||
3791 zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
3792 KGID_TO_SGID(ip->i_gid)) ||
3793 (zp->z_projid != ZFS_DEFAULT_PROJID &&
3794 zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
3795 zp->z_projid))) {
3796 err = EDQUOT;
3797 }
3798 #endif
3799
3800 /*
3801 * The ordering here is critical and must adhere to the following
3802 * rules in order to avoid deadlocking in either zfs_read() or
3803 * zfs_free_range() due to a lock inversion.
3804 *
3805 * 1) The page must be unlocked prior to acquiring the range lock.
3806 * This is critical because zfs_read() calls find_lock_page()
3807 * which may block on the page lock while holding the range lock.
3808 *
3809 * 2) Before setting or clearing write back on a page the range lock
3810 * must be held in order to prevent a lock inversion with the
3811 * zfs_free_range() function.
3812 *
3813 * This presents a problem because upon entering this function the
3814 * page lock is already held. To safely acquire the range lock the
3815 * page lock must be dropped. This creates a window where another
3816 * process could truncate, invalidate, dirty, or write out the page.
3817 *
3818 * Therefore, after successfully reacquiring the range and page locks
3819 * the current page state is checked. In the common case everything
3820 * will be as expected and it can be written out. However, if
3821 * the page state has changed it must be handled accordingly.
3822 */
3823 mapping = pp->mapping;
3824 redirty_page_for_writepage(wbc, pp);
3825 unlock_page(pp);
3826
3827 zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
3828 pgoff, pglen, RL_WRITER);
3829 lock_page(pp);
3830
3831 /* Page mapping changed or it was no longer dirty, we're done */
3832 if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
3833 unlock_page(pp);
3834 zfs_rangelock_exit(lr);
3835 zfs_exit(zfsvfs, FTAG);
3836 return (0);
3837 }
3838
3839 /* Another process started writeback; block if required */
3840 if (PageWriteback(pp)) {
3841 unlock_page(pp);
3842 zfs_rangelock_exit(lr);
3843
3844 if (wbc->sync_mode != WB_SYNC_NONE) {
3845 if (PageWriteback(pp))
3846 #ifdef HAVE_PAGEMAP_FOLIO_WAIT_BIT
3847 folio_wait_bit(page_folio(pp), PG_writeback);
3848 #else
3849 wait_on_page_bit(pp, PG_writeback);
3850 #endif
3851 }
3852
3853 zfs_exit(zfsvfs, FTAG);
3854 return (0);
3855 }
3856
3857 /* Clear the dirty flag while the required locks are held */
3858 if (!clear_page_dirty_for_io(pp)) {
3859 unlock_page(pp);
3860 zfs_rangelock_exit(lr);
3861 zfs_exit(zfsvfs, FTAG);
3862 return (0);
3863 }
3864
3865 /*
3866 * Counterpart for redirty_page_for_writepage() above. This page
3867 * was in fact not skipped and should not be counted as if it were.
3868 */
3869 wbc->pages_skipped--;
3870 set_page_writeback(pp);
3871 unlock_page(pp);
3872
3873 tx = dmu_tx_create(zfsvfs->z_os);
3874 dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
3875 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
3876 zfs_sa_upgrade_txholds(tx, zp);
3877
3878 err = dmu_tx_assign(tx, DMU_TX_WAIT);
3879 if (err != 0) {
3880 dmu_tx_abort(tx);
3881 zfs_page_writeback_done(pp, err);
3882 zfs_rangelock_exit(lr);
3883 zfs_exit(zfsvfs, FTAG);
3884
3885 /*
3886 * Don't return error for an async writeback; we've re-dirtied
3887 * the page so it will be tried again some other time.
3888 */
3889 return (for_sync ? err : 0);
3890 }
3891
3892 va = kmap(pp);
3893 ASSERT3U(pglen, <=, PAGE_SIZE);
3894 dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx,
3895 DMU_READ_PREFETCH);
3896 kunmap(pp);
3897
3898 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
3899 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
3900 SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
3901 &zp->z_pflags, 8);
3902
3903 /* Preserve the mtime and ctime provided by the inode */
3904 tmp_ts = zpl_inode_get_mtime(ip);
3905 ZFS_TIME_ENCODE(&tmp_ts, mtime);
3906 tmp_ts = zpl_inode_get_ctime(ip);
3907 ZFS_TIME_ENCODE(&tmp_ts, ctime);
3908 zp->z_atime_dirty = B_FALSE;
3909 zp->z_seq++;
3910
3911 err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
3912
3913 /*
3914 * A note about for_sync vs wbc->sync_mode.
3915 *
3916 * for_sync indicates that this is a syncing writeback, that is, the
3917 * kernel caller expects the data to be durably stored before being notified.
3918 * Often, but not always, the call was triggered by a userspace syncing
3919 * op (eg fsync(), msync(MS_SYNC)). For our purposes, for_sync==TRUE
3920 * means that the page should remain "locked" (in the writeback state)
3921 * until it is definitely on disk (ie zil_commit() or spa_sync()).
3922 * Otherwise, we can unlock and return as soon as it is on the
3923 * in-memory ZIL.
3924 *
3925 * wbc->sync_mode has similar meaning. wbc is passed from the kernel to
3926 * zpl_writepages()/zpl_writepage(); wbc->sync_mode==WB_SYNC_NONE
3927 * indicates this a regular async writeback (eg a cache eviction) and
3928 * so does not need a durability guarantee, while WB_SYNC_ALL indicates
3929 * a syncing op that must be waited on (by convention, we test for
3930 * !WB_SYNC_NONE rather than WB_SYNC_ALL, to prefer durability over
3931 * performance should there ever be a new mode that we have not yet
3932 * added support for).
3933 *
3934 * So, why a separate for_sync field? This is because zpl_writepages()
3935 * calls zfs_putpage() multiple times for a single "logical" operation.
3936 * It wants all the individual pages to be for_sync==TRUE ie only
3937 * unlocked once durably stored, but it only wants one call to
3938 * zil_commit() at the very end, once all the pages are synced. So,
3939 * it repurposes sync_mode slightly to indicate who issue and wait for
3940 * the IO: for NONE, the caller to zfs_putpage() will do it, while for
3941 * ALL, zfs_putpage should do it.
3942 *
3943 * Summary:
3944 * for_sync: 0=unlock immediately; 1=unlock once on disk
3945 * sync_mode: NONE=caller will commit; ALL=we will commit
3946 */
	boolean_t need_commit = (wbc->sync_mode != WB_SYNC_NONE);

	/*
	 * We use for_sync as the "commit" arg to zfs_log_write() (arg 7)
	 * because it is a policy flag that indicates "someone will call
	 * zil_commit() soon". for_sync=TRUE means exactly that; the only
	 * question is whether it will be us, or zpl_writepages().
	 */
	zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, for_sync,
	    B_FALSE, for_sync ? zfs_putpage_commit_cb : NULL, pp);

	if (!for_sync) {
		/*
		 * Async writeback is logged and written to the DMU, so the
		 * page can now be unlocked.
		 */
		zfs_page_writeback_done(pp, 0);
	}

	dmu_tx_commit(tx);

	zfs_rangelock_exit(lr);

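	/*
	 * If we are the side responsible for durability (see the sync_mode
	 * discussion above), commit the ZIL now and fail the writeback if
	 * the commit fails.
	 */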
	if (need_commit) {
		err = zil_commit_flags(zfsvfs->z_log, zp->z_id,
		    ZIL_COMMIT_NOW);
		if (err != 0) {
			zfs_exit(zfsvfs, FTAG);
			return (err);
		}
	}

	dataset_kstats_update_write_kstats(&zfsvfs->z_kstat, pglen);

	zfs_exit(zfsvfs, FTAG);
	return (err);
}

/*
 * Update the system attributes when the inode has been dirtied. For the
 * moment we only update the mode, atime, mtime, and ctime.
 */
int
zfs_dirty_inode(struct inode *ip, int flags)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	dmu_tx_t *tx;
	uint64_t mode, atime[2], mtime[2], ctime[2];
	inode_timespec_t tmp_ts;
	sa_bulk_attr_t bulk[4];
	int error = 0;
	int cnt = 0;

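	/* Nothing to update on read-only filesystems or snapshots. */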
	if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
		return (0);

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

#ifdef I_DIRTY_TIME
	/*
	 * This is the lazytime semantic introduced in Linux 4.0.
	 * This flag is only passed from update_time() when lazytime is
	 * enabled. (Note, I_DIRTY_SYNC will also be set if not lazytime.)
	 * Fortunately mtime and ctime are managed within ZFS itself, so we
	 * only need to dirty atime.
	 */
	if (flags == I_DIRTY_TIME) {
		zp->z_atime_dirty = B_TRUE;
		goto out;
	}
#endif

	tx = dmu_tx_create(zfsvfs->z_os);

	dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
	zfs_sa_upgrade_txholds(tx, zp);

	error = dmu_tx_assign(tx, DMU_TX_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		goto out;
	}

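	/*
	 * Take z_lock while copying the inode attributes into the SA so the
	 * znode's cached state and the on-disk attributes stay consistent.
	 */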
	mutex_enter(&zp->z_lock);
	zp->z_atime_dirty = B_FALSE;

	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
	SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

	/* Preserve the mode, atime, mtime, and ctime provided by the inode */
	tmp_ts = zpl_inode_get_atime(ip);
	ZFS_TIME_ENCODE(&tmp_ts, atime);
	tmp_ts = zpl_inode_get_mtime(ip);
	ZFS_TIME_ENCODE(&tmp_ts, mtime);
	tmp_ts = zpl_inode_get_ctime(ip);
	ZFS_TIME_ENCODE(&tmp_ts, ctime);
	mode = ip->i_mode;

	zp->z_mode = mode;

	error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
	mutex_exit(&zp->z_lock);

	dmu_tx_commit(tx);
out:
	zfs_exit(zfsvfs, FTAG);
	return (error);
}

void
zfs_inactive(struct inode *ip)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint64_t atime[2];
	int error;
	int need_unlock = 0;

	/* Only read lock if we haven't already write locked, e.g. rollback */
	if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
		need_unlock = 1;
		rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
	}
	if (zp->z_sa_hdl == NULL) {
		if (need_unlock)
			rw_exit(&zfsvfs->z_teardown_inactive_lock);
		return;
	}

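	/*
	 * If atime was dirtied while the inode was cached, write it back now
	 * so it is not lost when the znode is torn down. Unlinked files are
	 * skipped since they are about to be freed anyway.
	 */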
	if (zp->z_atime_dirty && zp->z_unlinked == B_FALSE) {
		dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

		dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
		zfs_sa_upgrade_txholds(tx, zp);
		error = dmu_tx_assign(tx, DMU_TX_WAIT);
		if (error) {
			dmu_tx_abort(tx);
		} else {
			inode_timespec_t tmp_atime;
			tmp_atime = zpl_inode_get_atime(ip);
			ZFS_TIME_ENCODE(&tmp_atime, atime);
			mutex_enter(&zp->z_lock);
			(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
			    (void *)&atime, sizeof (atime), tx);
			zp->z_atime_dirty = B_FALSE;
			mutex_exit(&zp->z_lock);
			dmu_tx_commit(tx);
		}
	}

	zfs_zinactive(zp);
	if (need_unlock)
		rw_exit(&zfsvfs->z_teardown_inactive_lock);
}

/*
 * Fill pages with data from the disk.
 */
static int
zfs_fillpage(struct inode *ip, struct page *pp)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	loff_t i_size = i_size_read(ip);
	u_offset_t io_off = page_offset(pp);
	size_t io_len = PAGE_SIZE;

	ASSERT3U(io_off, <, i_size);

	if (io_off + io_len > i_size)
		io_len = i_size - io_off;

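	/*
	 * A page that straddles EOF is only partially backed by file data,
	 * so read just the valid bytes and zero the rest of the mapping.
	 */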
	void *va = kmap(pp);
	int error = dmu_read(zfsvfs->z_os, zp->z_id, io_off,
	    io_len, va, DMU_READ_PREFETCH);
	if (io_len != PAGE_SIZE)
		memset((char *)va + io_len, 0, PAGE_SIZE - io_len);
	kunmap(pp);

	if (error) {
		/* convert checksum errors into IO errors */
		if (error == ECKSUM)
			error = SET_ERROR(EIO);

		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
	}

	return (error);
}

/*
 * Uses zfs_fillpage() to read data from the file and fill the page.
 *
 * IN:	ip	- inode of file to get data from.
 *	pp	- page to read
 *
 * RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	ip - atime updated
 */
int
zfs_getpage(struct inode *ip, struct page *pp)
{
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	znode_t *zp = ITOZ(ip);
	int error;
	loff_t i_size = i_size_read(ip);
	u_offset_t io_off = page_offset(pp);
	size_t io_len = PAGE_SIZE;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	ASSERT3U(io_off, <, i_size);

	if (io_off + io_len > i_size)
		io_len = i_size - io_off;

	/*
	 * It is important to hold the rangelock here because it is possible
	 * a Direct I/O write or block clone might be taking place at the same
	 * time that a page is being faulted in through filemap_fault(). With
	 * Direct I/O writes and block cloning db->db_data will be set to NULL
	 * with dbuf_clear_data() in dmu_buf_will_clone_or_dio(). If the
	 * rangelock is not held, then there is a race between faulting in a
	 * page and writing out a Direct I/O write or block cloning. Without
	 * the rangelock a NULL pointer dereference can occur in
	 * dmu_read_impl() for db->db_data during the memcpy operation when
	 * zfs_fillpage() calls dmu_read().
	 */
	zfs_locked_range_t *lr = zfs_rangelock_tryenter(&zp->z_rangelock,
	    io_off, io_len, RL_READER);
	if (lr == NULL) {
		/*
		 * It is important to drop the page lock before grabbing the
		 * rangelock to avoid another deadlock between here and
		 * zfs_write() -> update_pages(). update_pages() holds both
		 * the rangelock and the page lock.
		 */
		get_page(pp);
		unlock_page(pp);
		lr = zfs_rangelock_enter(&zp->z_rangelock, io_off,
		    io_len, RL_READER);
		lock_page(pp);
		put_page(pp);
	}
	error = zfs_fillpage(ip, pp);
	zfs_rangelock_exit(lr);

	if (error == 0)
		dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, PAGE_SIZE);

	zfs_exit(zfsvfs, FTAG);

	return (error);
}

/*
 * Check ZFS specific permissions to memory map a section of a file.
 *
 * IN:	ip	- inode of the file to mmap
 *	off	- file offset
 *	addrp	- start address in memory region
 *	len	- length of memory region
 *	vm_flags - address flags
 *
 * RETURN:	0 if success
 *		error code if failure
 */
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
    unsigned long vm_flags)
{
	(void) addrp;
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

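	/*
	 * A shared writable mapping is equivalent to a write, so refuse it
	 * for files that ZFS will not allow to be modified.
	 */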
	if ((vm_flags & VM_WRITE) && (vm_flags & VM_SHARED) &&
	    (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EPERM));
	}

	if ((vm_flags & (VM_READ | VM_EXEC)) &&
	    (zp->z_pflags & ZFS_AV_QUARANTINED)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EACCES));
	}

	if (off < 0 || len > MAXOFFSET_T - off) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(ENXIO));
	}

	zfs_exit(zfsvfs, FTAG);
	return (0);
}

/*
 * Free or allocate space in a file. Currently, this function only
 * supports the `F_FREESP' command. However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *
 * IN:	zp	- znode of file to free data in.
 *	cmd	- action to take (only F_FREESP supported).
 *	bfp	- section of file to free/alloc.
 *	flag	- current file open mode flags.
 *	offset	- current file offset.
 *	cr	- credentials of caller.
 *
 * RETURN:	0 on success, error code on failure.
 *
 * Timestamps:
 *	zp - ctime|mtime updated
 */
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr)
{
	(void) offset;
	zfsvfs_t *zfsvfs = ZTOZSB(zp);
	uint64_t off, len;
	int error;

	if ((error = zfs_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
		return (error);

	if (cmd != F_FREESP) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Callers might not be able to properly detect that we are
	 * read-only, so check it explicitly here.
	 */
	if (zfs_is_readonly(zfsvfs)) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EROFS));
	}

	if (bfp->l_len < 0) {
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Permissions aren't checked on Solaris because on this OS
	 * zfs_space() can only be called with an opened file handle.
	 * On Linux we can get here through truncate_range() which
	 * operates directly on inodes, so we need to check access rights.
	 */
	if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr,
	    zfs_init_idmap))) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	off = bfp->l_start;
	len = bfp->l_len; /* 0 means from off to end of file */

	error = zfs_freesp(zp, off, len, flag, TRUE);

	zfs_exit(zfsvfs, FTAG);
	return (error);
}


int
zfs_fid(struct inode *ip, fid_t *fidp)
{
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	uint32_t gen;
	uint64_t gen64;
	uint64_t object = zp->z_id;
	zfid_short_t *zfid;
	int size, i, error;

	if ((error = zfs_enter(zfsvfs, FTAG)) != 0)
		return (error);

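	/*
	 * If the caller's fid buffer is too small, report the required
	 * length and fail so the caller can retry with a larger buffer.
	 */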
	if (fidp->fid_len < SHORT_FID_LEN) {
		fidp->fid_len = SHORT_FID_LEN;
		zfs_exit(zfsvfs, FTAG);
		return (SET_ERROR(ENOSPC));
	}

	if ((error = zfs_verify_zp(zp)) != 0) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
	    &gen64, sizeof (uint64_t))) != 0) {
		zfs_exit(zfsvfs, FTAG);
		return (error);
	}

	gen = (uint32_t)gen64;

	size = SHORT_FID_LEN;

	zfid = (zfid_short_t *)fidp;

	zfid->zf_len = size;

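	/* Encode the object number and generation as little-endian bytes. */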
	for (i = 0; i < sizeof (zfid->zf_object); i++)
		zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

	/* Must have a non-zero generation number to distinguish from .zfs */
	if (gen == 0)
		gen = 1;
	for (i = 0; i < sizeof (zfid->zf_gen); i++)
		zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

	zfs_exit(zfsvfs, FTAG);
	return (0);
}


#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);

module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
#endif