Lines Matching +full:conf +full:cmd +full:dat

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
75 #include <sys/conf.h>
159 return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG && in do_vn_io_fault()
160 (mp = vp->v_mount) != NULL && in do_vn_io_fault()
161 (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable); in do_vn_io_fault()
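The three matched lines above are the whole body of the predicate that selects the prefaulting I/O path; reassembled into a complete function (a reconstruction from these fragments, with the surrounding declarations assumed), it reads:

static bool
do_vn_io_fault(struct vnode *vp, struct uio *uio)
{
	struct mount *mp;

	/*
	 * Take the prefaulting path only for user-space I/O to regular
	 * files on mounts that opted in via MNTK_NO_IOPF, and only while
	 * the vn_io_fault_enable knob is set.
	 */
	return (uio->uio_segflg == UIO_USERSPACE && vp->v_type == VREG &&
	    (mp = vp->v_mount) != NULL &&
	    (mp->mnt_kern_flag & MNTK_NO_IOPF) != 0 && vn_io_fault_enable);
}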
166 * file- or vnode-based I/O calls.
194 return (vn_open_cred(ndp, flagp, cmode, 0, td->td_ucred, fp)); in vn_open()
247 ndp->ni_cnd.cn_nameiop = CREATE; in vn_open_cred()
248 ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags); in vn_open_cred()
256 ndp->ni_cnd.cn_flags |= LOCKPARENT | NOCACHE | NC_KEEPPOSENTRY; in vn_open_cred()
258 ndp->ni_cnd.cn_flags |= FOLLOW; in vn_open_cred()
263 if (ndp->ni_vp == NULL) { in vn_open_cred()
265 vap->va_type = VREG; in vn_open_cred()
266 vap->va_mode = cmode; in vn_open_cred()
268 vap->va_vaflags |= VA_EXCLUSIVE; in vn_open_cred()
269 if (vn_start_write(ndp->ni_dvp, &mp, V_NOWAIT) != 0) { in vn_open_cred()
271 vput(ndp->ni_dvp); in vn_open_cred()
279 ndp->ni_cnd.cn_flags |= MAKEENTRY; in vn_open_cred()
281 error = mac_vnode_check_create(cred, ndp->ni_dvp, in vn_open_cred()
282 &ndp->ni_cnd, vap); in vn_open_cred()
285 error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp, in vn_open_cred()
286 &ndp->ni_cnd, vap); in vn_open_cred()
287 vp = ndp->ni_vp; in vn_open_cred()
291 vp->v_iflag |= VI_FOPENING; in vn_open_cred()
295 VOP_VPUT_PAIR(ndp->ni_dvp, error == 0 ? &vp : NULL, in vn_open_cred()
308 if (ndp->ni_dvp == ndp->ni_vp) in vn_open_cred()
309 vrele(ndp->ni_dvp); in vn_open_cred()
311 vput(ndp->ni_dvp); in vn_open_cred()
312 ndp->ni_dvp = NULL; in vn_open_cred()
313 vp = ndp->ni_vp; in vn_open_cred()
318 if (vp->v_type == VDIR) { in vn_open_cred()
325 ndp->ni_cnd.cn_nameiop = LOOKUP; in vn_open_cred()
326 ndp->ni_cnd.cn_flags = open2nameif(fmode, vn_open_flags); in vn_open_cred()
327 ndp->ni_cnd.cn_flags |= (fmode & O_NOFOLLOW) != 0 ? NOFOLLOW : in vn_open_cred()
330 ndp->ni_cnd.cn_flags |= LOCKSHARED; in vn_open_cred()
333 vp = ndp->ni_vp; in vn_open_cred()
338 vp->v_iflag &= ~VI_FOPENING; in vn_open_cred()
350 ndp->ni_vp = NULL; in vn_open_cred()
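For context, kernel consumers drive vn_open()/vn_open_cred() through a nameidata initialized with NDINIT(); a minimal sketch of that calling pattern (the path, flags, and NDFREE_PNBUF()/vn_close() cleanup are illustrative for a recent FreeBSD, not taken from the listing):

struct nameidata nd;
int error, flags;

NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, "/some/path");
flags = FREAD;
error = vn_open(&nd, &flags, 0, NULL);	/* cmode 0, no struct file */
if (error != 0)
	return (error);
NDFREE_PNBUF(&nd);
/* nd.ni_vp is returned locked; use it, then unlock and close. */
VOP_UNLOCK(nd.ni_vp);
error = vn_close(nd.ni_vp, FREAD, curthread->td_ucred, curthread);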
364 if (fp->f_type != DTYPE_NONE && fp->f_type != DTYPE_VNODE) in vn_open_vnode_advlock()
381 fp->f_flag |= FHASLOCK; in vn_open_vnode_advlock()
398 if (vp->v_type == VLNK) { in vn_open_vnode()
402 if (vp->v_type != VDIR && fmode & O_DIRECTORY) in vn_open_vnode()
407 if (vp->v_type == VSOCK) in vn_open_vnode()
410 if (vp->v_type == VDIR) in vn_open_vnode()
440 if (vp->v_type != VFIFO && vp->v_type != VSOCK && in vn_open_vnode()
442 fp->f_flag |= FKQALLOWED; in vn_open_vnode()
446 if (vp->v_type == VFIFO && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) in vn_open_vnode()
457 __func__, vp, vp->v_writecount); in vn_open_vnode()
473 fp->f_flag |= FOPENFAILED; in vn_open_vnode()
474 fp->f_vnode = vp; in vn_open_vnode()
475 if (fp->f_ops == &badfileops) { in vn_open_vnode()
476 fp->f_type = DTYPE_VNODE; in vn_open_vnode()
477 fp->f_ops = &vnops; in vn_open_vnode()
482 * If there is no fp, due to kernel-mode open, in vn_open_vnode()
485 if ((vp->v_type == VFIFO || in vn_open_vnode()
486 !MNT_EXTENDED_SHARED(vp->v_mount)) && in vn_open_vnode()
530 lock_flags = vp->v_type != VFIFO && MNT_EXTENDED_SHARED(vp->v_mount) ? in vn_close1()
537 VOP_ADD_WRITECOUNT_CHECKED(vp, -1); in vn_close1()
539 __func__, vp, vp->v_writecount); in vn_close1()
566 ASSERT_VOP_LOCKED(fp->f_vnode, __func__); in sequential_heuristic()
568 rw = uio->uio_rw; in sequential_heuristic()
569 if (fp->f_flag & FRDAHEAD) in sequential_heuristic()
570 return (fp->f_seqcount[rw] << IO_SEQSHIFT); in sequential_heuristic()
579 if ((uio->uio_offset == 0 && fp->f_seqcount[rw] > 0) || in sequential_heuristic()
580 uio->uio_offset == fp->f_nextoff[rw]) { in sequential_heuristic()
582 * f_seqcount is in units of fixed-size blocks so that it in sequential_heuristic()
585 * of 16384 is hard-coded here since it is (not quite) just in sequential_heuristic()
590 if (uio->uio_resid >= IO_SEQMAX * 16384) in sequential_heuristic()
591 fp->f_seqcount[rw] = IO_SEQMAX; in sequential_heuristic()
593 fp->f_seqcount[rw] += howmany(uio->uio_resid, 16384); in sequential_heuristic()
594 if (fp->f_seqcount[rw] > IO_SEQMAX) in sequential_heuristic()
595 fp->f_seqcount[rw] = IO_SEQMAX; in sequential_heuristic()
597 return (fp->f_seqcount[rw] << IO_SEQSHIFT); in sequential_heuristic()
600 /* Not sequential. Quickly draw down sequentiality. */ in sequential_heuristic()
601 if (fp->f_seqcount[rw] > 1) in sequential_heuristic()
602 fp->f_seqcount[rw] = 1; in sequential_heuristic()
604 fp->f_seqcount[rw] = 0; in sequential_heuristic()
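The units here are easy to misread: f_seqcount is kept in 16384-byte blocks and is returned shifted by IO_SEQSHIFT so it can ride inside ioflag. A worked example using the stock definitions (IO_SEQMAX is 127 and IO_SEQSHIFT is 16 in sys/vnode.h):

int ioflag_hint, seqcount;
ssize_t resid;

seqcount = 0;
resid = 65536;				/* one sequential 64 KiB read */
seqcount += howmany(resid, 16384);	/* += 4 blocks */
if (seqcount > IO_SEQMAX)
	seqcount = IO_SEQMAX;		/* saturates at 127 */
ioflag_hint = seqcount << IO_SEQSHIFT;	/* decoded from ioflag by the
					   filesystem to size read-ahead */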
624 if (offset < 0 && vp->v_type != VCHR) in vn_rdwr()
652 if (vp->v_type != VCHR && in vn_rdwr()
732 chunk = MAXBSIZE - (uoff_t)offset % MAXBSIZE; in vn_rdwr_inchunks()
736 if (rw != UIO_READ && vp->v_type == VREG) in vn_rdwr_inchunks()
741 len -= chunk; /* aresid calc already includes length */ in vn_rdwr_inchunks()
764 return (atomic_load_long(&fp->f_offset)); in foffset_lock()
770 flagsp = &fp->f_vnread_flags; in foffset_lock()
772 return (atomic_load_long(&fp->f_offset)); in foffset_lock()
774 sleepq_lock(&fp->f_vnread_flags); in foffset_lock()
789 sleepq_add(&fp->f_vnread_flags, NULL, "vofflock", 0, 0); in foffset_lock()
790 sleepq_wait(&fp->f_vnread_flags, PUSER - 1); in foffset_lock()
792 sleepq_lock(&fp->f_vnread_flags); in foffset_lock()
795 res = atomic_load_long(&fp->f_offset); in foffset_lock()
796 sleepq_release(&fp->f_vnread_flags); in foffset_lock()
809 atomic_store_long(&fp->f_offset, val); in foffset_unlock()
811 fp->f_nextoff[UIO_READ] = val; in foffset_unlock()
813 fp->f_nextoff[UIO_WRITE] = val; in foffset_unlock()
818 flagsp = &fp->f_vnread_flags; in foffset_unlock()
824 sleepq_lock(&fp->f_vnread_flags); in foffset_unlock()
825 MPASS((fp->f_vnread_flags & FOFFSET_LOCKED) != 0); in foffset_unlock()
826 MPASS((fp->f_vnread_flags & FOFFSET_LOCK_WAITING) != 0); in foffset_unlock()
827 fp->f_vnread_flags = 0; in foffset_unlock()
828 sleepq_broadcast(&fp->f_vnread_flags, SLEEPQ_SLEEP, 0, 0); in foffset_unlock()
829 sleepq_release(&fp->f_vnread_flags); in foffset_unlock()
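These sleepqueue fragments implement a tiny hand-rolled lock over f_offset (FOFFSET_LOCKED/FOFFSET_LOCK_WAITING live in f_vnread_flags). The calling convention is a plain bracket, which foffset_lock_uio()/foffset_unlock_uio() further down wrap for uio-based I/O; a sketch:

off_t foff;

foff = foffset_lock(fp, 0);		/* set FOFFSET_LOCKED, fetch f_offset */
/* ... perform the I/O starting at foff ... */
foffset_unlock(fp, foff + nbytes, 0);	/* publish the new offset and wake
					   any FOFFSET_LOCK_WAITING sleepers */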
836 return (atomic_load_long(&fp->f_offset)); in foffset_read()
850 while (fp->f_vnread_flags & FOFFSET_LOCKED) { in foffset_lock()
851 fp->f_vnread_flags |= FOFFSET_LOCK_WAITING; in foffset_lock()
852 msleep(&fp->f_vnread_flags, mtxp, PUSER - 1, in foffset_lock()
855 fp->f_vnread_flags |= FOFFSET_LOCKED; in foffset_lock()
857 res = fp->f_offset; in foffset_lock()
872 fp->f_offset = val; in foffset_unlock()
874 fp->f_nextoff[UIO_READ] = val; in foffset_unlock()
876 fp->f_nextoff[UIO_WRITE] = val; in foffset_unlock()
878 KASSERT((fp->f_vnread_flags & FOFFSET_LOCKED) != 0, in foffset_unlock()
880 if (fp->f_vnread_flags & FOFFSET_LOCK_WAITING) in foffset_unlock()
881 wakeup(&fp->f_vnread_flags); in foffset_unlock()
882 fp->f_vnread_flags = 0; in foffset_unlock()
900 uio->uio_offset = foffset_lock(fp, flags); in foffset_lock_uio()
908 foffset_unlock(fp, uio->uio_offset, flags); in foffset_unlock_uio()
918 if (fp->f_advice == NULL || fp->f_vnode->v_type != VREG) in get_advice()
923 if (fp->f_advice != NULL && in get_advice()
924 uio->uio_offset >= fp->f_advice->fa_start && in get_advice()
925 uio->uio_offset + uio->uio_resid <= fp->f_advice->fa_end) in get_advice()
926 ret = fp->f_advice->fa_advice; in get_advice()
939 vp = fp->f_vnode; in get_write_ioflag()
940 mp = atomic_load_ptr(&vp->v_mount); in get_write_ioflag()
942 if ((fp->f_flag & O_DIRECT) != 0) in get_write_ioflag()
945 if ((fp->f_flag & O_FSYNC) != 0 || in get_write_ioflag()
946 (mp != NULL && (mp->mnt_flag & MNT_SYNCHRONOUS) != 0)) in get_write_ioflag()
954 if ((fp->f_flag & O_DSYNC) != 0) in get_write_ioflag()
969 MPASS(uio->uio_resid <= ptoa(io_hold_cnt + 2)); in vn_read_from_obj()
970 obj = atomic_load_ptr(&vp->v_object); in vn_read_from_obj()
978 if ((obj->flags & OBJ_DEAD) != 0) { in vn_read_from_obj()
993 resid = uio->uio_resid; in vn_read_from_obj()
994 off = uio->uio_offset; in vn_read_from_obj()
1012 resid -= PAGE_SIZE; in vn_read_from_obj()
1030 resid = PAGE_SIZE - (uio->uio_offset & PAGE_MASK) + ptoa(i - 1); in vn_read_from_obj()
1031 if (resid > uio->uio_resid) in vn_read_from_obj()
1032 resid = uio->uio_resid; in vn_read_from_obj()
1041 vsz = obj->un_pager.vnp.vnp_size; in vn_read_from_obj()
1043 vsz = atomic_load_64(&obj->un_pager.vnp.vnp_size); in vn_read_from_obj()
1045 if (uio->uio_offset >= vsz) { in vn_read_from_obj()
1049 if (uio->uio_offset + resid > vsz) in vn_read_from_obj()
1050 resid = vsz - uio->uio_offset; in vn_read_from_obj()
1052 error = vn_io_fault_pgmove(ma, uio->uio_offset & PAGE_MASK, resid, uio); in vn_read_from_obj()
1064 return (uio->uio_resid == 0 ? 0 : EJUSTRETURN); in vn_read_from_obj()
1079 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", in vn_read()
1080 uio->uio_td, td)); in vn_read()
1082 vp = fp->f_vnode; in vn_read()
1084 if (fp->f_flag & FNONBLOCK) in vn_read()
1086 if (fp->f_flag & O_DIRECT) in vn_read()
1095 error = VOP_READ_PGCACHE(vp, uio, ioflag, fp->f_cred); in vn_read()
1097 fp->f_nextoff[UIO_READ] = uio->uio_offset; in vn_read()
1114 /* Disable read-ahead for random I/O. */ in vn_read()
1117 orig_offset = uio->uio_offset; in vn_read()
1120 error = mac_vnode_check_read(active_cred, fp->f_cred, vp); in vn_read()
1123 error = VOP_READ(vp, uio, ioflag, fp->f_cred); in vn_read()
1124 fp->f_nextoff[UIO_READ] = uio->uio_offset; in vn_read()
1127 orig_offset != uio->uio_offset) in vn_read()
1133 error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1, in vn_read()
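The page-cache fast path above hinges on a small contract visible in the vn_read_from_obj() fragment: it returns 0 only when the request was fully satisfied from resident pages, and EJUSTRETURN otherwise, in which case vn_read() falls back to a locked VOP_READ. A sketch of that dispatch (simplified from the surrounding code):

error = VOP_READ_PGCACHE(vp, uio, ioflag, fp->f_cred);
if (error == EJUSTRETURN) {
	/* Fast path declined; take the vnode lock and do real I/O. */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_READ(vp, uio, ioflag, fp->f_cred);
	VOP_UNLOCK(vp);
}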
1152 KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", in vn_write()
1153 uio->uio_td, td)); in vn_write()
1155 vp = fp->f_vnode; in vn_write()
1156 if (vp->v_type == VREG) in vn_write()
1159 if (vp->v_type == VREG && (fp->f_flag & O_APPEND) != 0) in vn_write()
1161 if ((fp->f_flag & FNONBLOCK) != 0) in vn_write()
1167 if (vp->v_type != VCHR) { in vn_write()
1187 orig_offset = uio->uio_offset; in vn_write()
1190 error = mac_vnode_check_write(active_cred, fp->f_cred, vp); in vn_write()
1193 error = VOP_WRITE(vp, uio, ioflag, fp->f_cred); in vn_write()
1194 fp->f_nextoff[UIO_WRITE] = uio->uio_offset; in vn_write()
1199 orig_offset != uio->uio_offset) in vn_write()
1205 error = VOP_ADVISE(vp, orig_offset, uio->uio_offset - 1, in vn_write()
1218 * vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
1219 * vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2)
1220 * which establishes lock order vp1->vn_lock, then vp2->vn_lock.
1223 * resident, we get a reversed order vp2->vn_lock, then vp1->vn_lock.
1235 * array of the held pages from uio, saved in the curthread->td_ma,
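Spelled out as two interleaved threads, the reversal described in this comment block looks like the following (an illustration, not code from the file):

/*
 * Thread A: read(fd1, buf, n), where buf is mmapped from vp2
 *   vn_lock(vp1) -> uiomove() page-faults -> wants vn_lock(vp2)
 * Thread B: read(fd2, buf2, n), where buf2 is mmapped from vp1
 *   vn_lock(vp2) -> uiomove() page-faults -> wants vn_lock(vp1)
 *
 * vn_io_fault() breaks the cycle by faulting in and holding the user
 * pages (vm_fault_quick_hold_pages()) before the vnode lock is taken,
 * so uiomove() can no longer fault while the lock is held.
 */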
1256 switch (args->kind) { in vn_io_fault_doio()
1258 error = (args->args.fop_args.doio)(args->args.fop_args.fp, in vn_io_fault_doio()
1259 uio, args->cred, args->flags, td); in vn_io_fault_doio()
1262 switch (uio->uio_rw) { in vn_io_fault_doio()
1264 error = VOP_READ(args->args.vop_args.vp, uio, in vn_io_fault_doio()
1265 args->flags, args->cred); in vn_io_fault_doio()
1268 error = VOP_WRITE(args->args.vop_args.vp, uio, in vn_io_fault_doio()
1269 args->flags, args->cred); in vn_io_fault_doio()
1275 args->kind, uio->uio_rw); in vn_io_fault_doio()
1287 if (r == -1 || (uio->uio_rw == UIO_READ && subyte(base, r) == -1)) in vn_io_fault_touch()
1301 KASSERT(uio->uio_segflg == UIO_USERSPACE, in vn_io_fault_prefault_user()
1305 iov = uio->uio_iov; in vn_io_fault_prefault_user()
1306 resid = uio->uio_resid; in vn_io_fault_prefault_user()
1307 base = iov->iov_base; in vn_io_fault_prefault_user()
1308 len = iov->iov_len; in vn_io_fault_prefault_user()
1315 error = vn_io_fault_touch(base + len - 1, uio); in vn_io_fault_prefault_user()
1318 resid -= len; in vn_io_fault_prefault_user()
1320 if (++i >= uio->uio_iovcnt) in vn_io_fault_prefault_user()
1322 iov = uio->uio_iov + i; in vn_io_fault_prefault_user()
1323 base = iov->iov_base; in vn_io_fault_prefault_user()
1324 len = iov->iov_len; in vn_io_fault_prefault_user()
1326 len -= PAGE_SIZE; in vn_io_fault_prefault_user()
1328 resid -= PAGE_SIZE; in vn_io_fault_prefault_user()
1361 prot = uio->uio_rw == UIO_READ ? VM_PROT_WRITE : VM_PROT_READ; in vn_io_fault1()
1373 resid = uio->uio_resid; in vn_io_fault1()
1376 short_uio.uio_rw = uio->uio_rw; in vn_io_fault1()
1377 short_uio.uio_td = uio->uio_td; in vn_io_fault1()
1384 uio_clone->uio_segflg = UIO_NOCOPY; in vn_io_fault1()
1385 uiomove(NULL, resid - uio->uio_resid, uio_clone); in vn_io_fault1()
1386 uio_clone->uio_segflg = uio->uio_segflg; in vn_io_fault1()
1389 prev_td_ma = td->td_ma; in vn_io_fault1()
1390 prev_td_ma_cnt = td->td_ma_cnt; in vn_io_fault1()
1392 while (uio_clone->uio_resid != 0) { in vn_io_fault1()
1393 len = uio_clone->uio_iov->iov_len; in vn_io_fault1()
1395 KASSERT(uio_clone->uio_iovcnt >= 1, in vn_io_fault1()
1397 uio_clone->uio_iov++; in vn_io_fault1()
1398 uio_clone->uio_iovcnt--; in vn_io_fault1()
1403 addr = (uintptr_t)uio_clone->uio_iov->iov_base; in vn_io_fault1()
1414 cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map, in vn_io_fault1()
1416 if (cnt == -1) { in vn_io_fault1()
1424 short_uio.uio_offset = uio_clone->uio_offset; in vn_io_fault1()
1425 td->td_ma = ma; in vn_io_fault1()
1426 td->td_ma_cnt = cnt; in vn_io_fault1()
1430 adv = len - short_uio.uio_resid; in vn_io_fault1()
1432 uio_clone->uio_iov->iov_base = in vn_io_fault1()
1433 (char *)uio_clone->uio_iov->iov_base + adv; in vn_io_fault1()
1434 uio_clone->uio_iov->iov_len -= adv; in vn_io_fault1()
1435 uio_clone->uio_resid -= adv; in vn_io_fault1()
1436 uio_clone->uio_offset += adv; in vn_io_fault1()
1438 uio->uio_resid -= adv; in vn_io_fault1()
1439 uio->uio_offset += adv; in vn_io_fault1()
1444 td->td_ma = prev_td_ma; in vn_io_fault1()
1445 td->td_ma_cnt = prev_td_ma_cnt; in vn_io_fault1()
1463 doio = uio->uio_rw == UIO_READ ? vn_read : vn_write; in vn_io_fault()
1464 vp = fp->f_vnode; in vn_io_fault()
1474 if (vp->v_type == VDIR) { in vn_io_fault()
1475 KASSERT(uio->uio_rw == UIO_READ, in vn_io_fault()
1487 if (uio->uio_rw == UIO_READ) { in vn_io_fault()
1488 rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset, in vn_io_fault()
1489 uio->uio_offset + uio->uio_resid); in vn_io_fault()
1490 } else if ((fp->f_flag & O_APPEND) != 0 || in vn_io_fault()
1495 rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset, in vn_io_fault()
1496 uio->uio_offset + uio->uio_resid); in vn_io_fault()
1517 * the held pages for io->uio_iov[0].iov_base buffer instead of
1537 if ((td->td_pflags & TDP_UIOHELD) == 0 || in vn_io_fault_uiomove()
1538 uio->uio_segflg != UIO_USERSPACE) in vn_io_fault_uiomove()
1541 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt)); in vn_io_fault_uiomove()
1545 if (xfersize > uio->uio_resid) in vn_io_fault_uiomove()
1546 xfersize = uio->uio_resid; in vn_io_fault_uiomove()
1552 * corresponds to original uio->uio_iov, we need to invert the in vn_io_fault_uiomove()
1556 switch (uio->uio_rw) { in vn_io_fault_uiomove()
1564 transp_uio.uio_td = uio->uio_td; in vn_io_fault_uiomove()
1565 error = uiomove_fromphys(td->td_ma, in vn_io_fault_uiomove()
1566 ((vm_offset_t)uio->uio_iov->iov_base) & PAGE_MASK, in vn_io_fault_uiomove()
1568 adv = xfersize - transp_uio.uio_resid; in vn_io_fault_uiomove()
1570 (((vm_offset_t)uio->uio_iov->iov_base + adv) >> PAGE_SHIFT) - in vn_io_fault_uiomove()
1571 (((vm_offset_t)uio->uio_iov->iov_base) >> PAGE_SHIFT); in vn_io_fault_uiomove()
1572 td->td_ma += pgadv; in vn_io_fault_uiomove()
1573 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt, in vn_io_fault_uiomove()
1575 td->td_ma_cnt -= pgadv; in vn_io_fault_uiomove()
1576 uio->uio_iov->iov_base = (char *)uio->uio_iov->iov_base + adv; in vn_io_fault_uiomove()
1577 uio->uio_iov->iov_len -= adv; in vn_io_fault_uiomove()
1578 uio->uio_resid -= adv; in vn_io_fault_uiomove()
1579 uio->uio_offset += adv; in vn_io_fault_uiomove()
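Filesystems opt into this machinery by calling vn_io_fault_uiomove() wherever they would otherwise call uiomove(): when TDP_UIOHELD is set the copy is transparently redirected through the held pages in td_ma, and otherwise it degenerates to a plain uiomove(). A sketch of a call site inside a hypothetical VOP_READ (the buffer names are assumptions):

/* Copy out of a buf cache block in a filesystem's read path. */
tocopy = MIN(uio->uio_resid, bp->b_bcount - on);
error = vn_io_fault_uiomove(bp->b_data + on, tocopy, uio);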
1592 if ((td->td_pflags & TDP_UIOHELD) == 0 || in vn_io_fault_pgmove()
1593 uio->uio_segflg != UIO_USERSPACE) in vn_io_fault_pgmove()
1596 KASSERT(uio->uio_iovcnt == 1, ("uio_iovcnt %d", uio->uio_iovcnt)); in vn_io_fault_pgmove()
1597 cnt = xfersize > uio->uio_resid ? uio->uio_resid : xfersize; in vn_io_fault_pgmove()
1598 iov_base = (vm_offset_t)uio->uio_iov->iov_base; in vn_io_fault_pgmove()
1599 switch (uio->uio_rw) { in vn_io_fault_pgmove()
1601 pmap_copy_pages(td->td_ma, iov_base & PAGE_MASK, ma, in vn_io_fault_pgmove()
1605 pmap_copy_pages(ma, offset, td->td_ma, iov_base & PAGE_MASK, in vn_io_fault_pgmove()
1609 pgadv = ((iov_base + cnt) >> PAGE_SHIFT) - (iov_base >> PAGE_SHIFT); in vn_io_fault_pgmove()
1610 td->td_ma += pgadv; in vn_io_fault_pgmove()
1611 KASSERT(td->td_ma_cnt >= pgadv, ("consumed pages %d %d", td->td_ma_cnt, in vn_io_fault_pgmove()
1613 td->td_ma_cnt -= pgadv; in vn_io_fault_pgmove()
1614 uio->uio_iov->iov_base = (char *)(iov_base + cnt); in vn_io_fault_pgmove()
1615 uio->uio_iov->iov_len -= cnt; in vn_io_fault_pgmove()
1616 uio->uio_resid -= cnt; in vn_io_fault_pgmove()
1617 uio->uio_offset += cnt; in vn_io_fault_pgmove()
1633 vp = fp->f_vnode; in vn_truncate()
1646 if (vp->v_type == VDIR) { in vn_truncate()
1651 error = mac_vnode_check_write(active_cred, fp->f_cred, vp); in vn_truncate()
1655 error = vn_truncate_locked(vp, length, (fp->f_flag & O_FSYNC) != 0, in vn_truncate()
1656 fp->f_cred); in vn_truncate()
1684 VOP_ADD_WRITECOUNT_CHECKED(vp, -1); in vn_truncate_locked()
1695 struct vnode *vp = fp->f_vnode; in vn_statfile()
1699 error = VOP_STAT(vp, sb, active_cred, fp->f_cred); in vn_statfile()
1717 vp = fp->f_vnode; in vn_ioctl()
1718 switch (vp->v_type) { in vn_ioctl()
1725 *(int *)data = size - fp->f_offset; in vn_ioctl()
1731 error = mac_vnode_check_read(active_cred, fp->f_cred, in vn_ioctl()
1735 error = VOP_BMAP(vp, bmarg->bn, NULL, in vn_ioctl()
1736 &bmarg->bn, &bmarg->runp, &bmarg->runb); in vn_ioctl()
1743 return (VOP_IOCTL(vp, com, data, fp->f_flag, in vn_ioctl()
1748 return (VOP_IOCTL(vp, com, data, fp->f_flag, in vn_ioctl()
1765 vp = fp->f_vnode; in vn_poll()
1770 error = mac_vnode_check_poll(active_cred, fp->f_cred, vp); in vn_poll()
1776 error = VOP_POLL(vp, events, fp->f_cred, td); in vn_poll()
1828 VNPASS(vp->v_holdcnt > 0, vp); in _vn_lock()
1846 vp = fp->f_vnode; in vn_closefile()
1847 fp->f_ops = &badfileops; in vn_closefile()
1848 ref = (fp->f_flag & FHASLOCK) != 0; in vn_closefile()
1850 error = vn_close1(vp, fp->f_flag, fp->f_cred, td, ref); in vn_closefile()
1877 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0); in vn_start_write_refed()
1893 if ((curthread->td_pflags & TDP_IGNSUSP) == 0 || in vn_start_write_refed()
1894 mp->mnt_susp_owner != curthread) { in vn_start_write_refed()
1896 if ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0) { in vn_start_write_refed()
1900 mflags |= (PUSER - 1); in vn_start_write_refed()
1901 while ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { in vn_start_write_refed()
1906 error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags, in vn_start_write_refed()
1914 mp->mnt_writeopcount++; in vn_start_write_refed()
2005 if ((mp->mnt_kern_flag & (MNTK_SUSPENDED | MNTK_SUSPEND2)) == 0) { in vn_start_secondary_write()
2006 mp->mnt_secondary_writes++; in vn_start_secondary_write()
2007 mp->mnt_secondary_accwrites++; in vn_start_secondary_write()
2021 if ((mp->mnt_vfc->vfc_flags & VFCF_SBDRY) != 0) { in vn_start_secondary_write()
2025 mflags |= (PUSER - 1) | PDROP; in vn_start_secondary_write()
2026 error = msleep(&mp->mnt_flag, MNT_MTX(mp), mflags, "suspfs", 0); in vn_start_secondary_write()
2058 c = --mp->mnt_writeopcount; in vn_finished_write()
2059 if (mp->mnt_vfs_ops == 0) { in vn_finished_write()
2060 MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0); in vn_finished_write()
2066 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && c == 0) in vn_finished_write()
2067 wakeup(&mp->mnt_writeopcount); in vn_finished_write()
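vn_start_write()/vn_finished_write() bracket every write-like operation so the suspension machinery below can drain mnt_writeopcount. The canonical usage pattern, as seen at call sites throughout the tree (a sketch):

struct mount *mp;
int error;

error = vn_start_write(vp, &mp, V_WAIT);
if (error != 0)
	return (error);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_SETATTR(vp, &vattr, cred);
VOP_UNLOCK(vp);
vn_finished_write(mp);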
2083 mp->mnt_secondary_writes--; in vn_finished_secondary_write()
2084 if (mp->mnt_secondary_writes < 0) in vn_finished_secondary_write()
2086 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && in vn_finished_secondary_write()
2087 mp->mnt_secondary_writes <= 0) in vn_finished_secondary_write()
2088 wakeup(&mp->mnt_secondary_writes); in vn_finished_secondary_write()
2104 if (mp->mnt_susp_owner == curthread) { in vfs_write_suspend()
2109 while (mp->mnt_kern_flag & MNTK_SUSPEND) in vfs_write_suspend()
2110 msleep(&mp->mnt_flag, MNT_MTX(mp), PUSER - 1, "wsuspfs", 0); in vfs_write_suspend()
2121 (mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) { in vfs_write_suspend()
2127 mp->mnt_kern_flag |= MNTK_SUSPEND; in vfs_write_suspend()
2128 mp->mnt_susp_owner = curthread; in vfs_write_suspend()
2129 if (mp->mnt_writeopcount > 0) in vfs_write_suspend()
2130 (void) msleep(&mp->mnt_writeopcount, in vfs_write_suspend()
2131 MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0); in vfs_write_suspend()
2149 if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0) { in vfs_write_resume()
2150 KASSERT(mp->mnt_susp_owner == curthread, ("mnt_susp_owner")); in vfs_write_resume()
2151 mp->mnt_kern_flag &= ~(MNTK_SUSPEND | MNTK_SUSPEND2 | in vfs_write_resume()
2153 mp->mnt_susp_owner = NULL; in vfs_write_resume()
2154 wakeup(&mp->mnt_writeopcount); in vfs_write_resume()
2155 wakeup(&mp->mnt_flag); in vfs_write_resume()
2156 curthread->td_pflags &= ~TDP_IGNSUSP; in vfs_write_resume()
2159 mp->mnt_writeopcount++; in vfs_write_resume()
2182 KASSERT((curthread->td_pflags & TDP_IGNSUSP) == 0, in vfs_write_suspend_umnt()
2194 if ((mp->mnt_kern_flag & MNTK_SUSPENDED) != 0) in vfs_write_suspend_umnt()
2199 mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2); in vfs_write_suspend_umnt()
2200 wakeup(&mp->mnt_flag); in vfs_write_suspend_umnt()
2202 curthread->td_pflags |= TDP_IGNSUSP; in vfs_write_suspend_umnt()
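The suspend/resume pair quiesces a filesystem, e.g. for snapshot creation: once vfs_write_suspend() returns success, mnt_writeopcount has drained and new writers block in vn_start_write(). A minimal sketch:

error = vfs_write_suspend(mp, 0);
if (error != 0)
	return (error);
/* All writers drained; the filesystem is quiescent here. */
vfs_write_resume(mp, 0);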
2213 return (VOP_KQFILTER(fp->f_vnode, kn)); in vn_kqfilter()
2219 if ((fp->f_flag & FKQALLOWED) == 0) in vn_kqfilter_opath()
2225 * Simplified in-kernel wrapper calls for extended attribute access.
2261 *buflen = *buflen - auio.uio_resid; in vn_extattr_get()
2362 mp = vp->v_mount; in vn_vget_ino_gen()
2410 vn_send_sigxfsz(td->td_proc); in vn_rlimit_trunc()
2421 if (vp->v_type != VREG) in vn_rlimit_fsizex1()
2427 if (maxfsz != 0 && uio->uio_offset + uio->uio_resid > maxfsz) { in vn_rlimit_fsizex1()
2428 if (!adj || uio->uio_offset >= maxfsz) in vn_rlimit_fsizex1()
2430 uio->uio_resid = maxfsz - uio->uio_offset; in vn_rlimit_fsizex1()
2437 if (td == NULL || (td->td_pflags2 & TDP2_ACCT) != 0) in vn_rlimit_fsizex1()
2443 ktr_write = (td->td_pflags & TDP_INKTRACE) != 0; in vn_rlimit_fsizex1()
2444 lim = __predict_false(ktr_write) ? td->td_ktr_io_lim : in vn_rlimit_fsizex1()
2450 if (__predict_true((uoff_t)uio->uio_offset + uio->uio_resid <= lim)) in vn_rlimit_fsizex1()
2457 if (adj && (uoff_t)uio->uio_offset < lim) { in vn_rlimit_fsizex1()
2458 uio->uio_resid = lim - (uoff_t)uio->uio_offset; in vn_rlimit_fsizex1()
2463 vn_send_sigxfsz(td->td_proc); in vn_rlimit_fsizex1()
2474 * - return EFBIG if uio_offset is beyond it
2475 * - otherwise, clamp uio_resid if write would extend file beyond maxfsz.
2478 * - return EFBIG and send SIGXFSZ if uio_offset is beyond the limit
2479 * - otherwise, clamp uio_resid if write would extend file beyond limit.
2482 * *resid_adj, to be re-applied by vn_rlimit_fsizex_res() on return
2493 resid_orig = uio->uio_resid; in vn_rlimit_fsizex()
2497 *resid_adj = resid_orig - uio->uio_resid; in vn_rlimit_fsizex()
2504 uio->uio_resid += resid_adj; in vn_rlimit_fsizex_res()
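Putting the doc fragments together, the intended calling pattern is: clamp before the VOP, re-apply the clamp afterwards (a sketch; a maxfsz of 0 means the filesystem imposes no limit of its own):

ssize_t resid_adj;

error = vn_rlimit_fsizex(vp, uio, 0, &resid_adj, td);
if (error != 0)
	return (error);
error = VOP_WRITE(vp, uio, ioflag, cred);
vn_rlimit_fsizex_res(uio, resid_adj);	/* restore the clamped resid */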
2521 vp = fp->f_vnode; in vn_chmod()
2536 vp = fp->f_vnode; in vn_chown()
2554 if ((object = vp->v_object) == NULL) in vn_pages_remove()
2572 if ((object = vp->v_object) == NULL) in vn_pages_remove_valid()
2580 vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off, in vn_bmap_seekhole_locked() argument
2589 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA, in vn_bmap_seekhole_locked()
2590 ("%s: Wrong command %lu", __func__, cmd)); in vn_bmap_seekhole_locked()
2593 if (vp->v_type != VREG) { in vn_bmap_seekhole_locked()
2609 bsize = vp->v_mount->mnt_stat.f_iosize; in vn_bmap_seekhole_locked()
2610 for (bn = noff / bsize; noff < size; bn++, noff += bsize - in vn_bmap_seekhole_locked()
2617 if ((bnp == -1 && cmd == FIOSEEKHOLE) || in vn_bmap_seekhole_locked()
2618 (bnp != -1 && cmd == FIOSEEKDATA)) { in vn_bmap_seekhole_locked()
2628 if (cmd == FIOSEEKDATA) in vn_bmap_seekhole_locked()
2637 vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off, struct ucred *cred) in vn_bmap_seekhole() argument
2641 KASSERT(cmd == FIOSEEKHOLE || cmd == FIOSEEKDATA, in vn_bmap_seekhole()
2642 ("%s: Wrong command %lu", __func__, cmd)); in vn_bmap_seekhole()
2646 error = vn_bmap_seekhole_locked(vp, cmd, off, cred); in vn_bmap_seekhole()
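vn_bmap_seekhole() gives filesystems whose only hole knowledge is VOP_BMAP's bn == -1 convention a stock FIOSEEKHOLE/FIOSEEKDATA implementation; such a filesystem can simply forward the ioctls (a sketch of a hypothetical vop_ioctl handler):

static int
myfs_ioctl(struct vop_ioctl_args *ap)
{
	switch (ap->a_command) {
	case FIOSEEKDATA:
	case FIOSEEKHOLE:
		return (vn_bmap_seekhole(ap->a_vp, ap->a_command,
		    (off_t *)ap->a_data, ap->a_cred));
	default:
		return (ENOTTY);
	}
}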
2659 cred = td->td_ucred; in vn_seek()
2660 vp = fp->f_vnode; in vn_seek()
2661 noneg = (vp->v_type != VCHR); in vn_seek()
2670 td->td_uretoff.tdu_off = foffset; in vn_seek()
2679 (offset > 0 && foffset > OFF_MAX - offset))) { in vn_seek()
2695 if (fsize == 0 && vp->v_type == VCHR && in vn_seek()
2698 if (noneg && offset > 0 && fsize > OFF_MAX - offset) { in vn_seek()
2724 td->td_uretoff.tdu_off = offset; in vn_seek()
2738 * the super-user, or has ACL_WRITE_ATTRIBUTES permission on in vn_utimes_perm()
2748 if (error != 0 && (vap->va_vaflags & VA_UTIMES_NULL) != 0) in vn_utimes_perm()
2759 if (fp->f_type == DTYPE_FIFO) in vn_fill_kinfo()
2760 kif->kf_type = KF_TYPE_FIFO; in vn_fill_kinfo()
2762 kif->kf_type = KF_TYPE_VNODE; in vn_fill_kinfo()
2763 vp = fp->f_vnode; in vn_fill_kinfo()
2781 len = (arc4random() % (sizeof(kif->kf_path) - 2)) + 1; in vn_fill_junk()
2782 olen = strlen(kif->kf_path); in vn_fill_junk()
2784 strcpy(&kif->kf_path[len - 1], "$"); in vn_fill_junk()
2787 strcpy(&kif->kf_path[olen], "A"); in vn_fill_junk()
2797 kif->kf_un.kf_file.kf_file_type = vntype_to_kinfo(vp->v_type); in vn_fill_kinfo_vnode()
2799 fullpath = "-"; in vn_fill_kinfo_vnode()
2802 strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path)); in vn_fill_kinfo_vnode()
2817 error = VOP_GETATTR(vp, &va, curthread->td_ucred); in vn_fill_kinfo_vnode()
2822 kif->kf_un.kf_file.kf_file_fsid = va.va_fsid; in vn_fill_kinfo_vnode()
2824 kif->kf_un.kf_file.kf_file_fsid = in vn_fill_kinfo_vnode()
2825 vp->v_mount->mnt_stat.f_fsid.val[0]; in vn_fill_kinfo_vnode()
2826 kif->kf_un.kf_file.kf_file_fsid_freebsd11 = in vn_fill_kinfo_vnode()
2827 kif->kf_un.kf_file.kf_file_fsid; /* truncate */ in vn_fill_kinfo_vnode()
2828 kif->kf_un.kf_file.kf_file_fileid = va.va_fileid; in vn_fill_kinfo_vnode()
2829 kif->kf_un.kf_file.kf_file_mode = MAKEIMODE(va.va_type, va.va_mode); in vn_fill_kinfo_vnode()
2830 kif->kf_un.kf_file.kf_file_size = va.va_size; in vn_fill_kinfo_vnode()
2831 kif->kf_un.kf_file.kf_file_rdev = va.va_rdev; in vn_fill_kinfo_vnode()
2832 kif->kf_un.kf_file.kf_file_rdev_freebsd11 = in vn_fill_kinfo_vnode()
2833 kif->kf_un.kf_file.kf_file_rdev; /* truncate */ in vn_fill_kinfo_vnode()
2834 kif->kf_un.kf_file.kf_file_nlink = va.va_nlink; in vn_fill_kinfo_vnode()
2856 * POSIX shared-memory objects are defined to have in vn_mmap()
2858 * read(2)/write(2) -- or even open(2). Thus, we can in vn_mmap()
2859 * use MAP_ASYNC to trade on-disk coherence for speed. in vn_mmap()
2863 if ((fp->f_flag & FPOSIXSHM) != 0) in vn_mmap()
2866 vp = fp->f_vnode; in vn_mmap()
2877 mp = vp->v_mount; in vn_mmap()
2878 if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) { in vn_mmap()
2884 if ((fp->f_flag & FREAD) != 0) in vn_mmap()
2895 if ((fp->f_flag & FWRITE) != 0) in vn_mmap()
2910 * possible wraparound or user-level access into reserved in vn_mmap()
2920 foff > OFF_MAX - size) in vn_mmap()
2957 f = &vp->v_mount->mnt_stat.f_fsid; in vn_fsid()
2958 va->va_fsid = (uint32_t)f->val[1]; in vn_fsid()
2959 va->va_fsid <<= sizeof(f->val[1]) * NBBY; in vn_fsid()
2960 va->va_fsid += (uint32_t)f->val[0]; in vn_fsid()
2974 if (vp->v_type == VCHR) { in vn_fsync_buf()
2976 mp = vp->v_rdev->si_mountpt; in vn_fsync_buf()
2979 bo = &vp->v_bufobj; in vn_fsync_buf()
2985 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) { in vn_fsync_buf()
2986 bp->b_vflags &= ~BV_SCANNED; in vn_fsync_buf()
2987 bp->b_error = 0; in vn_fsync_buf()
2994 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { in vn_fsync_buf()
2995 if ((bp->b_vflags & BV_SCANNED) != 0) in vn_fsync_buf()
2997 bp->b_vflags |= BV_SCANNED; in vn_fsync_buf()
3010 KASSERT(bp->b_bufobj == bo, in vn_fsync_buf()
3012 bp, bp->b_bufobj, bo)); in vn_fsync_buf()
3013 if ((bp->b_flags & B_DELWRI) == 0) in vn_fsync_buf()
3015 if ((vp->v_object != NULL) && (bp->b_flags & B_CLUSTEROK)) { in vn_fsync_buf()
3029 * dirty buffers in the system. Wait for in-progress I/O to in vn_fsync_buf()
3035 if (bo->bo_dirty.bv_cnt > 0) { in vn_fsync_buf()
3041 TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) in vn_fsync_buf()
3042 if ((error = bp->b_error) != 0) in vn_fsync_buf()
3044 if ((mp != NULL && mp->mnt_secondary_writes > 0) || in vn_fsync_buf()
3045 (error == 0 && --maxretry >= 0)) in vn_fsync_buf()
3083 if (invp->v_type == VDIR || outvp->v_type == VDIR) in vn_copy_file_range()
3086 invp->v_type != VREG || outvp->v_type != VREG) in vn_copy_file_range()
3095 len = INT64_MAX - *inoffp; in vn_copy_file_range()
3099 len = INT64_MAX - *outoffp; in vn_copy_file_range()
3110 inmp = invpl->v_mount; in vn_copy_file_range()
3111 outmp = outvpl->v_mount; in vn_copy_file_range()
3140 if (inmp == outmp || inmp->mnt_vfc == outmp->mnt_vfc) in vn_copy_file_range()
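vn_copy_file_range() is the backend of copy_file_range(2); from userspace the whole machinery is driven like this (a sketch):

#include <err.h>
#include <unistd.h>

ssize_t n;

/* Copy up to len bytes from infd to outfd; passing NULL offsets lets
 * the kernel use and advance the file descriptors' own offsets. */
n = copy_file_range(infd, NULL, outfd, NULL, len, 0);
if (n == -1)
	err(1, "copy_file_range");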
3162 * Test len bytes of data starting at dat for all bytes == 0.
3164 * Expects dat to be well aligned.
3167 mem_iszero(void *dat, int len) in mem_iszero() argument
3173 for (p = dat; len > 0; len -= sizeof(*p), p++) { in mem_iszero()
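Only the signature and loop header of mem_iszero() match the query; the whole helper, reconstructed consistently with those fragments (the sub-word tail handling is an assumption), looks like:

static int
mem_iszero(void *dat, int len)
{
	int *p;
	char *cp;

	for (p = dat; len > 0; len -= sizeof(*p), p++) {
		if (len >= sizeof(*p)) {
			if (*p != 0)
				return (0);
		} else {
			/* Sub-word tail: check the remaining bytes. */
			for (cp = (char *)p; len > 0; cp++, len--)
				if (*cp != 0)
					return (0);
		}
	}
	return (1);
}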
3219 *holeoffp = -1; /* Disable use of vn_skip_hole(). */ in vn_skip_hole()
3235 delta = *dataoffp - *outoffp; in vn_skip_hole()
3242 *xferp -= delta; in vn_skip_hole()
3251 * non-INVARIANTS case, check this to make sure xfer2 remains a sane in vn_skip_hole()
3255 xfer2 = *holeoffp - *outoffp; in vn_skip_hole()
3260 * Write an xfer sized chunk to outvp in blksize blocks from dat.
3261 * dat is a maximum of blksize in length and can be written repeatedly in
3269 vn_write_outvp(struct vnode *outvp, char *dat, off_t outoff, off_t xfer, in vn_write_outvp() argument
3315 error = vn_rdwr(UIO_WRITE, outvp, dat, xfer2, in vn_write_outvp()
3317 curthread->td_ucred, cred, NULL, curthread); in vn_write_outvp()
3319 xfer -= xfer2; in vn_write_outvp()
3349 char *dat; in vn_generic_copy_file_range() local
3357 dat = NULL; in vn_generic_copy_file_range()
3416 *outoffp <= OFF_MAX - len && outsize <= *outoffp + len && in vn_generic_copy_file_range()
3418 *outoffp <= OFF_MAX - (inva.va_size - *inoffp) && in vn_generic_copy_file_range()
3419 outsize <= *outoffp + (inva.va_size - *inoffp)) { in vn_generic_copy_file_range()
3421 error = mac_vnode_check_write(curthread->td_ucred, in vn_generic_copy_file_range()
3452 blksize = outvp->v_mount->mnt_stat.f_iosize; in vn_generic_copy_file_range()
3460 blksize = MAX(invp->v_mount->mnt_stat.f_iosize, in vn_generic_copy_file_range()
3461 outvp->v_mount->mnt_stat.f_iosize); in vn_generic_copy_file_range()
3469 dat = malloc(blksize, M_TEMP, M_WAITOK); in vn_generic_copy_file_range()
3533 xfer = MIN(startoff - *inoffp, len); in vn_generic_copy_file_range()
3536 xfer2 = MIN(outsize - *outoffp, in vn_generic_copy_file_range()
3538 memset(dat, 0, MIN(xfer2, blksize)); in vn_generic_copy_file_range()
3539 error = vn_write_outvp(outvp, dat, in vn_generic_copy_file_range()
3547 error = vn_write_outvp(outvp, dat, in vn_generic_copy_file_range()
3554 len -= xfer; in vn_generic_copy_file_range()
3568 copylen = MIN(len, endoff - startoff); in vn_generic_copy_file_range()
3586 xfer -= (*inoffp % blksize); in vn_generic_copy_file_range()
3591 * to copy anything, allow a zero-length block so that the VOPs in vn_generic_copy_file_range()
3602 error = vn_rdwr(UIO_READ, invp, dat, xfer, in vn_generic_copy_file_range()
3604 curthread->td_ucred, incred, &aresid, in vn_generic_copy_file_range()
3610 xfer -= aresid; in vn_generic_copy_file_range()
3620 readzeros = cantseek ? mem_iszero(dat, xfer) : in vn_generic_copy_file_range()
3626 error = vn_write_outvp(outvp, dat, in vn_generic_copy_file_range()
3635 copylen -= xfer; in vn_generic_copy_file_range()
3636 len -= xfer; in vn_generic_copy_file_range()
3654 *lenp = savlen - len; in vn_generic_copy_file_range()
3655 free(dat, M_TEMP); in vn_generic_copy_file_range()
3670 vp = fp->f_vnode; in vn_fallocate()
3671 if (vp->v_type != VREG) in vn_fallocate()
3696 error = mac_vnode_check_write(td->td_ucred, fp->f_cred, vp); in vn_fallocate()
3700 td->td_ucred); in vn_fallocate()
3797 if (*offset < 0 || *length <= 0 || *length > OFF_MAX - *offset || in vn_deallocate()
3800 if (vp->v_type != VREG) in vn_deallocate()
3809 vn_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags, in vn_fspacectl() argument
3816 KASSERT(cmd == SPACECTL_DEALLOC, ("vn_fspacectl: Invalid cmd")); in vn_fspacectl()
3818 ("vn_fspacectl: non-zero flags")); in vn_fspacectl()
3819 KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset, in vn_fspacectl()
3821 vp = fp->f_vnode; in vn_fspacectl()
3823 if (vp->v_type != VREG) in vn_fspacectl()
3828 switch (cmd) { in vn_fspacectl()
3831 active_cred, active_cred, fp->f_cred); in vn_fspacectl()
3834 panic("vn_fspacectl: unknown cmd %d", cmd); in vn_fspacectl()
3870 * and '*eofflag') must be re-initialized before a subsequent call. On error
3892 VNASSERT(vp->v_type == VDIR, vp, ("vnode is not a directory")); in vn_dir_next_dirent()
3917 reclen = dp->d_reclen; in vn_dir_next_dirent()
3923 *len -= reclen; in vn_dir_next_dirent()
3958 error = mac_vnode_check_readdir(td->td_ucred, vp); in vn_dir_next_dirent()
3961 error = VOP_READDIR(vp, &uio, td->td_ucred, eofflag, in vn_dir_next_dirent()
3966 *len = dirbuflen - uio.uio_resid; in vn_dir_next_dirent()
3987 dp->d_reclen < GENERIC_MINDIRSIZ)) { in vn_dir_next_dirent()
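The re-initialization contract in the comment above translates into an iteration pattern like the following (a sketch; the buffer size is illustrative and the argument order follows the declaration in recent FreeBSD):

struct dirent *dp;
char *dirbuf;
size_t dirbuflen, len;
off_t off;
int eofflag, error;

dirbuflen = 8192;	/* illustrative sizing */
dirbuf = malloc(dirbuflen, M_TEMP, M_WAITOK);
len = 0;
off = 0;
eofflag = 0;
for (;;) {
	error = vn_dir_next_dirent(vp, td, dirbuf, dirbuflen,
	    &dp, &len, &off, &eofflag);
	if (error != 0 || dp == NULL)
		break;
	/* ... consume *dp ... */
}
free(dirbuf, M_TEMP);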
4018 VNPASS(vp->v_type == VDIR, vp); in vn_dir_check_empty()
4020 error = VOP_GETATTR(vp, &va, td->td_ucred); in vn_dir_check_empty()
4051 * union-specific metadata of its upper layer, meaning that in vn_dir_check_empty()
4057 if (dp->d_type == DT_WHT) in vn_dir_check_empty()
4064 switch (dp->d_namlen) { in vn_dir_check_empty()
4066 if (dp->d_name[1] != '.') { in vn_dir_check_empty()
4073 if (dp->d_name[0] != '.') { in vn_dir_check_empty()
4154 if ((vp1->v_vnlock->lock_object.lo_flags & in vn_lock_pair()
4174 (vp1->v_vnlock->lock_object.lo_flags & LK_NOSHARE) != 0) in vn_lock_pair()
4192 (vp2->v_vnlock->lock_object.lo_flags & LK_NOSHARE) != 0) in vn_lock_pair()
4259 (mp == NULL && MNT_SHARED_WRITES(vp->v_mount))) in vn_lktype_write()
4267 if (fp2->f_type != DTYPE_VNODE) in vn_cmp()
4269 return (kcmp_cmp((uintptr_t)fp1->f_vnode, (uintptr_t)fp2->f_vnode)); in vn_cmp()