Lines Matching +full:smp +full:- +full:offset
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2000-2001 Boris Popov
82 int error/*, *eofflag = ap->a_eofflag*/; in smbfs_readvdir()
83 long offset, limit; in smbfs_readvdir() local
86 SMBVDEBUG("dirname='%s'\n", np->n_name); in smbfs_readvdir()
88 smb_makescred(scred, uio->uio_td, cred); in smbfs_readvdir()
89 offset = uio->uio_offset / DE_SIZE; /* offset in the directory */ in smbfs_readvdir()
90 limit = uio->uio_resid / DE_SIZE; in smbfs_readvdir()
91 if (uio->uio_resid < DE_SIZE || uio->uio_offset < 0) { in smbfs_readvdir()
95 while (limit && offset < 2) { in smbfs_readvdir()
96 limit--; in smbfs_readvdir()
99 de.d_fileno = (offset == 0) ? np->n_ino : in smbfs_readvdir()
100 (np->n_parent ? np->n_parentino : 2); in smbfs_readvdir()
102 de.d_fileno = 0x7ffffffd + offset; in smbfs_readvdir()
103 de.d_off = offset + 1; in smbfs_readvdir()
104 de.d_namlen = offset + 1; in smbfs_readvdir()
112 offset++; in smbfs_readvdir()
113 uio->uio_offset += DE_SIZE; in smbfs_readvdir()
119 if (offset != np->n_dirofs || np->n_dirseq == NULL) { in smbfs_readvdir()
120 SMBVDEBUG("Reopening search %ld:%ld\n", offset, np->n_dirofs); in smbfs_readvdir()
121 if (np->n_dirseq) { in smbfs_readvdir()
122 smbfs_findclose(np->n_dirseq, scred); in smbfs_readvdir()
123 np->n_dirseq = NULL; in smbfs_readvdir()
125 np->n_dirofs = 2; in smbfs_readvdir()
133 np->n_dirseq = ctx; in smbfs_readvdir()
135 ctx = np->n_dirseq; in smbfs_readvdir()
136 while (np->n_dirofs < offset) { in smbfs_readvdir()
137 error = smbfs_findnext(ctx, offset - np->n_dirofs++, scred); in smbfs_readvdir()
139 smbfs_findclose(np->n_dirseq, scred); in smbfs_readvdir()
140 np->n_dirseq = NULL; in smbfs_readvdir()
146 for (; limit; limit--, offset++) { in smbfs_readvdir()
150 np->n_dirofs++; in smbfs_readvdir()
153 de.d_fileno = ctx->f_attr.fa_ino; in smbfs_readvdir()
154 de.d_off = offset + 1; in smbfs_readvdir()
155 de.d_type = (ctx->f_attr.fa_attr & SMB_FA_DIR) ? DT_DIR : DT_REG; in smbfs_readvdir()
156 de.d_namlen = ctx->f_nmlen; in smbfs_readvdir()
157 bcopy(ctx->f_name, de.d_name, de.d_namlen); in smbfs_readvdir()
160 error = smbfs_nget(vp->v_mount, vp, ctx->f_name, in smbfs_readvdir()
161 ctx->f_nmlen, &ctx->f_attr, &newvp); in smbfs_readvdir()
175 uio->uio_offset = offset * DE_SIZE; in smbfs_readvdir()
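
The smbfs_readvdir() lines above turn the caller's byte offset into a directory-entry index (offset = uio_offset / DE_SIZE) and synthesize the first two slots as "." and "..". As a rough illustration of that arithmetic only, here is a minimal userland sketch; the DE_SIZE value and the sizes used are made-up stand-ins, not the kernel's.

/*
 * Minimal userland sketch of the directory-offset arithmetic seen in the
 * smbfs_readvdir() lines above.  DE_SIZE stands in for the size of one
 * fixed-width dirent record; all values here are illustrative only.
 */
#include <stdio.h>

#define DE_SIZE		512	/* assumed record size, not the kernel value */

int
main(void)
{
	long uio_offset = 0;		/* byte offset handed in by the caller */
	long uio_resid = 4 * DE_SIZE;	/* room for four records */

	long offset = uio_offset / DE_SIZE;	/* index of first entry wanted */
	long limit  = uio_resid  / DE_SIZE;	/* how many entries fit */

	/* Slots 0 and 1 are synthesized as "." and "..". */
	while (limit && offset < 2) {
		printf("slot %ld: synthetic \"%s\", d_off=%ld, d_namlen=%ld\n",
		    offset, offset == 0 ? "." : "..",
		    offset + 1, offset + 1);
		limit--;
		offset++;
		uio_offset += DE_SIZE;
	}
	/* Remaining slots would be filled from the server-side search. */
	printf("next real entry index: %ld, entries still wanted: %ld\n",
	    offset, limit);
	return (0);
}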
184 struct smbmount *smp = VFSTOSMBFS(vp->v_mount); in smbfs_readvnode() local
194 if (uiop->uio_segflg == UIO_NOCOPY) in smbfs_readvnode()
197 if (vp->v_type != VREG && vp->v_type != VDIR) { in smbfs_readvnode()
201 if (uiop->uio_resid == 0) in smbfs_readvnode()
203 if (uiop->uio_offset < 0) in smbfs_readvnode()
205 /* if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize) in smbfs_readvnode()
207 td = uiop->uio_td; in smbfs_readvnode()
208 if (vp->v_type == VDIR) { in smbfs_readvnode()
209 lks = LK_EXCLUSIVE; /* lockstatus(vp->v_vnlock); */ in smbfs_readvnode()
218 /* biosize = SSTOCN(smp->sm_share)->sc_txmax;*/ in smbfs_readvnode()
219 if (np->n_flag & NMODIFIED) { in smbfs_readvnode()
224 np->n_mtime.tv_sec = vattr.va_mtime.tv_sec; in smbfs_readvnode()
229 if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) { in smbfs_readvnode()
233 np->n_mtime.tv_sec = vattr.va_mtime.tv_sec; in smbfs_readvnode()
238 error = smb_read(smp->sm_share, np->n_fid, uiop, scred); in smbfs_readvnode()
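
The smbfs_readvnode() lines above suggest the cache rule: buffered file data is reused only while the recorded modification time still matches the server's. A hedged userland sketch of that check follows; toy_node, cache_stale() and their fields are invented stand-ins for the kernel's smbnode and flags, not the real structures.

/*
 * Hedged sketch of the consistency check suggested by the smbfs_readvnode()
 * lines above: cached file data is only trusted while the remembered
 * modification time still matches what the server reports.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct toy_node {
	time_t n_mtime;		/* mtime recorded when the cache was last known good */
	bool   n_modified;	/* analogue of the NMODIFIED flag */
};

/* Return true when cached buffers should be flushed/invalidated first. */
static bool
cache_stale(struct toy_node *np, time_t server_mtime)
{
	bool stale = false;

	if (np->n_modified) {
		/* Locally dirty: write back first, then re-record the time. */
		np->n_modified = false;
		stale = true;
	} else if (np->n_mtime != server_mtime) {
		/* File changed on the server behind our back. */
		stale = true;
	}
	np->n_mtime = server_mtime;
	return (stale);
}

int
main(void)
{
	struct toy_node np = { .n_mtime = 1000, .n_modified = false };

	printf("unchanged mtime -> stale? %d\n", cache_stale(&np, 1000));
	printf("changed mtime   -> stale? %d\n", cache_stale(&np, 2000));
	return (0);
}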
247 struct smbmount *smp = VTOSMBFS(vp); in smbfs_writevnode() local
253 if (vp->v_type != VREG) { in smbfs_writevnode()
257 SMBVDEBUG("ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset, in smbfs_writevnode()
258 uiop->uio_resid); in smbfs_writevnode()
259 if (uiop->uio_offset < 0) in smbfs_writevnode()
261 /* if (uiop->uio_offset + uiop->uio_resid > smp->nm_maxfilesize) in smbfs_writevnode()
263 td = uiop->uio_td; in smbfs_writevnode()
265 if (np->n_flag & NMODIFIED) { in smbfs_writevnode()
280 uiop->uio_offset = np->n_size; in smbfs_writevnode()
283 if (uiop->uio_resid == 0) in smbfs_writevnode()
292 error = smb_write(smp->sm_share, np->n_fid, uiop, scred); in smbfs_writevnode()
294 SMBVDEBUG("after: ofs=%jd,resid=%zd\n", (intmax_t)uiop->uio_offset, in smbfs_writevnode()
295 uiop->uio_resid); in smbfs_writevnode()
297 if (uiop->uio_offset > np->n_size) { in smbfs_writevnode()
298 np->n_size = uiop->uio_offset; in smbfs_writevnode()
299 vnode_pager_setsize(vp, np->n_size); in smbfs_writevnode()
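
The smbfs_writevnode() lines above show the append and size-extension arithmetic: an IO_APPEND write starts at the remembered end of file, and a write that runs past n_size grows it (and the pager's idea of the size with it). A small sketch of just that arithmetic, with made-up values rather than the kernel structures:

/*
 * Sketch of the append and size-extension arithmetic visible in the
 * smbfs_writevnode() lines above.  Plain userland C, illustrative values.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

int
main(void)
{
	off_t n_size = 4096;	/* remembered file size */
	off_t uio_offset;	/* where the write will land */
	off_t uio_resid = 1000;	/* bytes to write */
	int ioflag_append = 1;	/* analogue of IO_APPEND */

	if (ioflag_append)
		uio_offset = n_size;	/* append: start at EOF */
	else
		uio_offset = 2048;	/* ordinary write offset */

	/* Pretend the write completed: the offset advances past the data. */
	uio_offset += uio_resid;

	if (uio_offset > n_size) {
		n_size = uio_offset;	/* file grew */
		/* the kernel would also call vnode_pager_setsize() here */
	}
	printf("new size: %jd\n", (intmax_t)n_size);
	return (0);
}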
311 struct smbmount *smp = VFSTOSMBFS(vp->v_mount); in smbfs_doio() local
319 uiop->uio_iov = &io; in smbfs_doio()
320 uiop->uio_iovcnt = 1; in smbfs_doio()
321 uiop->uio_segflg = UIO_SYSSPACE; in smbfs_doio()
322 uiop->uio_td = td; in smbfs_doio()
327 if (bp->b_iocmd == BIO_READ) { in smbfs_doio()
328 io.iov_len = uiop->uio_resid = bp->b_bcount; in smbfs_doio()
329 io.iov_base = bp->b_data; in smbfs_doio()
330 uiop->uio_rw = UIO_READ; in smbfs_doio()
331 switch (vp->v_type) { in smbfs_doio()
333 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE; in smbfs_doio()
334 error = smb_read(smp->sm_share, np->n_fid, uiop, scred); in smbfs_doio()
337 if (uiop->uio_resid) { in smbfs_doio()
338 int left = uiop->uio_resid; in smbfs_doio()
339 int nread = bp->b_bcount - left; in smbfs_doio()
341 bzero((char *)bp->b_data + nread, left); in smbfs_doio()
345 printf("smbfs_doio: type %x unexpected\n",vp->v_type); in smbfs_doio()
349 bp->b_error = error; in smbfs_doio()
350 bp->b_ioflags |= BIO_ERROR; in smbfs_doio()
353 if (((bp->b_blkno * DEV_BSIZE) + bp->b_dirtyend) > np->n_size) in smbfs_doio()
354 bp->b_dirtyend = np->n_size - (bp->b_blkno * DEV_BSIZE); in smbfs_doio()
356 if (bp->b_dirtyend > bp->b_dirtyoff) { in smbfs_doio()
357 io.iov_len = uiop->uio_resid = bp->b_dirtyend - bp->b_dirtyoff; in smbfs_doio()
358 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff; in smbfs_doio()
359 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff; in smbfs_doio()
360 uiop->uio_rw = UIO_WRITE; in smbfs_doio()
361 error = smb_write(smp->sm_share, np->n_fid, uiop, scred); in smbfs_doio()
377 || (!error && (bp->b_flags & B_NEEDCOMMIT))) { in smbfs_doio()
378 bp->b_flags &= ~(B_INVAL|B_NOCACHE); in smbfs_doio()
379 if ((bp->b_flags & B_ASYNC) == 0) in smbfs_doio()
380 bp->b_flags |= B_EINTR; in smbfs_doio()
381 if ((bp->b_flags & B_PAGING) == 0) { in smbfs_doio()
383 bp->b_flags &= ~B_DONE; in smbfs_doio()
385 if ((bp->b_flags & B_ASYNC) == 0) in smbfs_doio()
386 bp->b_flags |= B_EINTR; in smbfs_doio()
389 bp->b_ioflags |= BIO_ERROR; in smbfs_doio()
390 bp->b_error = error; in smbfs_doio()
392 bp->b_dirtyoff = bp->b_dirtyend = 0; in smbfs_doio()
395 bp->b_resid = 0; in smbfs_doio()
402 bp->b_resid = uiop->uio_resid; in smbfs_doio()
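
The smbfs_doio() lines above map a buffer to its file range as b_blkno * DEV_BSIZE, zero-fill the tail of a short read, and clamp a write's dirty range so it never runs past the file size. A minimal userland sketch of those three calculations, using stand-in constants and a toy buffer rather than the kernel's:

/*
 * Userland sketch of the buffer-to-file mapping arithmetic in the
 * smbfs_doio() lines above.  TOY_DEV_BSIZE and all sizes are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define TOY_DEV_BSIZE	512

int
main(void)
{
	char b_data[4096];
	int64_t b_blkno = 16;		/* buffer's block number */
	long b_bcount = sizeof(b_data);	/* bytes the buffer covers */
	off_t file_offset;
	long resid, nread;

	/* BIO_READ: block number -> byte offset in the file. */
	file_offset = (off_t)b_blkno * TOY_DEV_BSIZE;

	/* Pretend the server returned a short read of 3000 bytes. */
	nread = 3000;
	resid = b_bcount - nread;
	if (resid > 0)			/* zero-fill the unread tail */
		memset(b_data + nread, 0, resid);

	/* BIO_WRITE: clamp the dirty range so it never runs past EOF. */
	off_t n_size = file_offset + 3500;	/* remembered file size */
	long b_dirtyoff = 0, b_dirtyend = b_bcount;
	if (file_offset + b_dirtyend > n_size)
		b_dirtyend = n_size - file_offset;

	printf("offset=%jd nread=%ld dirty=[%ld,%ld)\n",
	    (intmax_t)file_offset, nread, b_dirtyoff, b_dirtyend);
	return (0);
}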
427 struct smbmount *smp; in smbfs_getpages()
433 vp = ap->a_vp; in smbfs_getpages()
434 if ((object = vp->v_object) == NULL) { in smbfs_getpages()
435 printf("smbfs_getpages: called with non-merged cache vnode??\n"); in smbfs_getpages()
440 cred = td->td_ucred; /* XXX */ in smbfs_getpages()
442 smp = VFSTOSMBFS(vp->v_mount); in smbfs_getpages()
443 pages = ap->a_m; in smbfs_getpages()
444 npages = ap->a_count; in smbfs_getpages()
448 * allow the pager to zero-out the blanks. Partially valid pages in smbfs_getpages()
454 if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0) in smbfs_getpages()
463 kva = (vm_offset_t) bp->b_data; in smbfs_getpages()
473 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); in smbfs_getpages()
479 error = smb_read(smp->sm_share, np->n_fid, &uio, scred); in smbfs_getpages()
490 size = count - uio.uio_resid; in smbfs_getpages()
503 KASSERT(m->dirty == 0, in smbfs_getpages()
510 vm_page_set_valid_range(m, 0, size - toff); in smbfs_getpages()
511 KASSERT(m->dirty == 0, in smbfs_getpages()
516 * we may have hit a zero-fill section. We simply in smbfs_getpages()
524 if (ap->a_rbehind) in smbfs_getpages()
525 *ap->a_rbehind = 0; in smbfs_getpages()
526 if (ap->a_rahead) in smbfs_getpages()
527 *ap->a_rahead = 0; in smbfs_getpages()
542 struct vnode *vp = ap->a_vp; in smbfs_putpages()
548 cred = td->td_ucred; /* XXX */ in smbfs_putpages()
560 struct smbmount *smp; in smbfs_putpages() local
566 cred = td->td_ucred; /* XXX */ in smbfs_putpages()
569 smp = VFSTOSMBFS(vp->v_mount); in smbfs_putpages()
570 pages = ap->a_m; in smbfs_putpages()
571 count = ap->a_count; in smbfs_putpages()
572 rtvals = ap->a_rtvals; in smbfs_putpages()
581 kva = (vm_offset_t) bp->b_data; in smbfs_putpages()
590 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex); in smbfs_putpages()
600 error = smb_write(smp->sm_share, np->n_fid, &uio, scred); in smbfs_putpages()
610 vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid, in smbfs_putpages()
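
The smbfs_getpages() and smbfs_putpages() lines above both start the uio at IDX_TO_OFF(pages[0]->pindex) and then decide, from count - uio_resid, how much of each page was actually transferred. The sketch below replays the page-validity arithmetic used on the read side (the write side undirties pages from the same count); TOY_PAGE_SIZE and the sizes are invented, and this is not the VM code itself.

/*
 * Sketch of the page-validity arithmetic behind the smbfs_getpages() lines
 * above: after a transfer of `count` bytes returns with `uio_resid` left
 * over, each page is fully valid, partially valid, or pure zero-fill.
 */
#include <stdio.h>

#define TOY_PAGE_SIZE	4096

int
main(void)
{
	int npages = 4;
	long count = npages * TOY_PAGE_SIZE;
	long uio_resid = 6000;		/* pretend the read came up short */
	long size = count - uio_resid;	/* bytes actually transferred */
	long toff, nextoff;
	int i;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		nextoff = toff + TOY_PAGE_SIZE;
		if (nextoff <= size)
			printf("page %d: fully valid\n", i);
		else if (toff < size)
			printf("page %d: valid bytes [0,%ld)\n", i, size - toff);
		else
			printf("page %d: zero-fill\n", i);
	}
	return (0);
}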
630 while (np->n_flag & NFLUSHINPROG) { in smbfs_vinvalbuf()
631 np->n_flag |= NFLUSHWANT; in smbfs_vinvalbuf()
632 error = tsleep(&np->n_flag, PRIBIO + 2, "smfsvinv", 2 * hz); in smbfs_vinvalbuf()
637 np->n_flag |= NFLUSHINPROG; in smbfs_vinvalbuf()
643 np->n_flag &= ~NFLUSHINPROG; in smbfs_vinvalbuf()
644 if (np->n_flag & NFLUSHWANT) { in smbfs_vinvalbuf()
645 np->n_flag &= ~NFLUSHWANT; in smbfs_vinvalbuf()
646 wakeup(&np->n_flag); in smbfs_vinvalbuf()
652 np->n_flag &= ~(NMODIFIED | NFLUSHINPROG); in smbfs_vinvalbuf()
653 if (np->n_flag & NFLUSHWANT) { in smbfs_vinvalbuf()
654 np->n_flag &= ~NFLUSHWANT; in smbfs_vinvalbuf()
655 wakeup(&np->n_flag); in smbfs_vinvalbuf()
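
Finally, the smbfs_vinvalbuf() lines above implement a simple exclusion handshake with the NFLUSHINPROG and NFLUSHWANT flags and tsleep()/wakeup(). The sketch below reproduces the same pattern in userland, with a pthread mutex and condition variable standing in for the kernel primitives; it is an analogue of the pattern, not the kernel code.

/*
 * Userland analogue of the NFLUSHINPROG/NFLUSHWANT handshake in the
 * smbfs_vinvalbuf() lines above: only one flusher runs at a time, and
 * anyone who had to wait is woken when the flush finishes.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_node {
	pthread_mutex_t lock;
	pthread_cond_t  cv;
	bool flush_in_prog;	/* analogue of NFLUSHINPROG */
	bool flush_want;	/* analogue of NFLUSHWANT */
};

static void
toy_vinvalbuf(struct toy_node *np)
{
	pthread_mutex_lock(&np->lock);
	while (np->flush_in_prog) {	/* another flush is running */
		np->flush_want = true;
		pthread_cond_wait(&np->cv, &np->lock);
	}
	np->flush_in_prog = true;
	pthread_mutex_unlock(&np->lock);

	/* ... flush and invalidate the node's buffers here ... */

	pthread_mutex_lock(&np->lock);
	np->flush_in_prog = false;
	if (np->flush_want) {
		np->flush_want = false;
		pthread_cond_broadcast(&np->cv);	/* like wakeup(&np->n_flag) */
	}
	pthread_mutex_unlock(&np->lock);
}

int
main(void)
{
	struct toy_node np = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cv = PTHREAD_COND_INITIALIZER,
	};

	toy_vinvalbuf(&np);
	printf("flush done\n");
	return (0);
}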