xref: /titanic_41/usr/src/uts/common/fs/hsfs/hsfs_vnops.c (revision 2917a9c9c3eee6fcaedb239f5f68da01f4ed0da9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Vnode operations for the High Sierra filesystem
30  */
31 
32 #include <sys/types.h>
33 #include <sys/t_lock.h>
34 #include <sys/param.h>
35 #include <sys/time.h>
36 #include <sys/systm.h>
37 #include <sys/sysmacros.h>
38 #include <sys/resource.h>
39 #include <sys/signal.h>
40 #include <sys/cred.h>
41 #include <sys/user.h>
42 #include <sys/buf.h>
43 #include <sys/vfs.h>
44 #include <sys/vfs_opreg.h>
45 #include <sys/stat.h>
46 #include <sys/vnode.h>
47 #include <sys/mode.h>
48 #include <sys/proc.h>
49 #include <sys/disp.h>
50 #include <sys/file.h>
51 #include <sys/fcntl.h>
52 #include <sys/flock.h>
53 #include <sys/kmem.h>
54 #include <sys/uio.h>
55 #include <sys/conf.h>
56 #include <sys/errno.h>
57 #include <sys/mman.h>
58 #include <sys/pathname.h>
59 #include <sys/debug.h>
60 #include <sys/vmsystm.h>
61 #include <sys/cmn_err.h>
62 #include <sys/fbuf.h>
63 #include <sys/dirent.h>
64 #include <sys/errno.h>
65 
66 #include <vm/hat.h>
67 #include <vm/page.h>
68 #include <vm/pvn.h>
69 #include <vm/as.h>
70 #include <vm/seg.h>
71 #include <vm/seg_map.h>
72 #include <vm/seg_kmem.h>
73 #include <vm/seg_vn.h>
74 #include <vm/rm.h>
75 #include <vm/page.h>
76 #include <sys/swap.h>
77 
78 #include <sys/fs/hsfs_spec.h>
79 #include <sys/fs/hsfs_node.h>
80 #include <sys/fs/hsfs_impl.h>
81 #include <sys/fs/hsfs_susp.h>
82 #include <sys/fs/hsfs_rrip.h>
83 
84 #include <fs/fs_subr.h>
85 
86 /*
87  * This tunable controls whether we honor inode numbers from rrip-1.12.
88  * When it is cleared, we fall back to our default inode algorithm.
89  */
90 extern int use_rrip_inodes;
91 
92 
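/*
 * HSFS is a read-only file system, so there is never any dirty data
 * to flush; fsync is therefore a no-op.
 */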
93 /* ARGSUSED */
94 static int
95 hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred)
96 {
97 	return (0);
98 }
99 
100 
101 /*ARGSUSED*/
102 static int
103 hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
104 	struct caller_context *ct)
105 {
106 	caddr_t base;
107 	offset_t diff;
108 	int error;
109 	struct hsnode *hp;
110 	uint_t filesize;
111 
112 	hp = VTOH(vp);
113 	/*
114 	 * If vp is a directory, make sure its dirent is filled in with
115 	 * all its info (a node built from the path table lacks some of it)
116 	 */
117 	if (vp->v_type == VDIR) {
118 		if (hp->hs_dirent.ext_size == 0)
119 			hs_filldirent(vp, &hp->hs_dirent);
120 	}
121 	filesize = hp->hs_dirent.ext_size;
122 
123 	/* Sanity checks. */
124 	if (uiop->uio_resid == 0 ||		/* No data wanted. */
125 	    uiop->uio_loffset > HS_MAXFILEOFF ||	/* Offset too big. */
126 	    uiop->uio_loffset >= filesize)	/* Past EOF. */
127 		return (0);
128 
129 	do {
130 		/*
131 		 * We want to ask for only the "right" amount of data.
132 		 * In this case that means:-
133 		 *
134 		 * We can't get data from beyond our EOF. If asked,
135 		 * we will give a short read.
136 		 *
137 		 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
138 		 * These buffers are always MAXBSIZE aligned.
139 		 * If our starting offset is not MAXBSIZE aligned,
140 		 * we can only ask for less than MAXBSIZE bytes.
141 		 *
142 		 * If our requested offset and length are such that
143 		 * they belong in different MAXBSIZE aligned slots
144 		 * then we'll be making more than one call on
145 		 * segmap_getmapflt.
146 		 *
147 		 * This diagram shows the variables we use and their
148 		 * relationships.
149 		 *
150 		 * |<-----MAXBSIZE----->|
151 		 * +--------------------------...+
152 		 * |.....mapon->|<--n-->|....*...|EOF
153 		 * +--------------------------...+
154 		 * uio_loffset->|
155 		 * uio_resid....|<---------->|
156 		 * diff.........|<-------------->|
157 		 *
158 		 * So, in this case our offset is not aligned
159 		 * and our request takes us outside of the
160 		 * MAXBSIZE window. We will break this up into
161 		 * two segmap_getmapflt calls.
162 		 */
163 		size_t nbytes;
164 		offset_t mapon;
165 		size_t n;
166 		uint_t flags;
167 
168 		mapon = uiop->uio_loffset & MAXBOFFSET;
169 		diff = filesize - uiop->uio_loffset;
170 		nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
171 		n = MIN(diff, nbytes);
172 		if (n <= 0) {
173 			/* EOF or request satisfied. */
174 			return (0);
175 		}
176 
177 		base = segmap_getmapflt(segkmap, vp,
178 		    (u_offset_t)uiop->uio_loffset, n, 1, S_READ);
179 
180 		error = uiomove(base + mapon, n, UIO_READ, uiop);
181 
182 		if (error == 0) {
183 			/*
184 			 * If we read a whole block, or read to EOF,
185 			 * we won't need this buffer again soon.
186 			 */
187 			if (n + mapon == MAXBSIZE ||
188 			    uiop->uio_loffset == filesize)
189 				flags = SM_DONTNEED;
190 			else
191 				flags = 0;
192 			error = segmap_release(segkmap, base, flags);
193 		} else
194 			(void) segmap_release(segkmap, base, 0);
195 	} while (error == 0 && uiop->uio_resid > 0);
196 
197 	return (error);
198 }
199 
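/*
 * Return the attributes of the node, derived from its cached
 * on-disk directory entry.
 */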
200 /*ARGSUSED2*/
201 static int
202 hsfs_getattr(
203 	struct vnode *vp,
204 	struct vattr *vap,
205 	int flags,
206 	struct cred *cred)
207 {
208 	struct hsnode *hp;
209 	struct vfs *vfsp;
210 	struct hsfs *fsp;
211 
212 	hp = VTOH(vp);
213 	fsp = VFS_TO_HSFS(vp->v_vfsp);
214 	vfsp = vp->v_vfsp;
215 
216 	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
217 		hs_filldirent(vp, &hp->hs_dirent);
218 	}
219 	vap->va_type = IFTOVT(hp->hs_dirent.mode);
220 	vap->va_mode = hp->hs_dirent.mode;
221 	vap->va_uid = hp->hs_dirent.uid;
222 	vap->va_gid = hp->hs_dirent.gid;
223 
224 	vap->va_fsid = vfsp->vfs_dev;
225 	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
226 	vap->va_nlink = hp->hs_dirent.nlink;
227 	vap->va_size =	(offset_t)hp->hs_dirent.ext_size;
228 
229 	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
230 	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
231 	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
232 	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
233 	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
234 	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
235 	if (vp->v_type == VCHR || vp->v_type == VBLK)
236 		vap->va_rdev = hp->hs_dirent.r_dev;
237 	else
238 		vap->va_rdev = 0;
239 	vap->va_blksize = vfsp->vfs_bsize;
240 	/* no. of blocks = no. of data blocks + no. of xar blocks */
241 	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
242 	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
243 	vap->va_seq = hp->hs_seq;
244 	return (0);
245 }
246 
247 /*ARGSUSED*/
248 static int
249 hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred)
250 {
251 	struct hsnode *hp;
252 
253 	if (vp->v_type != VLNK)
254 		return (EINVAL);
255 
256 	hp = VTOH(vp);
257 
258 	if (hp->hs_dirent.sym_link == (char *)NULL)
259 		return (ENOENT);
260 
261 	return (uiomove(hp->hs_dirent.sym_link,
262 	    (size_t)MIN(hp->hs_dirent.ext_size,
263 	    uiop->uio_resid), UIO_READ, uiop));
264 }
265 
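/*
 * Called when the last reference to the vnode is being released.
 * Either cache the hsnode on the file system's free list or give it
 * back to the kmem cache, depending on whether it still has pages.
 */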
266 /*ARGSUSED*/
267 static void
268 hsfs_inactive(struct vnode *vp, struct cred *cred)
269 {
270 	struct hsnode *hp;
271 	struct hsfs *fsp;
272 
273 	int nopage;
274 
275 	hp = VTOH(vp);
276 	fsp = VFS_TO_HSFS(vp->v_vfsp);
277 	/*
278 	 * Note: acquiring and holding v_lock for quite a while
279 	 * here serializes on the vnode; this is unfortunate, but
280 	 * likely not to overly impact performance, as the underlying
281 	 * device (CDROM drive) is quite slow.
282 	 */
283 	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
284 	mutex_enter(&hp->hs_contents_lock);
285 	mutex_enter(&vp->v_lock);
286 
287 	if (vp->v_count < 1) {
288 		panic("hsfs_inactive: v_count < 1");
289 		/*NOTREACHED*/
290 	}
291 
292 	if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
293 		vp->v_count--;	/* release hold from vn_rele */
294 		mutex_exit(&vp->v_lock);
295 		mutex_exit(&hp->hs_contents_lock);
296 		rw_exit(&fsp->hsfs_hash_lock);
297 		return;
298 	}
299 	vp->v_count--;	/* release hold from vn_rele */
300 	if (vp->v_count == 0) {
301 		/*
302 		 * Free the hsnode.
303 		 * If there are no pages associated with the
304 		 * hsnode, give it back to the kmem_cache,
305 		 * else put at the end of this file system's
306 		 * internal free list.
307 		 */
308 		nopage = !vn_has_cached_data(vp);
309 		hp->hs_flags = 0;
310 		/*
311 		 * exit these locks now, since hs_freenode may
312 		 * kmem_free the hsnode and embedded vnode
313 		 */
314 		mutex_exit(&vp->v_lock);
315 		mutex_exit(&hp->hs_contents_lock);
316 		hs_freenode(vp, fsp, nopage);
317 	} else {
318 		mutex_exit(&vp->v_lock);
319 		mutex_exit(&hp->hs_contents_lock);
320 	}
321 	rw_exit(&fsp->hsfs_hash_lock);
322 }
323 
324 
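/*
 * Look up the name nm in the directory dvp.  An empty name or "."
 * refers to dvp itself; anything else is handed to hs_dirlook().
 */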
325 /*ARGSUSED*/
326 static int
327 hsfs_lookup(
328 	struct vnode *dvp,
329 	char *nm,
330 	struct vnode **vpp,
331 	struct pathname *pnp,
332 	int flags,
333 	struct vnode *rdir,
334 	struct cred *cred)
335 {
336 	int error;
337 	int namelen = (int)strlen(nm);
338 
339 	if (*nm == '\0') {
340 		VN_HOLD(dvp);
341 		*vpp = dvp;
342 		return (0);
343 	}
344 
345 	/*
346 	 * If we're looking for ourself, life is simple.
347 	 */
348 	if (namelen == 1 && *nm == '.') {
349 		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
350 			return (error);
351 		VN_HOLD(dvp);
352 		*vpp = dvp;
353 		return (0);
354 	}
355 
356 	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
357 }
358 
359 
360 /*ARGSUSED*/
361 static int
362 hsfs_readdir(
363 	struct vnode	*vp,
364 	struct uio	*uiop,
365 	struct cred	*cred,
366 	int		*eofp)
367 {
368 	struct hsnode	*dhp;
369 	struct hsfs	*fsp;
370 	struct hs_direntry hd;
371 	struct dirent64	*nd;
372 	int		error;
373 	uint_t		offset;		/* real offset in directory */
374 	uint_t		dirsiz;		/* real size of directory */
375 	uchar_t		*blkp;
376 	int		hdlen;		/* length of hs directory entry */
377 	long		ndlen;		/* length of dirent entry */
378 	int		bytes_wanted;
379 	size_t		bufsize;	/* size of dirent buffer */
380 	char		*outbuf;	/* ptr to dirent buffer */
381 	char		*dname;
382 	int		dnamelen;
383 	size_t		dname_size;
384 	struct fbuf	*fbp;
385 	uint_t		last_offset;	/* last index into current dir block */
386 	ino64_t		dirino;	/* temporary storage before storing in dirent */
387 	off_t		diroff;
388 
389 	dhp = VTOH(vp);
390 	fsp = VFS_TO_HSFS(vp->v_vfsp);
391 	if (dhp->hs_dirent.ext_size == 0)
392 		hs_filldirent(vp, &dhp->hs_dirent);
393 	dirsiz = dhp->hs_dirent.ext_size;
394 	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
395 		if (eofp)
396 			*eofp = 1;
397 		return (0);
398 	}
399 	ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
400 	offset = uiop->uio_loffset;
401 
402 	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
403 	dname = kmem_alloc(dname_size, KM_SLEEP);
404 	bufsize = uiop->uio_resid + sizeof (struct dirent64);
405 
406 	outbuf = kmem_alloc(bufsize, KM_SLEEP);
407 	nd = (struct dirent64 *)outbuf;
408 
409 	while (offset < dirsiz) {
410 		bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
411 
412 		error = fbread(vp, (offset_t)(offset & MAXBMASK),
413 			(unsigned int)bytes_wanted, S_READ, &fbp);
414 		if (error)
415 			goto done;
416 
417 		blkp = (uchar_t *)fbp->fb_addr;
418 		last_offset = (offset & MAXBMASK) + fbp->fb_count;
419 
420 #define	rel_offset(offset) ((offset) & MAXBOFFSET)	/* index into blkp */
421 
422 		while (offset < last_offset) {
423 			/*
424 			 * Very similar validation code is found in
425 			 * process_dirblock(), hsfs_node.c.
426 			 * For an explanation, see there.
427 			 * It may make sense for the future to
428 			 * "consolidate" the code in hs_parsedir(),
429 			 * process_dirblock() and hsfs_readdir() into
430 			 * a single utility function.
431 			 */
432 			hdlen = (int)((uchar_t)
433 				HDE_DIR_LEN(&blkp[rel_offset(offset)]));
434 			if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
435 			    offset + hdlen > last_offset) {
436 				/*
437 				 * advance to next sector boundary
438 				 */
439 				offset = roundup(offset + 1, HS_SECTOR_SIZE);
440 				if (hdlen)
441 					hs_log_bogus_disk_warning(fsp,
442 					    HSFS_ERR_TRAILING_JUNK, 0);
443 
444 				continue;
445 			}
446 
447 			bzero(&hd, sizeof (hd));
448 
449 			/*
450 			 * Just ignore invalid directory entries.
451 			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
452 			 */
453 			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
454 				&hd, dname, &dnamelen,
455 					last_offset - offset)) {
456 				/*
457 				 * Determine if there is enough room
458 				 */
459 				ndlen = (long)DIRENT64_RECLEN((dnamelen));
460 
461 				if ((ndlen + ((char *)nd - outbuf)) >
462 				    uiop->uio_resid) {
463 					fbrelse(fbp, S_READ);
464 					goto done; /* output buffer full */
465 				}
466 
467 				diroff = offset + hdlen;
468 				/*
469 				 * If the media carries rrip-v1.12 or newer
470 				 * and we trust the inodes from the rrip data
471 				 * (use_rrip_inodes != 0), use that data.
472 				 * Otherwise fall back to the starting extent
473 				 * number.  If the media was created by a
474 				 * recent mkisofs (HSFSMNT_INODE is set), that
475 				 * number is trustworthy even for zero sized
476 				 * files; if not, we substitute HS_DUMMY_INO
477 				 * for zero sized files so that they are not
478 				 * all mapped to the same meta data.
479 				 */
480 				if (hd.inode != 0 && use_rrip_inodes) {
481 					dirino = hd.inode;
482 				} else {
483 					dirino = hd.ext_lbn;
484 					if (hd.ext_size == 0 &&
485 					    (fsp->hsfs_flags &
486 							HSFSMNT_INODE) == 0) {
487 						dirino = HS_DUMMY_INO;
488 					}
489 				}
490 
491 
492 				/* strncpy(9f) will zero uninitialized bytes */
493 
494 				ASSERT(strlen(dname) + 1 <=
495 				    DIRENT64_NAMELEN(ndlen));
496 				(void) strncpy(nd->d_name, dname,
497 				    DIRENT64_NAMELEN(ndlen));
498 				nd->d_reclen = (ushort_t)ndlen;
499 				nd->d_off = (offset_t)diroff;
500 				nd->d_ino = dirino;
501 				nd = (struct dirent64 *)((char *)nd + ndlen);
502 
503 				/*
504 				 * free up space allocated for symlink
505 				 */
506 				if (hd.sym_link != (char *)NULL) {
507 					kmem_free(hd.sym_link,
508 					    (size_t)(hd.ext_size+1));
509 					hd.sym_link = (char *)NULL;
510 				}
511 			}
512 			offset += hdlen;
513 		}
514 		fbrelse(fbp, S_READ);
515 	}
516 
517 	/*
518 	 * Got here for one of the following reasons:
519 	 *	1) outbuf is full (error == 0)
520 	 *	2) end of directory reached (error == 0)
521 	 *	3) error reading directory sector (error != 0)
522 	 *	4) directory entry crosses sector boundary (error == 0)
523 	 *
524 	 * If any directory entries have been copied, don't report
525 	 * case 4.  Instead, return the valid directory entries.
526 	 *
527 	 * If no entries have been copied, report the error.
528 	 * In case 4, this will be indistinguishable from EOF.
529 	 */
530 done:
531 	ndlen = ((char *)nd - outbuf);
532 	if (ndlen != 0) {
533 		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
534 		uiop->uio_loffset = offset;
535 	}
536 	kmem_free(dname, dname_size);
537 	kmem_free(outbuf, bufsize);
538 	if (eofp && error == 0)
539 		*eofp = (uiop->uio_loffset >= dirsiz);
540 	return (error);
541 }
542 
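/*
 * Encode an opaque file identifier (e.g. for NFS file handles) from
 * the directory LBN, the offset of the entry within that directory,
 * and the node id, so that the node can be located again later.
 */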
543 static int
544 hsfs_fid(struct vnode *vp, struct fid *fidp)
545 {
546 	struct hsnode *hp;
547 	struct hsfid *fid;
548 
549 	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
550 		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
551 		return (ENOSPC);
552 	}
553 
554 	fid = (struct hsfid *)fidp;
555 	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
556 	hp = VTOH(vp);
557 	mutex_enter(&hp->hs_contents_lock);
558 	fid->hf_dir_lbn = hp->hs_dir_lbn;
559 	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
560 	fid->hf_ino = hp->hs_nodeid;
561 	mutex_exit(&hp->hs_contents_lock);
562 	return (0);
563 }
564 
565 /*ARGSUSED*/
566 static int
567 hsfs_open(struct vnode **vpp, int flag, struct cred *cred)
568 {
569 	return (0);
570 }
571 
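/*
 * On close, release any record locks and share reservations that the
 * calling process still holds on this vnode.
 */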
572 /*ARGSUSED*/
573 static int
574 hsfs_close(
575 	struct vnode *vp,
576 	int flag,
577 	int count,
578 	offset_t offset,
579 	struct cred *cred)
580 {
581 	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
582 	cleanshares(vp, ttoproc(curthread)->p_pid);
583 	return (0);
584 }
585 
586 /*ARGSUSED2*/
587 static int
588 hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred)
589 {
590 	return (hs_access(vp, (mode_t)mode, cred));
591 }
592 
593 /*
594  * The seek time of a CD-ROM is very slow, and the data transfer
595  * rate is even worse (max. 150K per sec).  The design decision is
596  * to access the CD-ROM as rarely as possible and to transfer a
597  * sizable block (read-ahead) of data at a time.  UFS-style
598  * read-ahead of one block at a time is not appropriate here,
599  * and is not supported.
600  */
601 
602 /*
603  * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
604  */
605 #define	KLUSTSIZE	(56 * 1024)
606 /* we don't support read ahead */
607 int hsfs_lostpage;	/* no. of times we lost original page */
608 
609 /*
610  * Used to prevent biodone() from releasing buf resources that
611  * we didn't allocate in quite the usual way.
612  */
613 /*ARGSUSED*/
614 int
615 hsfs_iodone(struct buf *bp)
616 {
617 	sema_v(&bp->b_io);
618 	return (0);
619 }
620 
621 /*
622  * Each file may have a different interleaving on disk.  This makes
623  * things somewhat interesting.  The gist is that there are some
624  * number of contiguous data sectors, followed by some other number
625  * of contiguous skip sectors.  The sum of those two sets of sectors
626  * defines the interleave size.  Unfortunately, it means that we generally
627  * can't simply read N sectors starting at a given offset to satisfy
628  * any given request.
629  *
630  * What we do is get the relevant memory pages via pvn_read_kluster(),
631  * then stride through the interleaves, setting up a buf for each
632  * sector that needs to be brought in.  Instead of kmem_alloc'ing
633  * space for the sectors, though, we just point at the appropriate
634  * spot in the relevant page for each of them.  This saves us a bunch
635  * of copying.
636  */
637 /*ARGSUSED*/
638 static int
639 hsfs_getapage(
640 	struct vnode *vp,
641 	u_offset_t off,
642 	size_t len,
643 	uint_t *protp,
644 	struct page *pl[],
645 	size_t plsz,
646 	struct seg *seg,
647 	caddr_t addr,
648 	enum seg_rw rw,
649 	struct cred *cred)
650 {
651 	struct hsnode *hp;
652 	struct hsfs *fsp;
653 	int	err;
654 	struct buf *bufs;
655 	caddr_t *vas;
656 	caddr_t va;
657 	struct page *pp, *searchp, *lastp;
658 	page_t	*pagefound;
659 	offset_t	bof;
660 	struct vnode *devvp;
661 	ulong_t	byte_offset;
662 	size_t	io_len_tmp;
663 	uint_t	io_off, io_len;
664 	uint_t	xlen;
665 	uint_t	filsiz;
666 	uint_t	secsize;
667 	uint_t	bufcnt;
668 	uint_t	bufsused;
669 	uint_t	count;
670 	uint_t	io_end;
671 	uint_t	which_chunk_lbn;
672 	uint_t	offset_lbn;
673 	uint_t	offset_extra;
674 	offset_t	offset_bytes;
675 	uint_t	remaining_bytes;
676 	uint_t	extension;
677 	int	remainder;	/* must be signed */
678 	int	chunk_lbn_count;
679 	int	chunk_data_bytes;
680 	int	xarsiz;
681 	diskaddr_t driver_block;
682 	u_offset_t io_off_tmp;
683 
684 	/*
685 	 * We don't support asynchronous operation at the moment, so
686 	 * just pretend we did it.  If the pages are ever actually
687 	 * needed, they'll get brought in then.
688 	 */
689 	if (pl == NULL)
690 		return (0);
691 
692 	hp = VTOH(vp);
693 	fsp = VFS_TO_HSFS(vp->v_vfsp);
694 	devvp = fsp->hsfs_devvp;
695 	secsize = fsp->hsfs_vol.lbn_size;  /* bytes per logical block */
696 
697 	/* file data size */
698 	filsiz = hp->hs_dirent.ext_size;
699 
700 	/* disk addr for start of file */
701 	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
702 
703 	/* xarsiz byte must be skipped for data */
704 	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
705 
706 	/* how many logical blocks in an interleave (data+skip) */
707 	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
708 
709 	if (chunk_lbn_count == 0) {
710 		chunk_lbn_count = 1;
711 	}
712 
713 	/*
714 	 * Convert interleaving size into bytes.  The zero case
715 	 * (no interleaving) optimization is handled as a side-
716 	 * effect of the read-ahead logic.
717 	 */
718 	if (hp->hs_dirent.intlf_sz == 0) {
719 		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
720 	} else {
721 		chunk_data_bytes = LBN_TO_BYTE(hp->hs_dirent.intlf_sz,
722 			vp->v_vfsp);
723 	}
724 
725 reread:
726 	err = 0;
727 	pagefound = 0;
728 
729 	/*
730 	 * Do some read-ahead.  This mostly saves us a bit of
731 	 * system cpu time more than anything else when doing
732 	 * sequential reads.  At some point, we could do the
733 	 * read-ahead asynchronously, which might gain us something
734 	 * in wall time, but it seems unlikely....
735 	 *
736 	 * We do the easy case here, which is to read through
737 	 * the end of the chunk, minus whatever's at the end that
738 	 * won't exactly fill a page.
739 	 */
740 	which_chunk_lbn = (off + len) / chunk_data_bytes;
741 	extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
742 	extension -= (extension % PAGESIZE);
743 	if (extension != 0 && extension < filsiz - off) {
744 		len = extension;
745 	} else {
746 		len = PAGESIZE;
747 	}
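	/*
	 * Illustration with hypothetical numbers (assuming PAGESIZE is 4K
	 * and the file is large enough): with chunk_data_bytes = 56K,
	 * off = 20K and len = 8K, which_chunk_lbn is 0 and extension is
	 * 36K, so we read ahead to the end of the current chunk instead
	 * of just the pages that were asked for.
	 */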
748 	/*
749 	 * Some cd writers don't write sectors that aren't used.  Also,
750 	 * there's no point in reading sectors we'll never look at.  So,
751 	 * if we're asked to go beyond the end of a file, truncate to the
752 	 * length of that file.
753 	 *
754 	 * Additionally, this behaviour is required by section 6.4.5 of
755 	 * ISO 9660:1988(E).
756 	 */
757 	if (len > (filsiz - off)) {
758 		len = filsiz - off;
759 	}
760 
761 	/* A little paranoia. */
762 	ASSERT(len > 0);
763 
764 	/*
765 	 * After all that, make sure we're asking for things in units
766 	 * that bdev_strategy() will understand (see bug 4202551).
767 	 */
768 	len = roundup(len, DEV_BSIZE);
769 
770 	pp = NULL;
771 again:
772 	/* search for page in buffer */
773 	if ((pagefound = page_exists(vp, off)) == 0) {
774 		/*
775 		 * Need to really do disk IO to get the page.
776 		 */
777 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
778 		    &io_len_tmp, off, len, 0);
779 
780 		if (pp == NULL)
781 			goto again;
782 
783 		io_off = (uint_t)io_off_tmp;
784 		io_len = (uint_t)io_len_tmp;
785 
786 		/* check for truncation */
787 		/*
788 		 * xxx Clean up and return EIO instead?
789 		 * xxx Ought to go to u_offset_t for everything, but we
790 		 * xxx call lots of things that want uint_t arguments.
791 		 */
792 		ASSERT(io_off == io_off_tmp);
793 
794 		/*
795 		 * get enough buffers for worst-case scenario
796 		 * (i.e., no coalescing possible).
797 		 */
798 		bufcnt = (len + secsize - 1) / secsize;
799 		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
800 		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
801 		for (count = 0; count < bufcnt; count++) {
802 			bufs[count].b_edev = devvp->v_rdev;
803 			bufs[count].b_dev = cmpdev(devvp->v_rdev);
804 			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
805 			bufs[count].b_iodone = hsfs_iodone;
806 			bufs[count].b_vp = vp;
807 			bufs[count].b_file = vp;
808 			sema_init(&bufs[count].b_io, 0, NULL,
809 			    SEMA_DEFAULT, NULL);
810 			sema_init(&bufs[count].b_sem, 0, NULL,
811 			    SEMA_DEFAULT, NULL);
812 		}
813 
814 		/*
815 		 * If our filesize is not an integer multiple of PAGESIZE,
816 		 * we zero that part of the last page that's between EOF and
817 		 * the PAGESIZE boundary.
818 		 */
819 		xlen = io_len & PAGEOFFSET;
820 		if (xlen != 0)
821 			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
822 
823 		va = NULL;
824 		lastp = NULL;
825 		searchp = pp;
826 		io_end = io_off + io_len;
827 		for (count = 0, byte_offset = io_off;
828 			byte_offset < io_end;
829 			count++) {
830 			ASSERT(count < bufcnt);
831 
832 			/* Compute disk address for interleaving. */
833 
834 			/* considered without skips */
835 			which_chunk_lbn = byte_offset / chunk_data_bytes;
836 
837 			/* factor in skips */
838 			offset_lbn = which_chunk_lbn * chunk_lbn_count;
839 
840 			/* convert to physical byte offset for lbn */
841 			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
842 
843 			/* don't forget offset into lbn */
844 			offset_extra = byte_offset % chunk_data_bytes;
845 
846 			/* get virtual block number for driver */
847 			driver_block = lbtodb(bof + xarsiz
848 				+ offset_bytes + offset_extra);
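			/*
			 * Illustration with hypothetical numbers: with 2K
			 * logical blocks, 2 data sectors followed by 1 skip
			 * sector (chunk_data_bytes = 4K, chunk_lbn_count = 3),
			 * a byte_offset of 5K falls in data chunk 1, so we
			 * step over 3 LBNs (6K) on disk and then add the 1K
			 * remainder within that chunk.
			 */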
849 
850 			if (lastp != searchp) {
851 				/* this branch taken first time through loop */
852 				va = vas[count]
853 					= ppmapin(searchp, PROT_WRITE,
854 						(caddr_t)-1);
855 				/* ppmapin() guarantees not to return NULL */
856 			} else {
857 				vas[count] = NULL;
858 			}
859 
860 			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
861 			bufs[count].b_offset =
862 			    (offset_t)(byte_offset - io_off + off);
863 
864 			/*
865 			 * We specifically use the b_lblkno member here
866 			 * as even in the 32 bit world driver_block can
867 			 * get very large in line with the ISO9660 spec.
868 			 */
869 
870 			bufs[count].b_lblkno = driver_block;
871 
872 			remaining_bytes = ((which_chunk_lbn + 1)
873 				* chunk_data_bytes)
874 				- byte_offset;
875 
876 			/*
877 			 * remaining_bytes can't be zero, as we derived
878 			 * which_chunk_lbn directly from byte_offset.
879 			 */
880 			if ((remaining_bytes + byte_offset) < (off + len)) {
881 				/* coalesce-read the rest of the chunk */
882 				bufs[count].b_bcount = remaining_bytes;
883 			} else {
884 				/* get the final bits */
885 				bufs[count].b_bcount = off + len - byte_offset;
886 			}
887 
888 			/*
889 			 * It would be nice to do multiple pages'
890 			 * worth at once here when the opportunity
891 			 * arises, as that has been shown to improve
892 			 * our wall time.  However, to do that
893 			 * requires that we use the pageio subsystem,
894 			 * which doesn't mix well with what we're
895 			 * already using here.  We can't use pageio
896 			 * all the time, because that subsystem
897 			 * assumes that a page is stored in N
898 			 * contiguous blocks on the device.
899 			 * Interleaving violates that assumption.
900 			 */
901 
902 			remainder = PAGESIZE - (byte_offset % PAGESIZE);
903 			if (bufs[count].b_bcount > remainder) {
904 				bufs[count].b_bcount = remainder;
905 			}
906 
907 			bufs[count].b_bufsize = bufs[count].b_bcount;
908 			if (((offset_t)byte_offset + bufs[count].b_bcount) >
909 				HS_MAXFILEOFF) {
910 				break;
911 			}
912 			byte_offset += bufs[count].b_bcount;
913 
914 			(void) bdev_strategy(&bufs[count]);
915 
916 			lwp_stat_update(LWP_STAT_INBLK, 1);
917 			lastp = searchp;
918 			if ((remainder - bufs[count].b_bcount) < 1) {
919 				searchp = searchp->p_next;
920 			}
921 		}
922 
923 		bufsused = count;
924 		/* Now wait for everything to come in */
925 		for (count = 0; count < bufsused; count++) {
926 			if (err == 0) {
927 				err = biowait(&bufs[count]);
928 			} else
929 				(void) biowait(&bufs[count]);
930 		}
931 
932 		/* Don't leak resources */
933 		for (count = 0; count < bufcnt; count++) {
934 			sema_destroy(&bufs[count].b_io);
935 			sema_destroy(&bufs[count].b_sem);
936 			if (count < bufsused && vas[count] != NULL) {
937 				ppmapout(vas[count]);
938 			}
939 		}
940 
941 		kmem_free(vas, bufcnt * sizeof (caddr_t));
942 		kmem_free(bufs, bufcnt * sizeof (struct buf));
943 	}
944 
945 	if (err) {
946 		pvn_read_done(pp, B_ERROR);
947 		return (err);
948 	}
949 
950 	/*
951 	 * Lock the requested page, and the one after it if possible.
952 	 * Don't bother if our caller hasn't given us a place to stash
953 	 * the page pointers, since otherwise we'd lock pages that would
954 	 * never get unlocked.
955 	 */
956 	if (pagefound) {
957 		int index;
958 		ulong_t soff;
959 
960 		/*
961 		 * Make sure it's in memory before we say it's here.
962 		 */
963 		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
964 			hsfs_lostpage++;
965 			goto reread;
966 		}
967 
968 		pl[0] = pp;
969 		index = 1;
970 
971 		/*
972 		 * Try to lock the next page, if it exists, without
973 		 * blocking.
974 		 */
975 		plsz -= PAGESIZE;
976 		/* LINTED (plsz is unsigned) */
977 		for (soff = off + PAGESIZE; plsz > 0;
978 		    soff += PAGESIZE, plsz -= PAGESIZE) {
979 			pp = page_lookup_nowait(vp, (u_offset_t)soff,
980 					SE_SHARED);
981 			if (pp == NULL)
982 				break;
983 			pl[index++] = pp;
984 		}
985 		pl[index] = NULL;
986 		return (0);
987 	}
988 
989 	if (pp != NULL) {
990 		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
991 	}
992 
993 	return (err);
994 }
995 
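/*
 * VOP_GETPAGE entry point: validate the request, then hand single
 * page requests to hsfs_getapage() and larger ones to pvn_getpages().
 */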
996 static int
997 hsfs_getpage(
998 	struct vnode *vp,
999 	offset_t off,
1000 	size_t len,
1001 	uint_t *protp,
1002 	struct page *pl[],
1003 	size_t plsz,
1004 	struct seg *seg,
1005 	caddr_t addr,
1006 	enum seg_rw rw,
1007 	struct cred *cred)
1008 {
1009 	int err;
1010 	uint_t filsiz;
1011 	struct hsnode *hp = VTOH(vp);
1012 
1013 	/* does not support write */
1014 	if (rw == S_WRITE) {
1015 		panic("write attempt on READ ONLY HSFS");
1016 		/*NOTREACHED*/
1017 	}
1018 
1019 	if (vp->v_flag & VNOMAP) {
1020 		return (ENOSYS);
1021 	}
1022 
1023 	ASSERT(off <= HS_MAXFILEOFF);
1024 
1025 	/*
1026 	 * Determine file data size for EOF check.
1027 	 */
1028 	filsiz = hp->hs_dirent.ext_size;
1029 	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
1030 		return (EFAULT);	/* beyond EOF */
1031 
1032 	if (protp != NULL)
1033 		*protp = PROT_ALL;
1034 
1035 	if (len <= PAGESIZE)
1036 		err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
1037 		    seg, addr, rw, cred);
1038 	else
1039 		err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
1040 		    pl, plsz, seg, addr, rw, cred);
1041 
1042 	return (err);
1043 }
1044 
1045 
1046 
1047 /*
1048  * This function should never be called. We need to have it to pass
1049  * it as an argument to other functions.
1050  */
1051 /*ARGSUSED*/
1052 int
1053 hsfs_putapage(
1054 	vnode_t		*vp,
1055 	page_t		*pp,
1056 	u_offset_t	*offp,
1057 	size_t		*lenp,
1058 	int		flags,
1059 	cred_t		*cr)
1060 {
1061 	/* should never happen - just destroy it */
1062 	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
1063 	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
1064 	return (0);
1065 }
1066 
1067 
1068 /*
1069  * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
1070  * B_INVAL is set by:
1071  *
1072  *	1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
1073  *	2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
1074  *	   which translates to an MC_SYNC with the MS_INVALIDATE flag.
1075  *
1076  * The B_FREE (as well as the B_DONTNEED) flag is set when the
1077  * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
1078  * from SEGVN to release pages behind a pagefault.
1079  */
1080 /*ARGSUSED*/
1081 static int
1082 hsfs_putpage(
1083 	struct vnode	*vp,
1084 	offset_t	off,
1085 	size_t		len,
1086 	int		flags,
1087 	struct cred	*cr)
1088 {
1089 	int error = 0;
1090 
1091 	if (vp->v_count == 0) {
1092 		panic("hsfs_putpage: bad v_count");
1093 		/*NOTREACHED*/
1094 	}
1095 
1096 	if (vp->v_flag & VNOMAP)
1097 		return (ENOSYS);
1098 
1099 	ASSERT(off <= HS_MAXFILEOFF);
1100 
1101 	if (!vn_has_cached_data(vp))	/* no pages mapped */
1102 		return (0);
1103 
1104 	if (len == 0)		/* from 'off' to EOF */
1105 		error = pvn_vplist_dirty(vp, off,
1106 					hsfs_putapage, flags, cr);
1107 	else {
1108 		offset_t end_off = off + len;
1109 		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
1110 		offset_t io_off;
1111 
1112 		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
1113 		if (end_off > file_size)
1114 			end_off = file_size;
1115 
1116 		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
1117 			page_t *pp;
1118 
1119 			/*
1120 			 * We insist on getting the page only if we are
1121 			 * about to invalidate, free or write it and
1122 			 * the B_ASYNC flag is not set.
1123 			 */
1124 			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1125 				pp = page_lookup(vp, io_off,
1126 					(flags & (B_INVAL | B_FREE)) ?
1127 					    SE_EXCL : SE_SHARED);
1128 			} else {
1129 				pp = page_lookup_nowait(vp, io_off,
1130 					(flags & B_FREE) ? SE_EXCL : SE_SHARED);
1131 			}
1132 
1133 			if (pp == NULL)
1134 				continue;
1135 			/*
1136 			 * Normally pvn_getdirty() should return 0, which
1137 			 * implies that it has done the job for us.
1138 			 * The shouldn't-happen scenario is when it returns 1.
1139 			 * This means that the page has been modified and
1140 			 * needs to be put back.
1141 			 * Since we can't write on a CD, we fake a failed
1142 			 * I/O and force pvn_write_done() to destroy the page.
1143 			 */
1144 			if (pvn_getdirty(pp, flags) == 1) {
1145 				cmn_err(CE_NOTE,
1146 					"hsfs_putpage: dirty HSFS page");
1147 				pvn_write_done(pp, flags |
1148 				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
1149 			}
1150 		}
1151 	}
1152 	return (error);
1153 }
1154 
1155 
1156 /*ARGSUSED*/
1157 static int
1158 hsfs_map(
1159 	struct vnode *vp,
1160 	offset_t off,
1161 	struct as *as,
1162 	caddr_t *addrp,
1163 	size_t len,
1164 	uchar_t prot,
1165 	uchar_t maxprot,
1166 	uint_t flags,
1167 	struct cred *cred)
1168 {
1169 	struct segvn_crargs vn_a;
1170 	int error;
1171 
1172 	/* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
1173 
1174 	if (vp->v_flag & VNOMAP)
1175 		return (ENOSYS);
1176 
1177 	if (off > HS_MAXFILEOFF || off < 0 ||
1178 	    (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1179 		return (ENXIO);
1180 
1181 	if (vp->v_type != VREG) {
1182 		return (ENODEV);
1183 	}
1184 
1185 	/*
1186 	 * If file is being locked, disallow mapping.
1187 	 */
1188 	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
1189 		return (EAGAIN);
1190 
1191 	as_rangelock(as);
1192 
1193 	if ((flags & MAP_FIXED) == 0) {
1194 		map_addr(addrp, len, off, 1, flags);
1195 		if (*addrp == NULL) {
1196 			as_rangeunlock(as);
1197 			return (ENOMEM);
1198 		}
1199 	} else {
1200 		/*
1201 		 * User specified address - blow away any previous mappings
1202 		 */
1203 		(void) as_unmap(as, *addrp, len);
1204 	}
1205 
1206 	vn_a.vp = vp;
1207 	vn_a.offset = off;
1208 	vn_a.type = flags & MAP_TYPE;
1209 	vn_a.prot = prot;
1210 	vn_a.maxprot = maxprot;
1211 	vn_a.flags = flags & ~MAP_TYPE;
1212 	vn_a.cred = cred;
1213 	vn_a.amp = NULL;
1214 	vn_a.szc = 0;
1215 	vn_a.lgrp_mem_policy_flags = 0;
1216 
1217 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
1218 	as_rangeunlock(as);
1219 	return (error);
1220 }
1221 
1222 /* ARGSUSED */
1223 static int
1224 hsfs_addmap(
1225 	struct vnode *vp,
1226 	offset_t off,
1227 	struct as *as,
1228 	caddr_t addr,
1229 	size_t len,
1230 	uchar_t prot,
1231 	uchar_t maxprot,
1232 	uint_t flags,
1233 	struct cred *cr)
1234 {
1235 	struct hsnode *hp;
1236 
1237 	if (vp->v_flag & VNOMAP)
1238 		return (ENOSYS);
1239 
1240 	hp = VTOH(vp);
1241 	mutex_enter(&hp->hs_contents_lock);
1242 	hp->hs_mapcnt += btopr(len);
1243 	mutex_exit(&hp->hs_contents_lock);
1244 	return (0);
1245 }
1246 
1247 /*ARGSUSED*/
1248 static int
1249 hsfs_delmap(
1250 	struct vnode *vp,
1251 	offset_t off,
1252 	struct as *as,
1253 	caddr_t addr,
1254 	size_t len,
1255 	uint_t prot,
1256 	uint_t maxprot,
1257 	uint_t flags,
1258 	struct cred *cr)
1259 {
1260 	struct hsnode *hp;
1261 
1262 	if (vp->v_flag & VNOMAP)
1263 		return (ENOSYS);
1264 
1265 	hp = VTOH(vp);
1266 	mutex_enter(&hp->hs_contents_lock);
1267 	hp->hs_mapcnt -= btopr(len);	/* Count released mappings */
1268 	ASSERT(hp->hs_mapcnt >= 0);
1269 	mutex_exit(&hp->hs_contents_lock);
1270 	return (0);
1271 }
1272 
1273 /* ARGSUSED */
1274 static int
1275 hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp)
1276 {
1277 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1278 }
1279 
1280 /* ARGSUSED */
1281 static int
1282 hsfs_frlock(
1283 	struct vnode *vp,
1284 	int cmd,
1285 	struct flock64 *bfp,
1286 	int flag,
1287 	offset_t offset,
1288 	struct flk_callback *flk_cbp,
1289 	cred_t *cr)
1290 {
1291 	struct hsnode *hp = VTOH(vp);
1292 
1293 	/*
1294 	 * If the file is being mapped, disallow fs_frlock.
1295 	 * We are not holding the hs_contents_lock while checking
1296 	 * hs_mapcnt because the current locking strategy drops all
1297 	 * locks before calling fs_frlock.
1298 	 * So, hs_mapcnt could change before we enter fs_frlock making
1299 	 * it meaningless to have held hs_contents_lock in the first place.
1300 	 */
1301 	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
1302 		return (EAGAIN);
1303 
1304 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr));
1305 }
1306 
1307 /* ARGSUSED */
1308 static int
1309 hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr)
1310 {
1311 	struct hsfs	*fsp;
1312 
1313 	int		error = 0;
1314 
1315 	switch (cmd) {
1316 
1317 	case _PC_NAME_MAX:
1318 		fsp = VFS_TO_HSFS(vp->v_vfsp);
1319 		*valp = fsp->hsfs_namemax;
1320 		break;
1321 
1322 	case _PC_FILESIZEBITS:
1323 		*valp = 33;	/* Without multi extent support: 4 GB - 2k */
1324 		break;
1325 
1326 	default:
1327 		error = fs_pathconf(vp, cmd, valp, cr);
1328 	}
1329 
1330 	return (error);
1331 }
1332 
1333 
1334 
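/*
 * Table of the vnode operations supported by HSFS.  A minimal sketch
 * of how such a template is normally turned into the operations
 * vector (the exact call site lives in the mount/initialization code
 * and is not shown in this file):
 *
 *	(void) vn_make_ops("hsfs", hsfs_vnodeops_template, &hsfs_vnodeops);
 */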
1335 const fs_operation_def_t hsfs_vnodeops_template[] = {
1336 	VOPNAME_OPEN,		{ .vop_open = hsfs_open },
1337 	VOPNAME_CLOSE,		{ .vop_close = hsfs_close },
1338 	VOPNAME_READ,		{ .vop_read = hsfs_read },
1339 	VOPNAME_GETATTR,	{ .vop_getattr = hsfs_getattr },
1340 	VOPNAME_ACCESS,		{ .vop_access = hsfs_access },
1341 	VOPNAME_LOOKUP,		{ .vop_lookup = hsfs_lookup },
1342 	VOPNAME_READDIR,	{ .vop_readdir = hsfs_readdir },
1343 	VOPNAME_READLINK,	{ .vop_readlink = hsfs_readlink },
1344 	VOPNAME_FSYNC,		{ .vop_fsync = hsfs_fsync },
1345 	VOPNAME_INACTIVE,	{ .vop_inactive = hsfs_inactive },
1346 	VOPNAME_FID,		{ .vop_fid = hsfs_fid },
1347 	VOPNAME_SEEK,		{ .vop_seek = hsfs_seek },
1348 	VOPNAME_FRLOCK,		{ .vop_frlock = hsfs_frlock },
1349 	VOPNAME_GETPAGE,	{ .vop_getpage = hsfs_getpage },
1350 	VOPNAME_PUTPAGE,	{ .vop_putpage = hsfs_putpage },
1351 	VOPNAME_MAP,		{ .vop_map = hsfs_map },
1352 	VOPNAME_ADDMAP,		{ .vop_addmap = hsfs_addmap },
1353 	VOPNAME_DELMAP,		{ .vop_delmap = hsfs_delmap },
1354 	VOPNAME_PATHCONF,	{ .vop_pathconf = hsfs_pathconf },
1355 	NULL,			NULL
1356 };
1357 
1358 struct vnodeops *hsfs_vnodeops;
1359