1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * Vnode operations for the High Sierra filesystem
30  */
31 
32 #include <sys/types.h>
33 #include <sys/t_lock.h>
34 #include <sys/param.h>
35 #include <sys/time.h>
36 #include <sys/systm.h>
37 #include <sys/sysmacros.h>
38 #include <sys/resource.h>
39 #include <sys/signal.h>
40 #include <sys/cred.h>
41 #include <sys/user.h>
42 #include <sys/buf.h>
43 #include <sys/vfs.h>
44 #include <sys/stat.h>
45 #include <sys/vnode.h>
46 #include <sys/mode.h>
47 #include <sys/proc.h>
48 #include <sys/disp.h>
49 #include <sys/file.h>
50 #include <sys/fcntl.h>
51 #include <sys/flock.h>
52 #include <sys/kmem.h>
53 #include <sys/uio.h>
54 #include <sys/conf.h>
55 #include <sys/errno.h>
56 #include <sys/mman.h>
57 #include <sys/pathname.h>
58 #include <sys/debug.h>
59 #include <sys/vmsystm.h>
60 #include <sys/cmn_err.h>
61 #include <sys/fbuf.h>
62 #include <sys/dirent.h>
64 
65 #include <vm/hat.h>
66 #include <vm/page.h>
67 #include <vm/pvn.h>
68 #include <vm/as.h>
69 #include <vm/seg.h>
70 #include <vm/seg_map.h>
71 #include <vm/seg_kmem.h>
72 #include <vm/seg_vn.h>
73 #include <vm/rm.h>
75 #include <sys/swap.h>
76 
77 #include <sys/fs/hsfs_spec.h>
78 #include <sys/fs/hsfs_node.h>
79 #include <sys/fs/hsfs_impl.h>
80 #include <sys/fs/hsfs_susp.h>
81 #include <sys/fs/hsfs_rrip.h>
82 
83 #include <fs/fs_subr.h>
84 
85 /* ARGSUSED */
86 static int
87 hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred)
88 {
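	/*
	 * HSFS is a read-only filesystem, so there is never any dirty
	 * data to push back to the media; fsync can simply succeed.
	 */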
89 	return (0);
90 }
91 
92 
93 /*ARGSUSED*/
94 static int
95 hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
96 	struct caller_context *ct)
97 {
98 	caddr_t base;
99 	offset_t diff;
100 	int error;
101 	struct hsnode *hp;
102 	uint_t filesize;
103 
104 	hp = VTOH(vp);
105 	/*
106 	 * If vp is a directory, make sure its dirent is fully filled
107 	 * in; nodes created from the path table may lack some fields.
108 	 */
109 	if (vp->v_type == VDIR) {
110 		if (hp->hs_dirent.ext_size == 0)
111 			hs_filldirent(vp, &hp->hs_dirent);
112 	}
113 	filesize = hp->hs_dirent.ext_size;
114 
115 	/* Sanity checks. */
116 	if (uiop->uio_resid == 0 ||		/* No data wanted. */
117 	    uiop->uio_loffset > HS_MAXFILEOFF ||	/* Offset too big. */
118 	    uiop->uio_loffset >= filesize)	/* Past EOF. */
119 		return (0);
120 
121 	do {
122 		/*
123 		 * We want to ask for only the "right" amount of data.
124 		 * In this case that means:-
125 		 *
126 		 * We can't get data from beyond our EOF. If asked,
127 		 * we will give a short read.
128 		 *
129 		 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
130 		 * These buffers are always MAXBSIZE aligned.
131 		 * If our starting offset is not MAXBSIZE aligned,
132 		 * we can only ask for less than MAXBSIZE bytes.
133 		 *
134 		 * If our requested offset and length are such that
135 		 * they belong in different MAXBSIZE aligned slots
136 		 * then we'll be making more than one call on
137 		 * segmap_getmapflt.
138 		 *
139 		 * This diagram shows the variables we use and their
140 		 * relationships.
141 		 *
142 		 * |<-----MAXBSIZE----->|
143 		 * +--------------------------...+
144 		 * |.....mapon->|<--n-->|....*...|EOF
145 		 * +--------------------------...+
146 		 * uio_loffset->|
147 		 * uio_resid....|<---------->|
148 		 * diff.........|<-------------->|
149 		 *
150 		 * So, in this case our offset is not aligned
151 		 * and our request takes us outside of the
152 		 * MAXBSIZE window. We will break this up into
153 		 * two segmap_getmapflt calls.
154 		 */
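		/*
		 * For example (assuming MAXBSIZE is 8K): a 6K read
		 * starting at offset 5K has mapon = 5K, so this pass
		 * can map at most 3K (8K - 5K) and moves MIN(diff, 3K)
		 * bytes; the remaining 3K comes from the next MAXBSIZE
		 * window on the next loop iteration.
		 */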
155 		size_t nbytes;
156 		offset_t mapon;
157 		size_t n;
158 		uint_t flags;
159 
160 		mapon = uiop->uio_loffset & MAXBOFFSET;
161 		diff = filesize - uiop->uio_loffset;
162 		nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
163 		n = MIN(diff, nbytes);
164 		if (n <= 0) {
165 			/* EOF or request satisfied. */
166 			return (0);
167 		}
168 
169 		base = segmap_getmapflt(segkmap, vp,
170 		    (u_offset_t)uiop->uio_loffset, n, 1, S_READ);
171 
172 		error = uiomove(base + mapon, n, UIO_READ, uiop);
173 
174 		if (error == 0) {
175 			/*
176 			 * If we read a whole block or read to EOF,
177 			 * we won't need this buffer again soon.
178 			 */
179 			if (n + mapon == MAXBSIZE ||
180 			    uiop->uio_loffset == filesize)
181 				flags = SM_DONTNEED;
182 			else
183 				flags = 0;
184 			error = segmap_release(segkmap, base, flags);
185 		} else
186 			(void) segmap_release(segkmap, base, 0);
187 	} while (error == 0 && uiop->uio_resid > 0);
188 
189 	return (error);
190 }
191 
192 /*ARGSUSED2*/
193 static int
194 hsfs_getattr(
195 	struct vnode *vp,
196 	struct vattr *vap,
197 	int flags,
198 	struct cred *cred)
199 {
200 	struct hsnode *hp;
201 	struct vfs *vfsp;
202 	struct hsfs *fsp;
203 
204 	hp = VTOH(vp);
205 	fsp = VFS_TO_HSFS(vp->v_vfsp);
206 	vfsp = vp->v_vfsp;
207 
208 	if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
209 		hs_filldirent(vp, &hp->hs_dirent);
210 	}
211 	vap->va_type = IFTOVT(hp->hs_dirent.mode);
212 	vap->va_mode = hp->hs_dirent.mode;
213 	vap->va_uid = hp->hs_dirent.uid;
214 	vap->va_gid = hp->hs_dirent.gid;
215 
216 	vap->va_fsid = vfsp->vfs_dev;
217 	vap->va_nodeid = (ino64_t)hp->hs_nodeid;
218 	vap->va_nlink = hp->hs_dirent.nlink;
219 	vap->va_size =	(offset_t)hp->hs_dirent.ext_size;
220 
221 	vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
222 	vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
223 	vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
224 	vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
225 	vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
226 	vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
227 	if (vp->v_type == VCHR || vp->v_type == VBLK)
228 		vap->va_rdev = hp->hs_dirent.r_dev;
229 	else
230 		vap->va_rdev = 0;
231 	vap->va_blksize = vfsp->vfs_bsize;
232 	/* no. of blocks = no. of data blocks + no. of xar blocks */
233 	vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
234 	    (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
235 	vap->va_seq = hp->hs_seq;
236 	return (0);
237 }
238 
239 /*ARGSUSED*/
240 static int
241 hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred)
242 {
243 	struct hsnode *hp;
244 
245 	if (vp->v_type != VLNK)
246 		return (EINVAL);
247 
248 	hp = VTOH(vp);
249 
250 	if (hp->hs_dirent.sym_link == (char *)NULL)
251 		return (ENOENT);
252 
253 	return (uiomove(hp->hs_dirent.sym_link,
254 	    (size_t)MIN(hp->hs_dirent.ext_size,
255 	    uiop->uio_resid), UIO_READ, uiop));
256 }
257 
258 /*ARGSUSED*/
259 static void
260 hsfs_inactive(struct vnode *vp, struct cred *cred)
261 {
262 	struct hsnode *hp;
263 	struct hsfs *fsp;
264 
265 	int nopage;
266 
267 	hp = VTOH(vp);
268 	fsp = VFS_TO_HSFS(vp->v_vfsp);
269 	/*
270 	 * Note: acquiring and holding v_lock for quite a while
271 	 * here serializes on the vnode; this is unfortunate, but
272 	 * unlikely to hurt performance much, as the underlying
273 	 * device (a CD-ROM drive) is quite slow.
274 	 */
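	/*
	 * Lock ordering here is hsfs_hash_lock, then hs_contents_lock,
	 * then v_lock.
	 */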
275 	rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
276 	mutex_enter(&hp->hs_contents_lock);
277 	mutex_enter(&vp->v_lock);
278 
279 	if (vp->v_count < 1) {
280 		panic("hsfs_inactive: v_count < 1");
281 		/*NOTREACHED*/
282 	}
283 
284 	if (vp->v_count > 1 || (hp->hs_flags & HREF) == 0) {
285 		vp->v_count--;	/* release hold from vn_rele */
286 		mutex_exit(&vp->v_lock);
287 		mutex_exit(&hp->hs_contents_lock);
288 		rw_exit(&fsp->hsfs_hash_lock);
289 		return;
290 	}
291 	vp->v_count--;	/* release hold from vn_rele */
292 	if (vp->v_count == 0) {
293 		/*
294 		 * Free the hsnode.
295 		 * If there are no pages associated with the
296 		 * hsnode, give it back to the kmem_cache,
297 		 * else put at the end of this file system's
298 		 * internal free list.
299 		 */
300 		nopage = !vn_has_cached_data(vp);
301 		hp->hs_flags = 0;
302 		/*
303 		 * exit these locks now, since hs_freenode may
304 		 * kmem_free the hsnode and embedded vnode
305 		 */
306 		mutex_exit(&vp->v_lock);
307 		mutex_exit(&hp->hs_contents_lock);
308 		hs_freenode(vp, fsp, nopage);
309 	} else {
310 		mutex_exit(&vp->v_lock);
311 		mutex_exit(&hp->hs_contents_lock);
312 	}
313 	rw_exit(&fsp->hsfs_hash_lock);
314 }
315 
316 
317 /*ARGSUSED*/
318 static int
319 hsfs_lookup(
320 	struct vnode *dvp,
321 	char *nm,
322 	struct vnode **vpp,
323 	struct pathname *pnp,
324 	int flags,
325 	struct vnode *rdir,
326 	struct cred *cred)
327 {
328 	int error;
329 	int namelen = (int)strlen(nm);
330 
331 	if (*nm == '\0') {
332 		VN_HOLD(dvp);
333 		*vpp = dvp;
334 		return (0);
335 	}
336 
337 	/*
338 	 * If we're looking for ourself, life is simple.
339 	 */
340 	if (namelen == 1 && *nm == '.') {
341 		if (error = hs_access(dvp, (mode_t)VEXEC, cred))
342 			return (error);
343 		VN_HOLD(dvp);
344 		*vpp = dvp;
345 		return (0);
346 	}
347 
348 	return (hs_dirlook(dvp, nm, namelen, vpp, cred));
349 }
350 
351 
352 /*ARGSUSED*/
353 static int
354 hsfs_readdir(
355 	struct vnode	*vp,
356 	struct uio	*uiop,
357 	struct cred	*cred,
358 	int		*eofp)
359 {
360 	struct hsnode	*dhp;
361 	struct hsfs	*fsp;
362 	struct hs_direntry hd;
363 	struct dirent64	*nd;
364 	int		error;
365 	uint_t		offset;		/* real offset in directory */
366 	uint_t		dirsiz;		/* real size of directory */
367 	uchar_t		*blkp;
368 	int		hdlen;		/* length of hs directory entry */
369 	long		ndlen;		/* length of dirent entry */
370 	int		bytes_wanted;
371 	size_t		bufsize;	/* size of dirent buffer */
372 	char		*outbuf;	/* ptr to dirent buffer */
373 	char		*dname;
374 	int		dnamelen;
375 	size_t		dname_size;
376 	struct fbuf	*fbp;
377 	uint_t		last_offset;	/* last index into current dir block */
378 	ulong_t		dir_lbn;	/* lbn of directory */
379 	ino64_t		dirino;	/* temporary storage before storing in dirent */
380 	off_t		diroff;
381 
382 	dhp = VTOH(vp);
383 	fsp = VFS_TO_HSFS(vp->v_vfsp);
384 	if (dhp->hs_dirent.ext_size == 0)
385 		hs_filldirent(vp, &dhp->hs_dirent);
386 	dirsiz = dhp->hs_dirent.ext_size;
387 	dir_lbn = dhp->hs_dirent.ext_lbn;
388 	if (uiop->uio_loffset >= dirsiz) {	/* at or beyond EOF */
389 		if (eofp)
390 			*eofp = 1;
391 		return (0);
392 	}
393 	ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
394 	offset = uiop->uio_loffset;
395 
396 	dname_size = fsp->hsfs_namemax + 1;	/* 1 for the ending NUL */
397 	dname = kmem_alloc(dname_size, KM_SLEEP);
398 	bufsize = uiop->uio_resid + sizeof (struct dirent64);
399 
400 	outbuf = kmem_alloc(bufsize, KM_SLEEP);
401 	nd = (struct dirent64 *)outbuf;
402 
403 	while (offset < dirsiz) {
404 		bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
405 
406 		error = fbread(vp, (offset_t)(offset & MAXBMASK),
407 			(unsigned int)bytes_wanted, S_READ, &fbp);
408 		if (error)
409 			goto done;
410 
411 		blkp = (uchar_t *)fbp->fb_addr;
412 		last_offset = (offset & MAXBMASK) + fbp->fb_count;
413 
414 #define	rel_offset(offset) ((offset) & MAXBOFFSET)	/* index into blkp */
415 
416 		while (offset < last_offset) {
417 			/*
418 			 * Very similar validation code is found in
419 			 * process_dirblock(), hsfs_node.c.
420 			 * For an explanation, see there.
421 			 * It may make sense for the future to
422 			 * "consolidate" the code in hs_parsedir(),
423 			 * process_dirblock() and hsfs_readdir() into
424 			 * a single utility function.
425 			 */
426 			hdlen = (int)((uchar_t)
427 				HDE_DIR_LEN(&blkp[rel_offset(offset)]));
428 			if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
429 			    offset + hdlen > last_offset) {
430 				/*
431 				 * advance to next sector boundary
432 				 */
433 				offset = roundup(offset + 1, HS_SECTOR_SIZE);
434 				if (hdlen)
435 					hs_log_bogus_disk_warning(fsp,
436 					    HSFS_ERR_TRAILING_JUNK, 0);
437 
438 				continue;
439 			}
440 
441 			bzero(&hd, sizeof (hd));
442 
443 			/*
444 			 * Just ignore invalid directory entries.
445 			 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
446 			 */
447 			if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
448 				&hd, dname, &dnamelen,
449 					last_offset - rel_offset(offset))) {
450 				/*
451 				 * Determine if there is enough room
452 				 */
453 				ndlen = (long)DIRENT64_RECLEN((dnamelen));
454 
455 				if ((ndlen + ((char *)nd - outbuf)) >
456 				    uiop->uio_resid) {
457 					fbrelse(fbp, S_READ);
458 					goto done; /* output buffer full */
459 				}
460 
461 				diroff = offset + hdlen;
462 				/*
463 				 * Generate nodeid.
464 				 * If a directory, nodeid points to the
465 				 * canonical dirent describing the directory:
466 				 * the dirent of the "." entry for the
467 				 * directory, which is pointed to by all
468 				 * dirents for that directory.
469 				 * Otherwise, nodeid points to dirent of file.
470 				 */
471 				if (hd.type == VDIR) {
472 					dirino = (ino64_t)
473 					    MAKE_NODEID(hd.ext_lbn, 0,
474 					    vp->v_vfsp);
475 				} else {
476 					struct hs_volume *hvp;
477 					offset_t lbn, off;
478 
479 					/*
480 					 * Normalize lbn and off
481 					 */
482 					hvp = &fsp->hsfs_vol;
483 					lbn = dir_lbn +
484 					    (offset >> hvp->lbn_shift);
485 					off = offset & hvp->lbn_maxoffset;
486 					dirino = (ino64_t)MAKE_NODEID(lbn,
487 					    off, vp->v_vfsp);
488 				}
489 
490 
491 				/* strncpy(9f) will zero uninitialized bytes */
492 
493 				ASSERT(strlen(dname) + 1 <=
494 				    DIRENT64_NAMELEN(ndlen));
495 				(void) strncpy(nd->d_name, dname,
496 				    DIRENT64_NAMELEN(ndlen));
497 				nd->d_reclen = (ushort_t)ndlen;
498 				nd->d_off = (offset_t)diroff;
499 				nd->d_ino = dirino;
500 				nd = (struct dirent64 *)((char *)nd + ndlen);
501 
502 				/*
503 				 * free up space allocated for symlink
504 				 */
505 				if (hd.sym_link != (char *)NULL) {
506 					kmem_free(hd.sym_link,
507 					    (size_t)(hd.ext_size+1));
508 					hd.sym_link = (char *)NULL;
509 				}
510 			}
511 			offset += hdlen;
512 		}
513 		fbrelse(fbp, S_READ);
514 	}
515 
516 	/*
517 	 * Got here for one of the following reasons:
518 	 *	1) outbuf is full (error == 0)
519 	 *	2) end of directory reached (error == 0)
520 	 *	3) error reading directory sector (error != 0)
521 	 *	4) directory entry crosses sector boundary (error == 0)
522 	 *
523 	 * If any directory entries have been copied, don't report
524 	 * case 4.  Instead, return the valid directory entries.
525 	 *
526 	 * If no entries have been copied, report the error.
527  * If case 4, this will be indistinguishable from EOF.
528 	 */
529 done:
530 	ndlen = ((char *)nd - outbuf);
531 	if (ndlen != 0) {
532 		error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
533 		uiop->uio_loffset = offset;
534 	}
535 	kmem_free(dname, dname_size);
536 	kmem_free(outbuf, bufsize);
537 	if (eofp && error == 0)
538 		*eofp = (uiop->uio_loffset >= dirsiz);
539 	return (error);
540 }
541 
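/*
 * Build a file handle for this node.  The hsfid identifies a node by
 * the logical block number of the directory that contains it plus the
 * offset of its directory entry within that directory; on a read-only
 * medium this pair is stable, so it serves as a persistent handle
 * (e.g., for NFS export).
 */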
542 static int
543 hsfs_fid(struct vnode *vp, struct fid *fidp)
544 {
545 	struct hsnode *hp;
546 	struct hsfid *fid;
547 
548 	if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
549 		fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
550 		return (ENOSPC);
551 	}
552 
553 	fid = (struct hsfid *)fidp;
554 	fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
555 	hp = VTOH(vp);
556 	mutex_enter(&hp->hs_contents_lock);
557 	fid->hf_dir_lbn = hp->hs_dir_lbn;
558 	fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
559 	mutex_exit(&hp->hs_contents_lock);
560 	return (0);
561 }
562 
563 /*ARGSUSED*/
564 static int
565 hsfs_open(struct vnode **vpp, int flag, struct cred *cred)
566 {
567 	return (0);
568 }
569 
570 /*ARGSUSED*/
571 static int
572 hsfs_close(
573 	struct vnode *vp,
574 	int flag,
575 	int count,
576 	offset_t offset,
577 	struct cred *cred)
578 {
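	/*
	 * Release any record locks and share reservations held by this
	 * process on the file; nothing else needs tearing down on close.
	 */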
579 	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
580 	cleanshares(vp, ttoproc(curthread)->p_pid);
581 	return (0);
582 }
583 
584 /*ARGSUSED2*/
585 static int
586 hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred)
587 {
588 	return (hs_access(vp, (mode_t)mode, cred));
589 }
590 
591 /*
592  * The seek time of a CD-ROM is very slow, and the data transfer
593  * rate is even worse (max. 150K per second).  The design
594  * decision is to reduce accesses to the CD-ROM as much as possible,
595  * and to transfer a sizable block (read-ahead) of data at a time.
596  * UFS-style read-ahead of one block at a time is not appropriate
597  * and is not supported.
598  */
599 
600 /*
601  * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
602  */
603 #define	KLUSTSIZE	(56 * 1024)
604 /* we don't support read ahead */
605 int hsfs_lostpage;	/* no. of times we lost original page */
606 
607 /*
608  * Used to prevent biodone() from releasing buf resources that
609  * we didn't allocate in quite the usual way.
610  */
611 /*ARGSUSED*/
612 int
613 hsfs_iodone(struct buf *bp)
614 {
615 	sema_v(&bp->b_io);
616 	return (0);
617 }
618 
619 /*
620  * Each file may have a different interleaving on disk.  This makes
621  * things somewhat interesting.  The gist is that there are some
622  * number of contiguous data sectors, followed by some other number
623  * of contiguous skip sectors.  The sum of those two sets of sectors
624  * defines the interleave size.  Unfortunately, it means that we generally
625  * can't simply read N sectors starting at a given offset to satisfy
626  * any given request.
627  *
628  * What we do is get the relevant memory pages via pvn_read_kluster(),
629  * then stride through the interleaves, setting up a buf for each
630  * sector that needs to be brought in.  Instead of kmem_alloc'ing
631  * space for the sectors, though, we just point at the appropriate
632  * spot in the relevant page for each of them.  This saves us a bunch
633  * of copying.
634  */
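/*
 * For example, with a 2K logical block size, a file recorded with
 * intlf_sz = 2 and intlf_sk = 1 alternates 4K of data with 2K of skip
 * space; file byte 4096 then lives 6144 bytes into the extent (one
 * full (2 + 1)-block interleave chunk in, plus 0 bytes extra).
 */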
635 /*ARGSUSED*/
636 static int
637 hsfs_getapage(
638 	struct vnode *vp,
639 	u_offset_t off,
640 	size_t len,
641 	uint_t *protp,
642 	struct page *pl[],
643 	size_t plsz,
644 	struct seg *seg,
645 	caddr_t addr,
646 	enum seg_rw rw,
647 	struct cred *cred)
648 {
649 	struct hsnode *hp;
650 	struct hsfs *fsp;
651 	int	err;
652 	struct buf *bufs;
653 	caddr_t *vas;
654 	caddr_t va;
655 	struct page *pp, *searchp, *lastp;
656 	page_t	*pagefound;
657 	offset_t	bof;
658 	struct vnode *devvp;
659 	ulong_t	byte_offset;
660 	size_t	io_len_tmp;
661 	uint_t	io_off, io_len;
662 	uint_t	xlen;
663 	uint_t	filsiz;
664 	uint_t	secsize;
665 	uint_t	bufcnt;
666 	uint_t	bufsused;
667 	uint_t	count;
668 	uint_t	io_end;
669 	uint_t	which_chunk_lbn;
670 	uint_t	offset_lbn;
671 	uint_t	offset_extra;
672 	offset_t	offset_bytes;
673 	uint_t	remaining_bytes;
674 	uint_t	extension;
675 	int	remainder;	/* must be signed */
676 	int	chunk_lbn_count;
677 	int	chunk_data_bytes;
678 	int	xarsiz;
679 	diskaddr_t driver_block;
680 	u_offset_t io_off_tmp;
681 
682 	/*
683 	 * We don't support asynchronous operation at the moment, so
684 	 * just pretend we did it.  If the pages are ever actually
685 	 * needed, they'll get brought in then.
686 	 */
687 	if (pl == NULL)
688 		return (0);
689 
690 	hp = VTOH(vp);
691 	fsp = VFS_TO_HSFS(vp->v_vfsp);
692 	devvp = fsp->hsfs_devvp;
693 	secsize = fsp->hsfs_vol.lbn_size;  /* bytes per logical block */
694 
695 	/* file data size */
696 	filsiz = hp->hs_dirent.ext_size;
697 
698 	/* disk addr for start of file */
699 	bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
700 
701 	/* xarsiz byte must be skipped for data */
702 	xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
703 
704 	/* how many logical blocks in an interleave (data+skip) */
705 	chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
706 
707 	if (chunk_lbn_count == 0) {
708 		chunk_lbn_count = 1;
709 	}
710 
711 	/*
712 	 * Convert interleaving size into bytes.  The zero case
713 	 * (no interleaving) optimization is handled as a side-
714 	 * effect of the read-ahead logic.
715 	 */
716 	if (hp->hs_dirent.intlf_sz == 0) {
717 		chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
718 	} else {
719 		chunk_data_bytes = LBN_TO_BYTE(hp->hs_dirent.intlf_sz,
720 			vp->v_vfsp);
721 	}
722 
723 reread:
724 	err = 0;
725 	pagefound = 0;
726 
727 	/*
728 	 * Do some read-ahead.  This mostly saves us a bit of
729 	 * system CPU time when doing sequential reads.  At some
730 	 * point we could do the read-ahead asynchronously, which
731 	 * might gain us something on wall time, but it seems
732 	 * unlikely....
733 	 *
734 	 * We do the easy case here, which is to read through
735 	 * the end of the chunk, minus whatever's at the end that
736 	 * won't exactly fill a page.
737 	 */
738 	which_chunk_lbn = (off + len) / chunk_data_bytes;
739 	extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
740 	extension -= (extension % PAGESIZE);
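	/*
	 * 'extension' is now the distance from off to the end of the data
	 * chunk containing off + len, rounded down to a page boundary.
	 * Read that far ahead, unless the rounding left nothing or the
	 * chunk end is at or beyond EOF, in which case read one page and
	 * let the EOF clamp below trim it.
	 */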
741 	if (extension != 0 && extension < filsiz - off) {
742 		len = extension;
743 	} else {
744 		len = PAGESIZE;
745 	}
746 	/*
747 	 * Some cd writers don't write sectors that aren't used.  Also,
748 	 * there's no point in reading sectors we'll never look at.  So,
749 	 * if we're asked to go beyond the end of a file, truncate to the
750 	 * length of that file.
751 	 *
752 	 * Additionally, this behaviour is required by section 6.4.5 of
753 	 * ISO 9660:1988(E).
754 	 */
755 	if (len > (filsiz - off)) {
756 		len = filsiz - off;
757 	}
758 
759 	/* A little paranoia. */
760 	ASSERT(len > 0);
761 
762 	/*
763 	 * After all that, make sure we're asking for things in units
764 	 * that bdev_strategy() will understand (see bug 4202551).
765 	 */
766 	len = roundup(len, DEV_BSIZE);
767 
768 	pp = NULL;
769 again:
770 	/* search for page in buffer */
771 	if ((pagefound = page_exists(vp, off)) == 0) {
772 		/*
773 		 * Need to really do disk IO to get the page.
774 		 */
775 		pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
776 		    &io_len_tmp, off, len, 0);
777 
778 		if (pp == NULL)
779 			goto again;
780 
781 		io_off = (uint_t)io_off_tmp;
782 		io_len = (uint_t)io_len_tmp;
783 
784 		/* check for truncation */
785 		/*
786 		 * xxx Clean up and return EIO instead?
787 		 * xxx Ought to go to u_offset_t for everything, but we
788 		 * xxx call lots of things that want uint_t arguments.
789 		 */
790 		ASSERT(io_off == io_off_tmp);
791 
792 		/*
793 		 * get enough buffers for worst-case scenario
794 		 * (i.e., no coalescing possible).
795 		 */
796 		bufcnt = (len + secsize - 1) / secsize;
797 		bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
798 		vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
799 		for (count = 0; count < bufcnt; count++) {
800 			bufs[count].b_edev = devvp->v_rdev;
801 			bufs[count].b_dev = cmpdev(devvp->v_rdev);
802 			bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
803 			bufs[count].b_iodone = hsfs_iodone;
804 			bufs[count].b_vp = vp;
805 			bufs[count].b_file = vp;
806 			sema_init(&bufs[count].b_io, 0, NULL,
807 			    SEMA_DEFAULT, NULL);
808 			sema_init(&bufs[count].b_sem, 0, NULL,
809 			    SEMA_DEFAULT, NULL);
810 		}
811 
812 		/*
813 		 * If our filesize is not an integer multiple of PAGESIZE,
814 		 * we zero that part of the last page that's between EOF and
815 		 * the PAGESIZE boundary.
816 		 */
817 		xlen = io_len & PAGEOFFSET;
818 		if (xlen != 0)
819 			pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
820 
821 		va = NULL;
822 		lastp = NULL;
823 		searchp = pp;
824 		io_end = io_off + io_len;
825 		for (count = 0, byte_offset = io_off;
826 			byte_offset < io_end;
827 			count++) {
828 			ASSERT(count < bufcnt);
829 
830 			/* Compute disk address for interleaving. */
831 
832 			/* considered without skips */
833 			which_chunk_lbn = byte_offset / chunk_data_bytes;
834 
835 			/* factor in skips */
836 			offset_lbn = which_chunk_lbn * chunk_lbn_count;
837 
838 			/* convert to physical byte offset for lbn */
839 			offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
840 
841 			/* don't forget offset into lbn */
842 			offset_extra = byte_offset % chunk_data_bytes;
843 
844 			/* get virtual block number for driver */
845 			driver_block = lbtodb(bof + xarsiz
846 				+ offset_bytes + offset_extra);
847 
848 			if (lastp != searchp) {
849 				/* this branch taken first time through loop */
850 				va = vas[count]
851 					= ppmapin(searchp, PROT_WRITE,
852 						(caddr_t)-1);
853 				/* ppmapin() guarantees not to return NULL */
854 			} else {
855 				vas[count] = NULL;
856 			}
857 
858 			bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
859 			bufs[count].b_offset =
860 			    (offset_t)(byte_offset - io_off + off);
861 
862 			/*
863 			 * We specifically use the b_lblkno member here
864 			 * as even in the 32 bit world driver_block can
865 			 * get very large in line with the ISO9660 spec.
866 			 */
867 
868 			bufs[count].b_lblkno = driver_block;
869 
870 			remaining_bytes = ((which_chunk_lbn + 1)
871 				* chunk_data_bytes)
872 				- byte_offset;
873 
874 			/*
875 			 * remaining_bytes can't be zero, as we derived
876 			 * which_chunk_lbn directly from byte_offset.
877 			 */
878 			if ((remaining_bytes + byte_offset) < (off + len)) {
879 				/* coalesce-read the rest of the chunk */
880 				bufs[count].b_bcount = remaining_bytes;
881 			} else {
882 				/* get the final bits */
883 				bufs[count].b_bcount = off + len - byte_offset;
884 			}
885 
886 			/*
887 			 * It would be nice to do multiple pages'
888 			 * worth at once here when the opportunity
889 			 * arises, as that has been shown to improve
890 			 * our wall time.  However, to do that
891 			 * requires that we use the pageio subsystem,
892 			 * which doesn't mix well with what we're
893 			 * already using here.  We can't use pageio
894 			 * all the time, because that subsystem
895 			 * assumes that a page is stored in N
896 			 * contiguous blocks on the device.
897 			 * Interleaving violates that assumption.
898 			 */
899 
900 			remainder = PAGESIZE - (byte_offset % PAGESIZE);
901 			if (bufs[count].b_bcount > remainder) {
902 				bufs[count].b_bcount = remainder;
903 			}
904 
905 			bufs[count].b_bufsize = bufs[count].b_bcount;
906 			if (((offset_t)byte_offset + bufs[count].b_bcount) >
907 				HS_MAXFILEOFF) {
908 				break;
909 			}
910 			byte_offset += bufs[count].b_bcount;
911 
912 			(void) bdev_strategy(&bufs[count]);
913 
914 			lwp_stat_update(LWP_STAT_INBLK, 1);
915 			lastp = searchp;
916 			if ((remainder - bufs[count].b_bcount) < 1) {
917 				searchp = searchp->p_next;
918 			}
919 		}
920 
921 		bufsused = count;
922 		/* Now wait for everything to come in */
923 		for (count = 0; count < bufsused; count++) {
924 			if (err == 0) {
925 				err = biowait(&bufs[count]);
926 			} else
927 				(void) biowait(&bufs[count]);
928 		}
929 
930 		/* Don't leak resources */
931 		for (count = 0; count < bufcnt; count++) {
932 			sema_destroy(&bufs[count].b_io);
933 			sema_destroy(&bufs[count].b_sem);
934 			if (count < bufsused && vas[count] != NULL) {
935 				ppmapout(vas[count]);
936 			}
937 		}
938 
939 		kmem_free(vas, bufcnt * sizeof (caddr_t));
940 		kmem_free(bufs, bufcnt * sizeof (struct buf));
941 	}
942 
943 	if (err) {
944 		pvn_read_done(pp, B_ERROR);
945 		return (err);
946 	}
947 
948 	/*
949 	 * Lock the requested page, and the one after it if possible.
950 	 * Don't bother if our caller hasn't given us a place to stash
951 	 * the page pointers, since otherwise we'd lock pages that would
952 	 * never get unlocked.
953 	 */
954 	if (pagefound) {
955 		int index;
956 		ulong_t soff;
957 
958 		/*
959 		 * Make sure it's in memory before we say it's here.
960 		 */
961 		if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
962 			hsfs_lostpage++;
963 			goto reread;
964 		}
965 
966 		pl[0] = pp;
967 		index = 1;
968 
969 		/*
970 		 * Try to lock the next page, if it exists, without
971 		 * blocking.
972 		 */
973 		plsz -= PAGESIZE;
974 		/* LINTED (plsz is unsigned) */
975 		for (soff = off + PAGESIZE; plsz > 0;
976 		    soff += PAGESIZE, plsz -= PAGESIZE) {
977 			pp = page_lookup_nowait(vp, (u_offset_t)soff,
978 					SE_SHARED);
979 			if (pp == NULL)
980 				break;
981 			pl[index++] = pp;
982 		}
983 		pl[index] = NULL;
984 		return (0);
985 	}
986 
987 	if (pp != NULL) {
988 		pvn_plist_init(pp, pl, plsz, off, io_len, rw);
989 	}
990 
991 	return (err);
992 }
993 
994 static int
995 hsfs_getpage(
996 	struct vnode *vp,
997 	offset_t off,
998 	size_t len,
999 	uint_t *protp,
1000 	struct page *pl[],
1001 	size_t plsz,
1002 	struct seg *seg,
1003 	caddr_t addr,
1004 	enum seg_rw rw,
1005 	struct cred *cred)
1006 {
1007 	int err;
1008 	uint_t filsiz;
1009 	struct hsnode *hp = VTOH(vp);
1010 
1011 	/* does not support write */
1012 	if (rw == S_WRITE) {
1013 		panic("write attempt on READ ONLY HSFS");
1014 		/*NOTREACHED*/
1015 	}
1016 
1017 	if (vp->v_flag & VNOMAP) {
1018 		return (ENOSYS);
1019 	}
1020 
1021 	ASSERT(off <= HS_MAXFILEOFF);
1022 
1023 	/*
1024 	 * Determine file data size for EOF check.
1025 	 */
1026 	filsiz = hp->hs_dirent.ext_size;
1027 	if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
1028 		return (EFAULT);	/* beyond EOF */
1029 
1030 	if (protp != NULL)
1031 		*protp = PROT_ALL;
1032 
1033 	if (len <= PAGESIZE)
1034 		err = hsfs_getapage(vp, (u_offset_t)off, len, protp, pl, plsz,
1035 		    seg, addr, rw, cred);
1036 	else
1037 		err = pvn_getpages(hsfs_getapage, vp, off, len, protp,
1038 		    pl, plsz, seg, addr, rw, cred);
1039 
1040 	return (err);
1041 }
1042 
1043 
1044 
1045 /*
1046  * This function should never be called.  We need it only so that it
1047  * can be passed as an argument to other functions.
1048  */
1049 /*ARGSUSED*/
1050 int
1051 hsfs_putapage(
1052 	vnode_t		*vp,
1053 	page_t		*pp,
1054 	u_offset_t	*offp,
1055 	size_t		*lenp,
1056 	int		flags,
1057 	cred_t		*cr)
1058 {
1059 	/* should never happen - just destroy it */
1060 	cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
1061 	pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
1062 	return (0);
1063 }
1064 
1065 
1066 /*
1067  * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
1068  * B_INVAL is set by:
1069  *
1070  *	1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
1071  *	2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
1072  *	   which translates to an MC_SYNC with the MS_INVALIDATE flag.
1073  *
1074  * The B_FREE (as well as the B_DONTNEED) flag is set when the
1075  * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
1076  * from SEGVN to release pages behind a pagefault.
1077  */
1078 /*ARGSUSED*/
1079 static int
1080 hsfs_putpage(
1081 	struct vnode	*vp,
1082 	offset_t	off,
1083 	size_t		len,
1084 	int		flags,
1085 	struct cred	*cr)
1086 {
1087 	int error = 0;
1088 
1089 	if (vp->v_count == 0) {
1090 		panic("hsfs_putpage: bad v_count");
1091 		/*NOTREACHED*/
1092 	}
1093 
1094 	if (vp->v_flag & VNOMAP)
1095 		return (ENOSYS);
1096 
1097 	ASSERT(off <= HS_MAXFILEOFF);
1098 
1099 	if (!vn_has_cached_data(vp))	/* no pages mapped */
1100 		return (0);
1101 
1102 	if (len == 0)		/* from 'off' to EOF */
1103 		error = pvn_vplist_dirty(vp, off,
1104 					hsfs_putapage, flags, cr);
1105 	else {
1106 		offset_t end_off = off + len;
1107 		offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
1108 		offset_t io_off;
1109 
1110 		file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
1111 		if (end_off > file_size)
1112 			end_off = file_size;
1113 
1114 		for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
1115 			page_t *pp;
1116 
1117 			/*
1118 			 * We insist on getting the page only if we are
1119 			 * about to invalidate, free or write it and
1120 			 * the B_ASYNC flag is not set.
1121 			 */
1122 			if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1123 				pp = page_lookup(vp, io_off,
1124 					(flags & (B_INVAL | B_FREE)) ?
1125 					    SE_EXCL : SE_SHARED);
1126 			} else {
1127 				pp = page_lookup_nowait(vp, io_off,
1128 					(flags & B_FREE) ? SE_EXCL : SE_SHARED);
1129 			}
1130 
1131 			if (pp == NULL)
1132 				continue;
1133 			/*
1134 			 * Normally pvn_getdirty() should return 0, which
1135 			 * implies that it has done the job for us.
1136 			 * The shouldn't-happen scenario is when it returns 1.
1137 			 * This means that the page has been modified and
1138 			 * needs to be put back.
1139 			 * Since we can't write on a CD, we fake a failed
1140 			 * I/O and force pvn_write_done() to destroy the page.
1141 			 */
1142 			if (pvn_getdirty(pp, flags) == 1) {
1143 				cmn_err(CE_NOTE,
1144 					"hsfs_putpage: dirty HSFS page");
1145 				pvn_write_done(pp, flags |
1146 				    B_ERROR | B_WRITE | B_INVAL | B_FORCE);
1147 			}
1148 		}
1149 	}
1150 	return (error);
1151 }
1152 
1153 
1154 /*ARGSUSED*/
1155 static int
1156 hsfs_map(
1157 	struct vnode *vp,
1158 	offset_t off,
1159 	struct as *as,
1160 	caddr_t *addrp,
1161 	size_t len,
1162 	uchar_t prot,
1163 	uchar_t maxprot,
1164 	uint_t flags,
1165 	struct cred *cred)
1166 {
1167 	struct segvn_crargs vn_a;
1168 	int error;
1169 
1170 	/* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
1171 
1172 	if (vp->v_flag & VNOMAP)
1173 		return (ENOSYS);
1174 
1175 	if (off > HS_MAXFILEOFF || off < 0 ||
1176 	    (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1177 		return (ENXIO);
1178 
1179 	if (vp->v_type != VREG) {
1180 		return (ENODEV);
1181 	}
1182 
1183 	/*
1184 	 * If file is being locked, disallow mapping.
1185 	 */
1186 	if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
1187 		return (EAGAIN);
1188 
1189 	as_rangelock(as);
1190 
1191 	if ((flags & MAP_FIXED) == 0) {
1192 		map_addr(addrp, len, off, 1, flags);
1193 		if (*addrp == NULL) {
1194 			as_rangeunlock(as);
1195 			return (ENOMEM);
1196 		}
1197 	} else {
1198 		/*
1199 		 * User specified address - blow away any previous mappings
1200 		 */
1201 		(void) as_unmap(as, *addrp, len);
1202 	}
1203 
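	/*
	 * Let segvn build the mapping.  With amp == NULL the segment is
	 * backed directly by this vnode's pages; for MAP_PRIVATE mappings
	 * segvn supplies copy-on-write anon pages only if the mapping is
	 * actually written to.
	 */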
1204 	vn_a.vp = vp;
1205 	vn_a.offset = off;
1206 	vn_a.type = flags & MAP_TYPE;
1207 	vn_a.prot = prot;
1208 	vn_a.maxprot = maxprot;
1209 	vn_a.flags = flags & ~MAP_TYPE;
1210 	vn_a.cred = cred;
1211 	vn_a.amp = NULL;
1212 	vn_a.szc = 0;
1213 	vn_a.lgrp_mem_policy_flags = 0;
1214 
1215 	error = as_map(as, *addrp, len, segvn_create, &vn_a);
1216 	as_rangeunlock(as);
1217 	return (error);
1218 }
1219 
1220 /* ARGSUSED */
1221 static int
1222 hsfs_addmap(
1223 	struct vnode *vp,
1224 	offset_t off,
1225 	struct as *as,
1226 	caddr_t addr,
1227 	size_t len,
1228 	uchar_t prot,
1229 	uchar_t maxprot,
1230 	uint_t flags,
1231 	struct cred *cr)
1232 {
1233 	struct hsnode *hp;
1234 
1235 	if (vp->v_flag & VNOMAP)
1236 		return (ENOSYS);
1237 
1238 	hp = VTOH(vp);
1239 	mutex_enter(&hp->hs_contents_lock);
1240 	hp->hs_mapcnt += btopr(len);
1241 	mutex_exit(&hp->hs_contents_lock);
1242 	return (0);
1243 }
1244 
1245 /*ARGSUSED*/
1246 static int
1247 hsfs_delmap(
1248 	struct vnode *vp,
1249 	offset_t off,
1250 	struct as *as,
1251 	caddr_t addr,
1252 	size_t len,
1253 	uint_t prot,
1254 	uint_t maxprot,
1255 	uint_t flags,
1256 	struct cred *cr)
1257 {
1258 	struct hsnode *hp;
1259 
1260 	if (vp->v_flag & VNOMAP)
1261 		return (ENOSYS);
1262 
1263 	hp = VTOH(vp);
1264 	mutex_enter(&hp->hs_contents_lock);
1265 	hp->hs_mapcnt -= btopr(len);	/* Count released mappings */
1266 	ASSERT(hp->hs_mapcnt >= 0);
1267 	mutex_exit(&hp->hs_contents_lock);
1268 	return (0);
1269 }
1270 
1271 /* ARGSUSED */
1272 static int
1273 hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp)
1274 {
1275 	return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1276 }
1277 
1278 /* ARGSUSED */
1279 static int
1280 hsfs_frlock(
1281 	struct vnode *vp,
1282 	int cmd,
1283 	struct flock64 *bfp,
1284 	int flag,
1285 	offset_t offset,
1286 	struct flk_callback *flk_cbp,
1287 	cred_t *cr)
1288 {
1289 	struct hsnode *hp = VTOH(vp);
1290 
1291 	/*
1292 	 * If the file is being mapped, disallow fs_frlock.
1293 	 * We are not holding the hs_contents_lock while checking
1294 	 * hs_mapcnt because the current locking strategy drops all
1295 	 * locks before calling fs_frlock.
1296 	 * So, hs_mapcnt could change before we enter fs_frlock making
1297 	 * it meaningless to have held hs_contents_lock in the first place.
1298 	 */
1299 	if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
1300 		return (EAGAIN);
1301 
1302 	return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr));
1303 }
1304 
1305 /* ARGSUSED */
1306 static int
1307 hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr)
1308 {
1309 	struct hsfs	*fsp;
1310 
1311 	int		error = 0;
1312 
1313 	switch (cmd) {
1314 
1315 	case _PC_NAME_MAX:
1316 		fsp = VFS_TO_HSFS(vp->v_vfsp);
1317 		*valp = fsp->hsfs_namemax;
1318 		break;
1319 
1320 	case _PC_FILESIZEBITS:
1321 		*valp = 33;	/* Without multi extent support: 4 GB - 2k */
1322 		break;
1323 
1324 	default:
1325 		error = fs_pathconf(vp, cmd, valp, cr);
1326 	}
1327 
1328 	return (error);
1329 }
1330 
1331 
1332 
1333 const fs_operation_def_t hsfs_vnodeops_template[] = {
1334 	VOPNAME_OPEN, hsfs_open,
1335 	VOPNAME_CLOSE, hsfs_close,
1336 	VOPNAME_READ, hsfs_read,
1337 	VOPNAME_GETATTR, hsfs_getattr,
1338 	VOPNAME_ACCESS, hsfs_access,
1339 	VOPNAME_LOOKUP, hsfs_lookup,
1340 	VOPNAME_READDIR, hsfs_readdir,
1341 	VOPNAME_READLINK, hsfs_readlink,
1342 	VOPNAME_FSYNC, hsfs_fsync,
1343 	VOPNAME_INACTIVE, (fs_generic_func_p) hsfs_inactive,
1344 	VOPNAME_FID, hsfs_fid,
1345 	VOPNAME_SEEK, hsfs_seek,
1346 	VOPNAME_FRLOCK, hsfs_frlock,
1347 	VOPNAME_GETPAGE, hsfs_getpage,
1348 	VOPNAME_PUTPAGE, hsfs_putpage,
1349 	VOPNAME_MAP, (fs_generic_func_p) hsfs_map,
1350 	VOPNAME_ADDMAP, (fs_generic_func_p) hsfs_addmap,
1351 	VOPNAME_DELMAP, hsfs_delmap,
1352 	VOPNAME_PATHCONF, hsfs_pathconf,
1353 	NULL, NULL
1354 };
1355 
1356 struct vnodeops *hsfs_vnodeops;
1357