1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2017 by Delphix. All rights reserved.
27 */
28
29 /*
30 * Vnode operations for the High Sierra filesystem
31 */
32
33 #include <sys/types.h>
34 #include <sys/t_lock.h>
35 #include <sys/param.h>
36 #include <sys/time.h>
37 #include <sys/systm.h>
38 #include <sys/sysmacros.h>
39 #include <sys/resource.h>
40 #include <sys/signal.h>
41 #include <sys/cred.h>
42 #include <sys/user.h>
43 #include <sys/buf.h>
44 #include <sys/vfs.h>
45 #include <sys/vfs_opreg.h>
46 #include <sys/stat.h>
47 #include <sys/vnode.h>
48 #include <sys/mode.h>
49 #include <sys/proc.h>
50 #include <sys/disp.h>
51 #include <sys/file.h>
52 #include <sys/fcntl.h>
53 #include <sys/flock.h>
54 #include <sys/kmem.h>
55 #include <sys/uio.h>
56 #include <sys/conf.h>
57 #include <sys/errno.h>
58 #include <sys/mman.h>
59 #include <sys/pathname.h>
60 #include <sys/debug.h>
61 #include <sys/vmsystm.h>
62 #include <sys/cmn_err.h>
63 #include <sys/fbuf.h>
64 #include <sys/dirent.h>
66 #include <sys/dkio.h>
68 #include <sys/atomic.h>
69
70 #include <vm/hat.h>
71 #include <vm/page.h>
72 #include <vm/pvn.h>
73 #include <vm/as.h>
74 #include <vm/seg.h>
75 #include <vm/seg_map.h>
76 #include <vm/seg_kmem.h>
77 #include <vm/seg_vn.h>
78 #include <vm/rm.h>
80 #include <sys/swap.h>
81 #include <sys/avl.h>
82 #include <sys/sunldi.h>
83 #include <sys/ddi.h>
84 #include <sys/sunddi.h>
85 #include <sys/sdt.h>
86
87 /*
88 * For struct modlinkage
89 */
90 #include <sys/modctl.h>
91
92 #include <sys/fs/hsfs_spec.h>
93 #include <sys/fs/hsfs_node.h>
94 #include <sys/fs/hsfs_impl.h>
95 #include <sys/fs/hsfs_susp.h>
96 #include <sys/fs/hsfs_rrip.h>
97
98 #include <fs/fs_subr.h>
99
100 /* # of contiguous requests to detect sequential access pattern */
101 static int seq_contig_requests = 2;
102
103 /*
104  * This is the max number of taskq threads that will be created
105  * if required. Since we are using a Dynamic TaskQ, only one
106  * thread is created initially.
107 *
108 * NOTE: In the usual hsfs use case this per fs instance number
109 * of taskq threads should not place any undue load on a system.
110 * Even on an unusual system with say 100 CDROM drives, 800 threads
111 * will not be created unless all the drives are loaded and all
112  * of them are saturated with I/O at the same time! If system load
113  * due to such an unusual case is ever a complaint, it should be easy
114  * enough to change to one per-machine Dynamic TaskQ for all hsfs
115  * mounts with an nthreads of, say, 32.
116 */
117 static int hsfs_taskq_nthreads = 8; /* # of taskq threads per fs */
118
119 /* Min count of adjacent bufs that will avoid buf coalescing */
120 static int hsched_coalesce_min = 2;
121
122 /*
123 * Kmem caches for heavily used small allocations. Using these kmem
124 * caches provides a factor of 3 reduction in system time and greatly
125 * aids overall throughput esp. on SPARC.
126 */
127 struct kmem_cache *hio_cache;
128 struct kmem_cache *hio_info_cache;
129
130 /*
131 * This tunable allows us to ignore inode numbers from rrip-1.12.
132 * In this case, we fall back to our default inode algorithm.
133 */
134 extern int use_rrip_inodes;
135
136 static int hsched_deadline_compare(const void *x1, const void *x2);
137 static int hsched_offset_compare(const void *x1, const void *x2);
138 static void hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra);
139 int hsched_invoke_strategy(struct hsfs *fsp);
140
141 /* ARGSUSED */
142 static int
143 hsfs_fsync(vnode_t *cp, int syncflag, cred_t *cred, caller_context_t *ct)
144 {
145 return (0);
146 }
147
148
149 /*ARGSUSED*/
150 static int
151 hsfs_read(struct vnode *vp, struct uio *uiop, int ioflag, struct cred *cred,
152 struct caller_context *ct)
153 {
154 caddr_t base;
155 offset_t diff;
156 int error;
157 struct hsnode *hp;
158 uint_t filesize;
159
160 hp = VTOH(vp);
161 /*
162 * if vp is of type VDIR, make sure dirent
163 * is filled up with all info (because of ptbl)
164 */
165 if (vp->v_type == VDIR) {
166 if (hp->hs_dirent.ext_size == 0)
167 hs_filldirent(vp, &hp->hs_dirent);
168 }
169 filesize = hp->hs_dirent.ext_size;
170
171 /* Sanity checks. */
172 if (uiop->uio_resid == 0 || /* No data wanted. */
173 uiop->uio_loffset > HS_MAXFILEOFF || /* Offset too big. */
174 uiop->uio_loffset >= filesize) /* Past EOF. */
175 return (0);
176
177 do {
178 /*
179 * We want to ask for only the "right" amount of data.
180 * In this case that means:-
181 *
182 * We can't get data from beyond our EOF. If asked,
183 * we will give a short read.
184 *
185 * segmap_getmapflt returns buffers of MAXBSIZE bytes.
186 * These buffers are always MAXBSIZE aligned.
187 * If our starting offset is not MAXBSIZE aligned,
188 * we can only ask for less than MAXBSIZE bytes.
189 *
190 * If our requested offset and length are such that
191 * they belong in different MAXBSIZE aligned slots
192 * then we'll be making more than one call on
193 * segmap_getmapflt.
194 *
195 * This diagram shows the variables we use and their
196 * relationships.
197 *
198 * |<-----MAXBSIZE----->|
199 * +--------------------------...+
200 * |.....mapon->|<--n-->|....*...|EOF
201 * +--------------------------...+
202 * uio_loffset->|
203 * uio_resid....|<---------->|
204 * diff.........|<-------------->|
205 *
206 * So, in this case our offset is not aligned
207 * and our request takes us outside of the
208 * MAXBSIZE window. We will break this up into
209 * two segmap_getmapflt calls.
210 */
211 size_t nbytes;
212 offset_t mapon;
213 size_t n;
214 uint_t flags;
215
216 mapon = uiop->uio_loffset & MAXBOFFSET;
217 diff = filesize - uiop->uio_loffset;
218 nbytes = (size_t)MIN(MAXBSIZE - mapon, uiop->uio_resid);
219 n = MIN(diff, nbytes);
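		/*
		 * Illustrative example (hypothetical values): with
		 * MAXBSIZE = 8192, uio_loffset = 6144, uio_resid = 4096
		 * and filesize = 16384, we get mapon = 6144,
		 * nbytes = MIN(8192 - 6144, 4096) = 2048 and n = 2048,
		 * so this pass moves 2048 bytes and the next loop
		 * iteration handles the remaining 2048 bytes from the
		 * following MAXBSIZE window.
		 */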
220 if (n <= 0) {
221 /* EOF or request satisfied. */
222 return (0);
223 }
224
225 base = segmap_getmapflt(segkmap, vp,
226 (u_offset_t)uiop->uio_loffset, n, 1, S_READ);
227
228 error = uiomove(base + mapon, n, UIO_READ, uiop);
229
230 if (error == 0) {
231 /*
232 * if read a whole block, or read to eof,
233 * won't need this buffer again soon.
234 */
235 if (n + mapon == MAXBSIZE ||
236 uiop->uio_loffset == filesize)
237 flags = SM_DONTNEED;
238 else
239 flags = 0;
240
241 error = segmap_release(segkmap, base, flags);
242 } else
243 (void) segmap_release(segkmap, base, 0);
244 } while (error == 0 && uiop->uio_resid > 0);
245
246 return (error);
247 }
248
249 /*ARGSUSED2*/
250 static int
251 hsfs_getattr(struct vnode *vp, struct vattr *vap, int flags, struct cred *cred,
252 caller_context_t *ct)
253 {
254 struct hsnode *hp;
255 struct vfs *vfsp;
256 struct hsfs *fsp;
257
258 hp = VTOH(vp);
259 fsp = VFS_TO_HSFS(vp->v_vfsp);
260 vfsp = vp->v_vfsp;
261
262 if ((hp->hs_dirent.ext_size == 0) && (vp->v_type == VDIR)) {
263 hs_filldirent(vp, &hp->hs_dirent);
264 }
265 vap->va_type = IFTOVT(hp->hs_dirent.mode);
266 vap->va_mode = hp->hs_dirent.mode;
267 vap->va_uid = hp->hs_dirent.uid;
268 vap->va_gid = hp->hs_dirent.gid;
269
270 vap->va_fsid = vfsp->vfs_dev;
271 vap->va_nodeid = (ino64_t)hp->hs_nodeid;
272 vap->va_nlink = hp->hs_dirent.nlink;
273 vap->va_size = (offset_t)hp->hs_dirent.ext_size;
274
275 vap->va_atime.tv_sec = hp->hs_dirent.adate.tv_sec;
276 vap->va_atime.tv_nsec = hp->hs_dirent.adate.tv_usec*1000;
277 vap->va_mtime.tv_sec = hp->hs_dirent.mdate.tv_sec;
278 vap->va_mtime.tv_nsec = hp->hs_dirent.mdate.tv_usec*1000;
279 vap->va_ctime.tv_sec = hp->hs_dirent.cdate.tv_sec;
280 vap->va_ctime.tv_nsec = hp->hs_dirent.cdate.tv_usec*1000;
281 if (vp->v_type == VCHR || vp->v_type == VBLK)
282 vap->va_rdev = hp->hs_dirent.r_dev;
283 else
284 vap->va_rdev = 0;
285 vap->va_blksize = vfsp->vfs_bsize;
286 /* no. of blocks = no. of data blocks + no. of xar blocks */
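	/*
	 * Illustrative example (hypothetical values): with ext_size = 10000,
	 * xar_len = 1 and lbn_shift = 11 (2048-byte logical blocks), the XAR
	 * adds 2048 bytes, so va_nblocks = howmany(10000 + 2048, DEV_BSIZE)
	 * = howmany(12048, 512) = 24 512-byte blocks.
	 */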
287 vap->va_nblocks = (fsblkcnt64_t)howmany(vap->va_size + (u_longlong_t)
288 (hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift), DEV_BSIZE);
289 vap->va_seq = hp->hs_seq;
290 return (0);
291 }
292
293 /*ARGSUSED*/
294 static int
295 hsfs_readlink(struct vnode *vp, struct uio *uiop, struct cred *cred,
296 caller_context_t *ct)
297 {
298 struct hsnode *hp;
299
300 if (vp->v_type != VLNK)
301 return (EINVAL);
302
303 hp = VTOH(vp);
304
305 if (hp->hs_dirent.sym_link == (char *)NULL)
306 return (ENOENT);
307
308 return (uiomove(hp->hs_dirent.sym_link,
309 (size_t)MIN(hp->hs_dirent.ext_size,
310 uiop->uio_resid), UIO_READ, uiop));
311 }
312
313 /*ARGSUSED*/
314 static void
315 hsfs_inactive(struct vnode *vp, struct cred *cred, caller_context_t *ct)
316 {
317 struct hsnode *hp;
318 struct hsfs *fsp;
319
320 int nopage;
321
322 hp = VTOH(vp);
323 fsp = VFS_TO_HSFS(vp->v_vfsp);
324 /*
325 * Note: acquiring and holding v_lock for quite a while
326 * here serializes on the vnode; this is unfortunate, but
327 * likely not to overly impact performance, as the underlying
328 * device (CDROM drive) is quite slow.
329 */
330 rw_enter(&fsp->hsfs_hash_lock, RW_WRITER);
331 mutex_enter(&hp->hs_contents_lock);
332 mutex_enter(&vp->v_lock);
333
334 if (vp->v_count < 1) {
335 panic("hsfs_inactive: v_count < 1");
336 /*NOTREACHED*/
337 }
338
339 VN_RELE_LOCKED(vp);
340 if (vp->v_count > 0 || (hp->hs_flags & HREF) == 0) {
341 mutex_exit(&vp->v_lock);
342 mutex_exit(&hp->hs_contents_lock);
343 rw_exit(&fsp->hsfs_hash_lock);
344 return;
345 }
346 if (vp->v_count == 0) {
347 /*
348 * Free the hsnode.
349 * If there are no pages associated with the
350 * hsnode, give it back to the kmem_cache,
351 * else put at the end of this file system's
352 * internal free list.
353 */
354 nopage = !vn_has_cached_data(vp);
355 hp->hs_flags = 0;
356 /*
357 * exit these locks now, since hs_freenode may
358 * kmem_free the hsnode and embedded vnode
359 */
360 mutex_exit(&vp->v_lock);
361 mutex_exit(&hp->hs_contents_lock);
362 hs_freenode(vp, fsp, nopage);
363 } else {
364 mutex_exit(&vp->v_lock);
365 mutex_exit(&hp->hs_contents_lock);
366 }
367 rw_exit(&fsp->hsfs_hash_lock);
368 }
369
370
371 /*ARGSUSED*/
372 static int
373 hsfs_lookup(struct vnode *dvp, char *nm, struct vnode **vpp,
374 struct pathname *pnp, int flags, struct vnode *rdir, struct cred *cred,
375 caller_context_t *ct, int *direntflags, pathname_t *realpnp)
376 {
377 int error;
378 int namelen = (int)strlen(nm);
379
380 if (*nm == '\0') {
381 VN_HOLD(dvp);
382 *vpp = dvp;
383 return (0);
384 }
385
386 /*
387 * If we're looking for ourself, life is simple.
388 */
389 if (namelen == 1 && *nm == '.') {
390 if (error = hs_access(dvp, (mode_t)VEXEC, cred))
391 return (error);
392 VN_HOLD(dvp);
393 *vpp = dvp;
394 return (0);
395 }
396
397 return (hs_dirlook(dvp, nm, namelen, vpp, cred));
398 }
399
400
401 /*ARGSUSED*/
402 static int
403 hsfs_readdir(struct vnode *vp, struct uio *uiop, struct cred *cred, int *eofp,
404 caller_context_t *ct, int flags)
405 {
406 struct hsnode *dhp;
407 struct hsfs *fsp;
408 struct hs_direntry hd;
409 struct dirent64 *nd;
410 int error;
411 uint_t offset; /* real offset in directory */
412 uint_t dirsiz; /* real size of directory */
413 uchar_t *blkp;
414 int hdlen; /* length of hs directory entry */
415 long ndlen; /* length of dirent entry */
416 int bytes_wanted;
417 size_t bufsize; /* size of dirent buffer */
418 char *outbuf; /* ptr to dirent buffer */
419 char *dname;
420 int dnamelen;
421 size_t dname_size;
422 struct fbuf *fbp;
423 uint_t last_offset; /* last index into current dir block */
424 ino64_t dirino; /* temporary storage before storing in dirent */
425 off_t diroff;
426
427 dhp = VTOH(vp);
428 fsp = VFS_TO_HSFS(vp->v_vfsp);
429 if (dhp->hs_dirent.ext_size == 0)
430 hs_filldirent(vp, &dhp->hs_dirent);
431 dirsiz = dhp->hs_dirent.ext_size;
432 if (uiop->uio_loffset >= dirsiz) { /* at or beyond EOF */
433 if (eofp)
434 *eofp = 1;
435 return (0);
436 }
437 ASSERT(uiop->uio_loffset <= HS_MAXFILEOFF);
438 offset = uiop->uio_loffset;
439
440 dname_size = fsp->hsfs_namemax + 1; /* 1 for the ending NUL */
441 dname = kmem_alloc(dname_size, KM_SLEEP);
442 bufsize = uiop->uio_resid + sizeof (struct dirent64);
443
444 outbuf = kmem_alloc(bufsize, KM_SLEEP);
445 nd = (struct dirent64 *)outbuf;
446
447 while (offset < dirsiz) {
448 bytes_wanted = MIN(MAXBSIZE, dirsiz - (offset & MAXBMASK));
449
450 error = fbread(vp, (offset_t)(offset & MAXBMASK),
451 (unsigned int)bytes_wanted, S_READ, &fbp);
452 if (error)
453 goto done;
454
455 blkp = (uchar_t *)fbp->fb_addr;
456 last_offset = (offset & MAXBMASK) + fbp->fb_count;
457
458 #define rel_offset(offset) ((offset) & MAXBOFFSET) /* index into blkp */
459
460 while (offset < last_offset) {
461 /*
462 * Very similar validation code is found in
463 * process_dirblock(), hsfs_node.c.
464 * For an explanation, see there.
465 * It may make sense for the future to
466 * "consolidate" the code in hs_parsedir(),
467 * process_dirblock() and hsfs_readdir() into
468 * a single utility function.
469 */
470 hdlen = (int)((uchar_t)
471 HDE_DIR_LEN(&blkp[rel_offset(offset)]));
472 if (hdlen < HDE_ROOT_DIR_REC_SIZE ||
473 offset + hdlen > last_offset) {
474 /*
475 * advance to next sector boundary
476 */
477 offset = roundup(offset + 1, HS_SECTOR_SIZE);
478 if (hdlen)
479 hs_log_bogus_disk_warning(fsp,
480 HSFS_ERR_TRAILING_JUNK, 0);
481
482 continue;
483 }
484
485 bzero(&hd, sizeof (hd));
486
487 /*
488 * Just ignore invalid directory entries.
489 * XXX - maybe hs_parsedir() will detect EXISTENCE bit
490 */
491 if (!hs_parsedir(fsp, &blkp[rel_offset(offset)],
492 &hd, dname, &dnamelen, last_offset - offset)) {
493 /*
494 * Determine if there is enough room
495 */
496 ndlen = (long)DIRENT64_RECLEN((dnamelen));
497
498 if ((ndlen + ((char *)nd - outbuf)) >
499 uiop->uio_resid) {
500 fbrelse(fbp, S_READ);
501 goto done; /* output buffer full */
502 }
503
504 diroff = offset + hdlen;
505 /*
506 * If the media carries rrip-v1.12 or newer,
507 * and we trust the inodes from the rrip data
508 * (use_rrip_inodes != 0), use that data. If the
509 * media has been created by a recent mkisofs
510 * version, we may trust all numbers in the
511 * starting extent number; otherwise, we cannot
512 * do this for zero sized files and symlinks,
513 * because if we did we'd end up mapping all of
514 * them to the same node. We use HS_DUMMY_INO
515 * in this case and make sure that we will not
516 * map all files to the same meta data.
517 */
518 if (hd.inode != 0 && use_rrip_inodes) {
519 dirino = hd.inode;
520 } else if ((hd.ext_size == 0 ||
521 hd.sym_link != (char *)NULL) &&
522 (fsp->hsfs_flags & HSFSMNT_INODE) == 0) {
523 dirino = HS_DUMMY_INO;
524 } else {
525 dirino = hd.ext_lbn;
526 }
527
528 /* strncpy(9f) will zero uninitialized bytes */
529
530 ASSERT(strlen(dname) + 1 <=
531 DIRENT64_NAMELEN(ndlen));
532 (void) strncpy(nd->d_name, dname,
533 DIRENT64_NAMELEN(ndlen));
534 nd->d_reclen = (ushort_t)ndlen;
535 nd->d_off = (offset_t)diroff;
536 nd->d_ino = dirino;
537 nd = (struct dirent64 *)((char *)nd + ndlen);
538
539 /*
540 * free up space allocated for symlink
541 */
542 if (hd.sym_link != (char *)NULL) {
543 kmem_free(hd.sym_link,
544 (size_t)(hd.ext_size+1));
545 hd.sym_link = (char *)NULL;
546 }
547 }
548 offset += hdlen;
549 }
550 fbrelse(fbp, S_READ);
551 }
552
553 /*
554 * Got here for one of the following reasons:
555 * 1) outbuf is full (error == 0)
556 * 2) end of directory reached (error == 0)
557 * 3) error reading directory sector (error != 0)
558 * 4) directory entry crosses sector boundary (error == 0)
559 *
560 * If any directory entries have been copied, don't report
561 * case 4. Instead, return the valid directory entries.
562 *
563 * If no entries have been copied, report the error.
564  * In case 4, this will be indistinguishable from EOF.
565 */
566 done:
567 ndlen = ((char *)nd - outbuf);
568 if (ndlen != 0) {
569 error = uiomove(outbuf, (size_t)ndlen, UIO_READ, uiop);
570 uiop->uio_loffset = offset;
571 }
572 kmem_free(dname, dname_size);
573 kmem_free(outbuf, bufsize);
574 if (eofp && error == 0)
575 *eofp = (uiop->uio_loffset >= dirsiz);
576 return (error);
577 }
578
579 /*ARGSUSED2*/
580 static int
581 hsfs_fid(struct vnode *vp, struct fid *fidp, caller_context_t *ct)
582 {
583 struct hsnode *hp;
584 struct hsfid *fid;
585
586 if (fidp->fid_len < (sizeof (*fid) - sizeof (fid->hf_len))) {
587 fidp->fid_len = sizeof (*fid) - sizeof (fid->hf_len);
588 return (ENOSPC);
589 }
590
591 fid = (struct hsfid *)fidp;
592 fid->hf_len = sizeof (*fid) - sizeof (fid->hf_len);
593 hp = VTOH(vp);
594 mutex_enter(&hp->hs_contents_lock);
595 fid->hf_dir_lbn = hp->hs_dir_lbn;
596 fid->hf_dir_off = (ushort_t)hp->hs_dir_off;
597 fid->hf_ino = hp->hs_nodeid;
598 mutex_exit(&hp->hs_contents_lock);
599 return (0);
600 }
601
602 /*ARGSUSED*/
603 static int
604 hsfs_open(struct vnode **vpp, int flag, struct cred *cred, caller_context_t *ct)
605 {
606 return (0);
607 }
608
609 /*ARGSUSED*/
610 static int
611 hsfs_close(struct vnode *vp, int flag, int count, offset_t offset,
612 struct cred *cred, caller_context_t *ct)
613 {
614 (void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
615 cleanshares(vp, ttoproc(curthread)->p_pid);
616 return (0);
617 }
618
619 /*ARGSUSED2*/
620 static int
621 hsfs_access(struct vnode *vp, int mode, int flags, cred_t *cred,
622 caller_context_t *ct)
623 {
624 return (hs_access(vp, (mode_t)mode, cred));
625 }
626
627 /*
628  * The seek time of a CD-ROM is very long, and the data transfer
629  * rate is even worse (max. 150K per sec). The design
630  * decision is to reduce access to the cd-rom as much as possible,
631  * and to transfer a sizable block (read-ahead) of data at a time.
632  * The UFS style of reading ahead one block at a time is not
633  * appropriate, and is not supported.
634 */
635
636 /*
637 * KLUSTSIZE should be a multiple of PAGESIZE and <= MAXPHYS.
638 */
639 #define KLUSTSIZE (56 * 1024)
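/*
 * Illustrative check (assuming common values): with a PAGESIZE of 4K or
 * 8K, 56K is 14 or 7 pages respectively, and it stays below typical
 * MAXPHYS settings, satisfying the constraint above.
 */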
640 /* we don't support read ahead */
641 int hsfs_lostpage; /* no. of times we lost original page */
642
643 /*
644 * Used to prevent biodone() from releasing buf resources that
645 * we didn't allocate in quite the usual way.
646 */
647 /*ARGSUSED*/
648 int
649 hsfs_iodone(struct buf *bp)
650 {
651 sema_v(&bp->b_io);
652 return (0);
653 }
654
655 /*
656 * The taskq thread that invokes the scheduling function to ensure
657 * that all readaheads are complete and cleans up the associated
658 * memory and releases the page lock.
659 */
660 void
661 hsfs_ra_task(void *arg)
662 {
663 struct hio_info *info = arg;
664 uint_t count;
665 struct buf *wbuf;
666
667 ASSERT(info->pp != NULL);
668
669 for (count = 0; count < info->bufsused; count++) {
670 wbuf = &(info->bufs[count]);
671
672 DTRACE_PROBE1(hsfs_io_wait_ra, struct buf *, wbuf);
673 while (sema_tryp(&(info->sema[count])) == 0) {
674 if (hsched_invoke_strategy(info->fsp)) {
675 sema_p(&(info->sema[count]));
676 break;
677 }
678 }
679 sema_destroy(&(info->sema[count]));
680 DTRACE_PROBE1(hsfs_io_done_ra, struct buf *, wbuf);
681 biofini(&(info->bufs[count]));
682 }
683 for (count = 0; count < info->bufsused; count++) {
684 if (info->vas[count] != NULL) {
685 ppmapout(info->vas[count]);
686 }
687 }
688 kmem_free(info->vas, info->bufcnt * sizeof (caddr_t));
689 kmem_free(info->bufs, info->bufcnt * sizeof (struct buf));
690 kmem_free(info->sema, info->bufcnt * sizeof (ksema_t));
691
692 pvn_read_done(info->pp, 0);
693 kmem_cache_free(hio_info_cache, info);
694 }
695
696 /*
697 * Submit asynchronous readahead requests to the I/O scheduler
698 * depending on the number of pages to read ahead. These requests
699 * are asynchronous to the calling thread but I/O requests issued
700 * subsequently by other threads with higher LBNs must wait for
701 * these readaheads to complete since we have a single ordered
702 * I/O pipeline. Thus these readaheads are semi-asynchronous.
703 * A TaskQ handles waiting for the readaheads to complete.
704 *
705 * This function is mostly a copy of hsfs_getapage but somewhat
706 * simpler. A readahead request is aborted if page allocation
707 * fails.
708 */
709 /*ARGSUSED*/
710 static int
711 hsfs_getpage_ra(struct vnode *vp, u_offset_t off, struct seg *seg,
712 caddr_t addr, struct hsnode *hp, struct hsfs *fsp, int xarsiz,
713 offset_t bof, int chunk_lbn_count, int chunk_data_bytes)
714 {
715 struct buf *bufs;
716 caddr_t *vas;
717 caddr_t va;
718 struct page *pp, *searchp, *lastp;
719 struct vnode *devvp;
720 ulong_t byte_offset;
721 size_t io_len_tmp;
722 uint_t io_off, io_len;
723 uint_t xlen;
724 uint_t filsiz;
725 uint_t secsize;
726 uint_t bufcnt;
727 uint_t bufsused;
728 uint_t count;
729 uint_t io_end;
730 uint_t which_chunk_lbn;
731 uint_t offset_lbn;
732 uint_t offset_extra;
733 offset_t offset_bytes;
734 uint_t remaining_bytes;
735 uint_t extension;
736 int remainder; /* must be signed */
737 diskaddr_t driver_block;
738 u_offset_t io_off_tmp;
739 ksema_t *fio_done;
740 struct hio_info *info;
741 size_t len;
742
743 ASSERT(fsp->hqueue != NULL);
744
745 if (addr >= seg->s_base + seg->s_size) {
746 return (-1);
747 }
748
749 devvp = fsp->hsfs_devvp;
750 secsize = fsp->hsfs_vol.lbn_size; /* bytes per logical block */
751
752 /* file data size */
753 filsiz = hp->hs_dirent.ext_size;
754
755 if (off >= filsiz)
756 return (0);
757
758 extension = 0;
759 pp = NULL;
760
761 extension += hp->hs_ra_bytes;
762
763 /*
764 * Some CD writers (e.g. Kodak Photo CD writers)
765 * create CDs in TAO mode and reserve tracks that
766 * are not completely written. Some sectors remain
767 * unreadable for this reason and give I/O errors.
768 * Also, there's no point in reading sectors
769 * we'll never look at. So, if we're asked to go
770 * beyond the end of a file, truncate to the length
771 * of that file.
772 *
773 * Additionally, this behaviour is required by section
774 * 6.4.5 of ISO 9660:1988(E).
775 */
776 len = MIN(extension ? extension : PAGESIZE, filsiz - off);
777
778 /* A little paranoia */
779 if (len <= 0)
780 return (-1);
781
782 /*
783 * After all that, make sure we're asking for things in units
784 * that bdev_strategy() will understand (see bug 4202551).
785 */
786 len = roundup(len, DEV_BSIZE);
787
788 pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
789 &io_len_tmp, off, len, 1);
790
791 if (pp == NULL) {
792 hp->hs_num_contig = 0;
793 hp->hs_ra_bytes = 0;
794 hp->hs_prev_offset = 0;
795 return (-1);
796 }
797
798 io_off = (uint_t)io_off_tmp;
799 io_len = (uint_t)io_len_tmp;
800
801 /* check for truncation */
802 /*
803 * xxx Clean up and return EIO instead?
804 * xxx Ought to go to u_offset_t for everything, but we
805 * xxx call lots of things that want uint_t arguments.
806 */
807 ASSERT(io_off == io_off_tmp);
808
809 /*
810 * get enough buffers for worst-case scenario
811 * (i.e., no coalescing possible).
812 */
813 bufcnt = (len + secsize - 1) / secsize;
814 bufs = kmem_alloc(bufcnt * sizeof (struct buf), KM_SLEEP);
815 vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
816
817 /*
818  * Allocate an array of semaphores since we are doing I/O
819 * scheduling.
820 */
821 fio_done = kmem_alloc(bufcnt * sizeof (ksema_t), KM_SLEEP);
822
823 /*
824 * If our filesize is not an integer multiple of PAGESIZE,
825 * we zero that part of the last page that's between EOF and
826 * the PAGESIZE boundary.
827 */
828 xlen = io_len & PAGEOFFSET;
829 if (xlen != 0)
830 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
831
832 DTRACE_PROBE2(hsfs_readahead, struct vnode *, vp, uint_t, io_len);
833
834 va = NULL;
835 lastp = NULL;
836 searchp = pp;
837 io_end = io_off + io_len;
838 for (count = 0, byte_offset = io_off;
839 byte_offset < io_end;
840 count++) {
841 ASSERT(count < bufcnt);
842
843 bioinit(&bufs[count]);
844 bufs[count].b_edev = devvp->v_rdev;
845 bufs[count].b_dev = cmpdev(devvp->v_rdev);
846 bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
847 bufs[count].b_iodone = hsfs_iodone;
848 bufs[count].b_vp = vp;
849 bufs[count].b_file = vp;
850
851 /* Compute disk address for interleaving. */
852
853 /* considered without skips */
854 which_chunk_lbn = byte_offset / chunk_data_bytes;
855
856 /* factor in skips */
857 offset_lbn = which_chunk_lbn * chunk_lbn_count;
858
859 /* convert to physical byte offset for lbn */
860 offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
861
862 /* don't forget offset into lbn */
863 offset_extra = byte_offset % chunk_data_bytes;
864
865 /* get virtual block number for driver */
866 driver_block = lbtodb(bof + xarsiz
867 + offset_bytes + offset_extra);
868
869 if (lastp != searchp) {
870 /* this branch taken first time through loop */
871 va = vas[count] = ppmapin(searchp, PROT_WRITE,
872 (caddr_t)-1);
873 /* ppmapin() guarantees not to return NULL */
874 } else {
875 vas[count] = NULL;
876 }
877
878 bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
879 bufs[count].b_offset =
880 (offset_t)(byte_offset - io_off + off);
881
882 /*
883 * We specifically use the b_lblkno member here
884 * as even in the 32 bit world driver_block can
885 * get very large in line with the ISO9660 spec.
886 */
887
888 bufs[count].b_lblkno = driver_block;
889
890 remaining_bytes = ((which_chunk_lbn + 1) * chunk_data_bytes)
891 - byte_offset;
892
893 /*
894 * remaining_bytes can't be zero, as we derived
895 * which_chunk_lbn directly from byte_offset.
896 */
897 if ((remaining_bytes + byte_offset) < (off + len)) {
898 /* coalesce-read the rest of the chunk */
899 bufs[count].b_bcount = remaining_bytes;
900 } else {
901 /* get the final bits */
902 bufs[count].b_bcount = off + len - byte_offset;
903 }
904
905 remainder = PAGESIZE - (byte_offset % PAGESIZE);
906 if (bufs[count].b_bcount > remainder) {
907 bufs[count].b_bcount = remainder;
908 }
909
910 bufs[count].b_bufsize = bufs[count].b_bcount;
911 if (((offset_t)byte_offset + bufs[count].b_bcount) >
912 HS_MAXFILEOFF) {
913 break;
914 }
915 byte_offset += bufs[count].b_bcount;
916
917 /*
918 * We are scheduling I/O so we need to enqueue
919 * requests rather than calling bdev_strategy
920 * here. A later invocation of the scheduling
921 * function will take care of doing the actual
922 * I/O as it selects requests from the queue as
923 * per the scheduling logic.
924 */
925 struct hio *hsio = kmem_cache_alloc(hio_cache,
926 KM_SLEEP);
927
928 sema_init(&fio_done[count], 0, NULL,
929 SEMA_DEFAULT, NULL);
930 hsio->bp = &bufs[count];
931 hsio->sema = &fio_done[count];
932 hsio->io_lblkno = bufs[count].b_lblkno;
933 hsio->nblocks = howmany(hsio->bp->b_bcount,
934 DEV_BSIZE);
935
936 /* used for deadline */
937 hsio->io_timestamp = drv_hztousec(ddi_get_lbolt());
938
939 /* for I/O coalescing */
940 hsio->contig_chain = NULL;
941 hsched_enqueue_io(fsp, hsio, 1);
942
943 lwp_stat_update(LWP_STAT_INBLK, 1);
944 lastp = searchp;
945 if ((remainder - bufs[count].b_bcount) < 1) {
946 searchp = searchp->p_next;
947 }
948 }
949
950 bufsused = count;
951 info = kmem_cache_alloc(hio_info_cache, KM_SLEEP);
952 info->bufs = bufs;
953 info->vas = vas;
954 info->sema = fio_done;
955 info->bufsused = bufsused;
956 info->bufcnt = bufcnt;
957 info->fsp = fsp;
958 info->pp = pp;
959
960 (void) taskq_dispatch(fsp->hqueue->ra_task,
961 hsfs_ra_task, info, KM_SLEEP);
962 /*
963 * The I/O locked pages are unlocked in our taskq thread.
964 */
965 return (0);
966 }
967
968 /*
969 * Each file may have a different interleaving on disk. This makes
970 * things somewhat interesting. The gist is that there are some
971 * number of contiguous data sectors, followed by some other number
972 * of contiguous skip sectors. The sum of those two sets of sectors
973 * defines the interleave size. Unfortunately, it means that we generally
974 * can't simply read N sectors starting at a given offset to satisfy
975 * any given request.
976 *
977 * What we do is get the relevant memory pages via pvn_read_kluster(),
978 * then stride through the interleaves, setting up a buf for each
979 * sector that needs to be brought in. Instead of kmem_alloc'ing
980 * space for the sectors, though, we just point at the appropriate
981 * spot in the relevant page for each of them. This saves us a bunch
982 * of copying.
983 *
984  * NOTICE: The code below in hsfs_getapage is mostly the same as the code
985 * in hsfs_getpage_ra above (with some omissions). If you are
986 * making any change to this function, please also look at
987 * hsfs_getpage_ra.
988 */
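/*
 * Illustrative example (hypothetical values): suppose a file has
 * intlf_sz = 4 and intlf_sk = 2 with 2048-byte logical blocks, so
 * chunk_lbn_count = 6 and chunk_data_bytes = 8192. For a request at
 * file byte_offset 10000, which_chunk_lbn = 10000 / 8192 = 1,
 * offset_lbn = 1 * 6 = 6, offset_bytes = 6 * 2048 = 12288 and
 * offset_extra = 10000 % 8192 = 1808, so the data lives at
 * bof + xarsiz + 12288 + 1808 on the device.
 */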
989 /*ARGSUSED*/
990 static int
991 hsfs_getapage(struct vnode *vp, u_offset_t off, size_t len, uint_t *protp,
992 struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
993 enum seg_rw rw, struct cred *cred)
994 {
995 struct hsnode *hp;
996 struct hsfs *fsp;
997 int err;
998 struct buf *bufs;
999 caddr_t *vas;
1000 caddr_t va;
1001 struct page *pp, *searchp, *lastp;
1002 page_t *pagefound;
1003 offset_t bof;
1004 struct vnode *devvp;
1005 ulong_t byte_offset;
1006 size_t io_len_tmp;
1007 uint_t io_off, io_len;
1008 uint_t xlen;
1009 uint_t filsiz;
1010 uint_t secsize;
1011 uint_t bufcnt;
1012 uint_t bufsused;
1013 uint_t count;
1014 uint_t io_end;
1015 uint_t which_chunk_lbn;
1016 uint_t offset_lbn;
1017 uint_t offset_extra;
1018 offset_t offset_bytes;
1019 uint_t remaining_bytes;
1020 uint_t extension;
1021 int remainder; /* must be signed */
1022 int chunk_lbn_count;
1023 int chunk_data_bytes;
1024 int xarsiz;
1025 diskaddr_t driver_block;
1026 u_offset_t io_off_tmp;
1027 ksema_t *fio_done;
1028 int calcdone;
1029
1030 /*
1031 * We don't support asynchronous operation at the moment, so
1032 * just pretend we did it. If the pages are ever actually
1033 * needed, they'll get brought in then.
1034 */
1035 if (pl == NULL)
1036 return (0);
1037
1038 hp = VTOH(vp);
1039 fsp = VFS_TO_HSFS(vp->v_vfsp);
1040 devvp = fsp->hsfs_devvp;
1041 secsize = fsp->hsfs_vol.lbn_size; /* bytes per logical block */
1042
1043 /* file data size */
1044 filsiz = hp->hs_dirent.ext_size;
1045
1046 /* disk addr for start of file */
1047 bof = LBN_TO_BYTE((offset_t)hp->hs_dirent.ext_lbn, vp->v_vfsp);
1048
1049 /* xarsiz byte must be skipped for data */
1050 xarsiz = hp->hs_dirent.xar_len << fsp->hsfs_vol.lbn_shift;
1051
1052 /* how many logical blocks in an interleave (data+skip) */
1053 chunk_lbn_count = hp->hs_dirent.intlf_sz + hp->hs_dirent.intlf_sk;
1054
1055 if (chunk_lbn_count == 0) {
1056 chunk_lbn_count = 1;
1057 }
1058
1059 /*
1060 * Convert interleaving size into bytes. The zero case
1061 * (no interleaving) optimization is handled as a side-
1062 * effect of the read-ahead logic.
1063 */
1064 if (hp->hs_dirent.intlf_sz == 0) {
1065 chunk_data_bytes = LBN_TO_BYTE(1, vp->v_vfsp);
1066 /*
1067 * Optimization: If our pagesize is a multiple of LBN
1068 * bytes, we can avoid breaking up a page into individual
1069 * lbn-sized requests.
1070 */
1071 if (PAGESIZE % chunk_data_bytes == 0) {
1072 chunk_lbn_count = BYTE_TO_LBN(PAGESIZE, vp->v_vfsp);
1073 chunk_data_bytes = PAGESIZE;
1074 }
1075 } else {
1076 chunk_data_bytes =
1077 LBN_TO_BYTE(hp->hs_dirent.intlf_sz, vp->v_vfsp);
1078 }
1079
1080 reread:
1081 err = 0;
1082 pagefound = 0;
1083 calcdone = 0;
1084
1085 /*
1086 * Do some read-ahead. This mostly saves us a bit of
1087 * system cpu time more than anything else when doing
1088 * sequential reads. At some point, could do the
1089 * read-ahead asynchronously which might gain us something
1090 * on wall time, but it seems unlikely....
1091 *
1092 * We do the easy case here, which is to read through
1093 * the end of the chunk, minus whatever's at the end that
1094 * won't exactly fill a page.
1095 */
1096 if (hp->hs_ra_bytes > 0 && chunk_data_bytes != PAGESIZE) {
1097 which_chunk_lbn = (off + len) / chunk_data_bytes;
1098 extension = ((which_chunk_lbn + 1) * chunk_data_bytes) - off;
1099 extension -= (extension % PAGESIZE);
1100 } else {
1101 extension = roundup(len, PAGESIZE);
1102 }
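	/*
	 * Illustrative example (hypothetical values): with
	 * chunk_data_bytes = 8192, PAGESIZE = 4096, off = 20480 and
	 * len = 4096, which_chunk_lbn = 24576 / 8192 = 3 and
	 * extension = 4 * 8192 - 20480 = 12288, already a multiple of
	 * PAGESIZE, so we read through the end of the interleave chunk.
	 */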
1103
1104 atomic_inc_64(&fsp->total_pages_requested);
1105
1106 pp = NULL;
1107 again:
1108 /* search for page in buffer */
1109 if ((pagefound = page_exists(vp, off)) == 0) {
1110 /*
1111 * Need to really do disk IO to get the page.
1112 */
1113 if (!calcdone) {
1114 extension += hp->hs_ra_bytes;
1115
1116 /*
1117 * Some cd writers don't write sectors that aren't
1118 * used. Also, there's no point in reading sectors
1119 * we'll never look at. So, if we're asked to go
1120 * beyond the end of a file, truncate to the length
1121 * of that file.
1122 *
1123 * Additionally, this behaviour is required by section
1124 * 6.4.5 of ISO 9660:1988(E).
1125 */
1126 len = MIN(extension ? extension : PAGESIZE,
1127 filsiz - off);
1128
1129 /* A little paranoia. */
1130 ASSERT(len > 0);
1131
1132 /*
1133 * After all that, make sure we're asking for things
1134 * in units that bdev_strategy() will understand
1135 * (see bug 4202551).
1136 */
1137 len = roundup(len, DEV_BSIZE);
1138 calcdone = 1;
1139 }
1140
1141 pp = pvn_read_kluster(vp, off, seg, addr, &io_off_tmp,
1142 &io_len_tmp, off, len, 0);
1143
1144 if (pp == NULL) {
1145 /*
1146 * Pressure on memory, roll back readahead
1147 */
1148 hp->hs_num_contig = 0;
1149 hp->hs_ra_bytes = 0;
1150 hp->hs_prev_offset = 0;
1151 goto again;
1152 }
1153
1154 io_off = (uint_t)io_off_tmp;
1155 io_len = (uint_t)io_len_tmp;
1156
1157 /* check for truncation */
1158 /*
1159 * xxx Clean up and return EIO instead?
1160 * xxx Ought to go to u_offset_t for everything, but we
1161 * xxx call lots of things that want uint_t arguments.
1162 */
1163 ASSERT(io_off == io_off_tmp);
1164
1165 /*
1166 * get enough buffers for worst-case scenario
1167 * (i.e., no coalescing possible).
1168 */
1169 bufcnt = (len + secsize - 1) / secsize;
1170 bufs = kmem_zalloc(bufcnt * sizeof (struct buf), KM_SLEEP);
1171 vas = kmem_alloc(bufcnt * sizeof (caddr_t), KM_SLEEP);
1172
1173 /*
1174  * Allocate an array of semaphores if we are doing I/O
1175 * scheduling.
1176 */
1177 if (fsp->hqueue != NULL)
1178 fio_done = kmem_alloc(bufcnt * sizeof (ksema_t),
1179 KM_SLEEP);
1180 for (count = 0; count < bufcnt; count++) {
1181 bioinit(&bufs[count]);
1182 bufs[count].b_edev = devvp->v_rdev;
1183 bufs[count].b_dev = cmpdev(devvp->v_rdev);
1184 bufs[count].b_flags = B_NOCACHE|B_BUSY|B_READ;
1185 bufs[count].b_iodone = hsfs_iodone;
1186 bufs[count].b_vp = vp;
1187 bufs[count].b_file = vp;
1188 }
1189
1190 /*
1191 * If our filesize is not an integer multiple of PAGESIZE,
1192 * we zero that part of the last page that's between EOF and
1193 * the PAGESIZE boundary.
1194 */
1195 xlen = io_len & PAGEOFFSET;
1196 if (xlen != 0)
1197 pagezero(pp->p_prev, xlen, PAGESIZE - xlen);
1198
1199 va = NULL;
1200 lastp = NULL;
1201 searchp = pp;
1202 io_end = io_off + io_len;
1203 for (count = 0, byte_offset = io_off;
1204 byte_offset < io_end; count++) {
1205 ASSERT(count < bufcnt);
1206
1207 /* Compute disk address for interleaving. */
1208
1209 /* considered without skips */
1210 which_chunk_lbn = byte_offset / chunk_data_bytes;
1211
1212 /* factor in skips */
1213 offset_lbn = which_chunk_lbn * chunk_lbn_count;
1214
1215 /* convert to physical byte offset for lbn */
1216 offset_bytes = LBN_TO_BYTE(offset_lbn, vp->v_vfsp);
1217
1218 /* don't forget offset into lbn */
1219 offset_extra = byte_offset % chunk_data_bytes;
1220
1221 /* get virtual block number for driver */
1222 driver_block =
1223 lbtodb(bof + xarsiz + offset_bytes + offset_extra);
1224
1225 if (lastp != searchp) {
1226 /* this branch taken first time through loop */
1227 va = vas[count] =
1228 ppmapin(searchp, PROT_WRITE, (caddr_t)-1);
1229 /* ppmapin() guarantees not to return NULL */
1230 } else {
1231 vas[count] = NULL;
1232 }
1233
1234 bufs[count].b_un.b_addr = va + byte_offset % PAGESIZE;
1235 bufs[count].b_offset =
1236 (offset_t)(byte_offset - io_off + off);
1237
1238 /*
1239 * We specifically use the b_lblkno member here
1240 * as even in the 32 bit world driver_block can
1241 * get very large in line with the ISO9660 spec.
1242 */
1243
1244 bufs[count].b_lblkno = driver_block;
1245
1246 remaining_bytes =
1247 ((which_chunk_lbn + 1) * chunk_data_bytes)
1248 - byte_offset;
1249
1250 /*
1251 * remaining_bytes can't be zero, as we derived
1252 * which_chunk_lbn directly from byte_offset.
1253 */
1254 if ((remaining_bytes + byte_offset) < (off + len)) {
1255 /* coalesce-read the rest of the chunk */
1256 bufs[count].b_bcount = remaining_bytes;
1257 } else {
1258 /* get the final bits */
1259 bufs[count].b_bcount = off + len - byte_offset;
1260 }
1261
1262 /*
1263 * It would be nice to do multiple pages'
1264 * worth at once here when the opportunity
1265 * arises, as that has been shown to improve
1266 * our wall time. However, to do that
1267 * requires that we use the pageio subsystem,
1268 * which doesn't mix well with what we're
1269 * already using here. We can't use pageio
1270 * all the time, because that subsystem
1271 * assumes that a page is stored in N
1272 * contiguous blocks on the device.
1273 * Interleaving violates that assumption.
1274 *
1275 * Update: This is now not so big a problem
1276 * because of the I/O scheduler sitting below
1277 * that can re-order and coalesce I/O requests.
1278 */
1279
1280 remainder = PAGESIZE - (byte_offset % PAGESIZE);
1281 if (bufs[count].b_bcount > remainder) {
1282 bufs[count].b_bcount = remainder;
1283 }
1284
1285 bufs[count].b_bufsize = bufs[count].b_bcount;
1286 if (((offset_t)byte_offset + bufs[count].b_bcount) >
1287 HS_MAXFILEOFF) {
1288 break;
1289 }
1290 byte_offset += bufs[count].b_bcount;
1291
1292 if (fsp->hqueue == NULL) {
1293 (void) bdev_strategy(&bufs[count]);
1294
1295 } else {
1296 /*
1297 * We are scheduling I/O so we need to enqueue
1298 * requests rather than calling bdev_strategy
1299 * here. A later invocation of the scheduling
1300 * function will take care of doing the actual
1301 * I/O as it selects requests from the queue as
1302 * per the scheduling logic.
1303 */
1304 struct hio *hsio = kmem_cache_alloc(hio_cache,
1305 KM_SLEEP);
1306
1307 sema_init(&fio_done[count], 0, NULL,
1308 SEMA_DEFAULT, NULL);
1309 hsio->bp = &bufs[count];
1310 hsio->sema = &fio_done[count];
1311 hsio->io_lblkno = bufs[count].b_lblkno;
1312 hsio->nblocks = howmany(hsio->bp->b_bcount,
1313 DEV_BSIZE);
1314
1315 /* used for deadline */
1316 hsio->io_timestamp =
1317 drv_hztousec(ddi_get_lbolt());
1318
1319 /* for I/O coalescing */
1320 hsio->contig_chain = NULL;
1321 hsched_enqueue_io(fsp, hsio, 0);
1322 }
1323
1324 lwp_stat_update(LWP_STAT_INBLK, 1);
1325 lastp = searchp;
1326 if ((remainder - bufs[count].b_bcount) < 1) {
1327 searchp = searchp->p_next;
1328 }
1329 }
1330
1331 bufsused = count;
1332 /* Now wait for everything to come in */
1333 if (fsp->hqueue == NULL) {
1334 for (count = 0; count < bufsused; count++) {
1335 if (err == 0) {
1336 err = biowait(&bufs[count]);
1337 } else
1338 (void) biowait(&bufs[count]);
1339 }
1340 } else {
1341 for (count = 0; count < bufsused; count++) {
1342 struct buf *wbuf;
1343
1344 /*
1345 * Invoke scheduling function till our buf
1346 * is processed. In doing this it might
1347 * process bufs enqueued by other threads
1348 * which is good.
1349 */
1350 wbuf = &bufs[count];
1351 DTRACE_PROBE1(hsfs_io_wait, struct buf *, wbuf);
1352 while (sema_tryp(&fio_done[count]) == 0) {
1353 /*
1354 * hsched_invoke_strategy will return 1
1355 * if the I/O queue is empty. This means
1356 * that there is another thread who has
1357 * issued our buf and is waiting. So we
1358 * just block instead of spinning.
1359 */
1360 if (hsched_invoke_strategy(fsp)) {
1361 sema_p(&fio_done[count]);
1362 break;
1363 }
1364 }
1365 sema_destroy(&fio_done[count]);
1366 DTRACE_PROBE1(hsfs_io_done, struct buf *, wbuf);
1367
1368 if (err == 0) {
1369 err = geterror(wbuf);
1370 }
1371 }
1372 kmem_free(fio_done, bufcnt * sizeof (ksema_t));
1373 }
1374
1375 /* Don't leak resources */
1376 for (count = 0; count < bufcnt; count++) {
1377 biofini(&bufs[count]);
1378 if (count < bufsused && vas[count] != NULL) {
1379 ppmapout(vas[count]);
1380 }
1381 }
1382
1383 kmem_free(vas, bufcnt * sizeof (caddr_t));
1384 kmem_free(bufs, bufcnt * sizeof (struct buf));
1385 }
1386
1387 if (err) {
1388 pvn_read_done(pp, B_ERROR);
1389 return (err);
1390 }
1391
1392 /*
1393 * Lock the requested page, and the one after it if possible.
1394 * Don't bother if our caller hasn't given us a place to stash
1395 * the page pointers, since otherwise we'd lock pages that would
1396 * never get unlocked.
1397 */
1398 if (pagefound) {
1399 int index;
1400 ulong_t soff;
1401
1402 /*
1403 * Make sure it's in memory before we say it's here.
1404 */
1405 if ((pp = page_lookup(vp, off, SE_SHARED)) == NULL) {
1406 hsfs_lostpage++;
1407 goto reread;
1408 }
1409
1410 pl[0] = pp;
1411 index = 1;
1412 atomic_inc_64(&fsp->cache_read_pages);
1413
1414 /*
1415 * Try to lock the next page, if it exists, without
1416 * blocking.
1417 */
1418 plsz -= PAGESIZE;
1419 /* LINTED (plsz is unsigned) */
1420 for (soff = off + PAGESIZE; plsz > 0;
1421 soff += PAGESIZE, plsz -= PAGESIZE) {
1422 pp = page_lookup_nowait(vp, (u_offset_t)soff,
1423 SE_SHARED);
1424 if (pp == NULL)
1425 break;
1426 pl[index++] = pp;
1427 }
1428 pl[index] = NULL;
1429
1430 /*
1431 * Schedule a semi-asynchronous readahead if we are
1432 * accessing the last cached page for the current
1433 * file.
1434 *
1435 * Doing this here means that readaheads will be
1436 * issued only if cache-hits occur. This is an advantage
1437 * since cache-hits would mean that readahead is giving
1438 * the desired benefit. If cache-hits do not occur there
1439 * is no point in reading ahead of time - the system
1440 * is loaded anyway.
1441 */
1442 if (fsp->hqueue != NULL &&
1443 hp->hs_prev_offset - off == PAGESIZE &&
1444 hp->hs_prev_offset < filsiz &&
1445 hp->hs_ra_bytes > 0 &&
1446 !page_exists(vp, hp->hs_prev_offset)) {
1447 (void) hsfs_getpage_ra(vp, hp->hs_prev_offset, seg,
1448 addr + PAGESIZE, hp, fsp, xarsiz, bof,
1449 chunk_lbn_count, chunk_data_bytes);
1450 }
1451
1452 return (0);
1453 }
1454
1455 if (pp != NULL) {
1456 pvn_plist_init(pp, pl, plsz, off, io_len, rw);
1457 }
1458
1459 return (err);
1460 }
1461
1462 /*ARGSUSED*/
1463 static int
1464 hsfs_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
1465 struct page *pl[], size_t plsz, struct seg *seg, caddr_t addr,
1466 enum seg_rw rw, struct cred *cred, caller_context_t *ct)
1467 {
1468 uint_t filsiz;
1469 struct hsfs *fsp;
1470 struct hsnode *hp;
1471
1472 fsp = VFS_TO_HSFS(vp->v_vfsp);
1473 hp = VTOH(vp);
1474
1475 /* does not support write */
1476 if (rw == S_WRITE) {
1477 return (EROFS);
1478 }
1479
1480 if (vp->v_flag & VNOMAP) {
1481 return (ENOSYS);
1482 }
1483
1484 ASSERT(off <= HS_MAXFILEOFF);
1485
1486 /*
1487 * Determine file data size for EOF check.
1488 */
1489 filsiz = hp->hs_dirent.ext_size;
1490 if ((off + len) > (offset_t)(filsiz + PAGEOFFSET) && seg != segkmap)
1491 return (EFAULT); /* beyond EOF */
1492
1493 /*
1494 * Async Read-ahead computation.
1495 * This attempts to detect sequential access pattern and
1496 * enables reading extra pages ahead of time.
1497 */
1498 if (fsp->hqueue != NULL) {
1499 /*
1500 * This check for sequential access also takes into
1501 * account segmap weirdness when reading in chunks
1502 * less than the segmap size of 8K.
1503 */
1504 if (hp->hs_prev_offset == off || (off <
1505 hp->hs_prev_offset && off + MAX(len, PAGESIZE)
1506 >= hp->hs_prev_offset)) {
1507 if (hp->hs_num_contig <
1508 (seq_contig_requests - 1)) {
1509 hp->hs_num_contig++;
1510
1511 } else {
1512 /*
1513  * We increase the readahead quantum up to
1514  * a predefined max (max_ra_bytes), which
1515  * is a multiple of PAGESIZE.
1516 */
1517 if (hp->hs_ra_bytes <
1518 fsp->hqueue->max_ra_bytes) {
1519 hp->hs_ra_bytes += PAGESIZE;
1520 }
1521 }
1522 } else {
1523 /*
1524 * Not contiguous so reduce read ahead counters.
1525 */
1526 if (hp->hs_ra_bytes > 0)
1527 hp->hs_ra_bytes -= PAGESIZE;
1528
1529 if (hp->hs_ra_bytes <= 0) {
1530 hp->hs_ra_bytes = 0;
1531 if (hp->hs_num_contig > 0)
1532 hp->hs_num_contig--;
1533 }
1534 }
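		/*
		 * Illustrative example (hypothetical values): with
		 * PAGESIZE = 8192, a request for 4096 bytes at offset 8192
		 * arriving after hs_prev_offset was set to 16384 still
		 * matches the test above, since 8192 < 16384 and
		 * 8192 + MAX(4096, PAGESIZE) >= 16384, so it is counted
		 * as sequential rather than resetting the readahead state.
		 */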
1535 /*
1536  * Length must be rounded up to a page boundary,
1537  * since we read in units of pages.
1538 */
1539 hp->hs_prev_offset = off + roundup(len, PAGESIZE);
1540 DTRACE_PROBE1(hsfs_compute_ra, struct hsnode *, hp);
1541 }
1542 if (protp != NULL)
1543 *protp = PROT_ALL;
1544
1545 return (pvn_getpages(hsfs_getapage, vp, off, len, protp, pl, plsz,
1546 seg, addr, rw, cred));
1547 }
1548
1549
1550
1551 /*
1552  * This function should never be called. We need it only so that it
1553  * can be passed as an argument to other functions.
1554 */
1555 /*ARGSUSED*/
1556 int
1557 hsfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, size_t *lenp,
1558 int flags, cred_t *cr)
1559 {
1560 /* should never happen - just destroy it */
1561 cmn_err(CE_NOTE, "hsfs_putapage: dirty HSFS page");
1562 pvn_write_done(pp, B_ERROR | B_WRITE | B_INVAL | B_FORCE | flags);
1563 return (0);
1564 }
1565
1566
1567 /*
1568 * The only flags we support are B_INVAL, B_FREE and B_DONTNEED.
1569 * B_INVAL is set by:
1570 *
1571 * 1) the MC_SYNC command of memcntl(2) to support the MS_INVALIDATE flag.
1572 * 2) the MC_ADVISE command of memcntl(2) with the MADV_DONTNEED advice
1573 * which translates to an MC_SYNC with the MS_INVALIDATE flag.
1574 *
1575 * The B_FREE (as well as the B_DONTNEED) flag is set when the
1576 * MADV_SEQUENTIAL advice has been used. VOP_PUTPAGE is invoked
1577 * from SEGVN to release pages behind a pagefault.
1578 */
1579 /*ARGSUSED*/
1580 static int
1581 hsfs_putpage(struct vnode *vp, offset_t off, size_t len, int flags,
1582 struct cred *cr, caller_context_t *ct)
1583 {
1584 int error = 0;
1585
1586 if (vp->v_count == 0) {
1587 panic("hsfs_putpage: bad v_count");
1588 /*NOTREACHED*/
1589 }
1590
1591 if (vp->v_flag & VNOMAP)
1592 return (ENOSYS);
1593
1594 ASSERT(off <= HS_MAXFILEOFF);
1595
1596 if (!vn_has_cached_data(vp)) /* no pages mapped */
1597 return (0);
1598
1599 if (len == 0) { /* from 'off' to EOF */
1600 error = pvn_vplist_dirty(vp, off, hsfs_putapage, flags, cr);
1601 } else {
1602 offset_t end_off = off + len;
1603 offset_t file_size = VTOH(vp)->hs_dirent.ext_size;
1604 offset_t io_off;
1605
1606 file_size = (file_size + PAGESIZE - 1) & PAGEMASK;
1607 if (end_off > file_size)
1608 end_off = file_size;
1609
1610 for (io_off = off; io_off < end_off; io_off += PAGESIZE) {
1611 page_t *pp;
1612
1613 /*
1614 * We insist on getting the page only if we are
1615 * about to invalidate, free or write it and
1616 * the B_ASYNC flag is not set.
1617 */
1618 if ((flags & B_INVAL) || ((flags & B_ASYNC) == 0)) {
1619 pp = page_lookup(vp, io_off,
1620 (flags & (B_INVAL | B_FREE)) ?
1621 SE_EXCL : SE_SHARED);
1622 } else {
1623 pp = page_lookup_nowait(vp, io_off,
1624 (flags & B_FREE) ? SE_EXCL : SE_SHARED);
1625 }
1626
1627 if (pp == NULL)
1628 continue;
1629
1630 /*
1631 * Normally pvn_getdirty() should return 0, which
1632  * implies that it has done the job for us.
1633 * The shouldn't-happen scenario is when it returns 1.
1634 * This means that the page has been modified and
1635 * needs to be put back.
1636 * Since we can't write on a CD, we fake a failed
1637 * I/O and force pvn_write_done() to destroy the page.
1638 */
1639 if (pvn_getdirty(pp, flags) == 1) {
1640 cmn_err(CE_NOTE,
1641 "hsfs_putpage: dirty HSFS page");
1642 pvn_write_done(pp, flags |
1643 B_ERROR | B_WRITE | B_INVAL | B_FORCE);
1644 }
1645 }
1646 }
1647 return (error);
1648 }
1649
1650
1651 /*ARGSUSED*/
1652 static int
1653 hsfs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addrp,
1654 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cred,
1655 caller_context_t *ct)
1656 {
1657 struct segvn_crargs vn_a;
1658 int error;
1659
1660 /* VFS_RECORD(vp->v_vfsp, VS_MAP, VS_CALL); */
1661
1662 if (vp->v_flag & VNOMAP)
1663 return (ENOSYS);
1664
1665 if ((prot & PROT_WRITE) && (flags & MAP_SHARED))
1666 return (ENOSYS);
1667
1668 if (off > HS_MAXFILEOFF || off < 0 ||
1669 (off + len) < 0 || (off + len) > HS_MAXFILEOFF)
1670 return (ENXIO);
1671
1672 if (vp->v_type != VREG) {
1673 return (ENODEV);
1674 }
1675
1676 /*
1677 * If file is being locked, disallow mapping.
1678 */
1679 if (vn_has_mandatory_locks(vp, VTOH(vp)->hs_dirent.mode))
1680 return (EAGAIN);
1681
1682 as_rangelock(as);
1683 error = choose_addr(as, addrp, len, off, ADDR_VACALIGN, flags);
1684 if (error != 0) {
1685 as_rangeunlock(as);
1686 return (error);
1687 }
1688
1689 vn_a.vp = vp;
1690 vn_a.offset = off;
1691 vn_a.type = flags & MAP_TYPE;
1692 vn_a.prot = prot;
1693 vn_a.maxprot = maxprot;
1694 vn_a.flags = flags & ~MAP_TYPE;
1695 vn_a.cred = cred;
1696 vn_a.amp = NULL;
1697 vn_a.szc = 0;
1698 vn_a.lgrp_mem_policy_flags = 0;
1699
1700 error = as_map(as, *addrp, len, segvn_create, &vn_a);
1701 as_rangeunlock(as);
1702 return (error);
1703 }
1704
1705 /* ARGSUSED */
1706 static int
1707 hsfs_addmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
1708 size_t len, uchar_t prot, uchar_t maxprot, uint_t flags, struct cred *cr,
1709 caller_context_t *ct)
1710 {
1711 struct hsnode *hp;
1712
1713 if (vp->v_flag & VNOMAP)
1714 return (ENOSYS);
1715
1716 hp = VTOH(vp);
1717 mutex_enter(&hp->hs_contents_lock);
1718 hp->hs_mapcnt += btopr(len);
1719 mutex_exit(&hp->hs_contents_lock);
1720 return (0);
1721 }
1722
1723 /*ARGSUSED*/
1724 static int
1725 hsfs_delmap(struct vnode *vp, offset_t off, struct as *as, caddr_t addr,
1726 size_t len, uint_t prot, uint_t maxprot, uint_t flags, struct cred *cr,
1727 caller_context_t *ct)
1728 {
1729 struct hsnode *hp;
1730
1731 if (vp->v_flag & VNOMAP)
1732 return (ENOSYS);
1733
1734 hp = VTOH(vp);
1735 mutex_enter(&hp->hs_contents_lock);
1736 hp->hs_mapcnt -= btopr(len); /* Count released mappings */
1737 ASSERT(hp->hs_mapcnt >= 0);
1738 mutex_exit(&hp->hs_contents_lock);
1739 return (0);
1740 }
1741
1742 /* ARGSUSED */
1743 static int
1744 hsfs_seek(struct vnode *vp, offset_t ooff, offset_t *noffp,
1745 caller_context_t *ct)
1746 {
1747 return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
1748 }
1749
1750 /* ARGSUSED */
1751 static int
1752 hsfs_frlock(struct vnode *vp, int cmd, struct flock64 *bfp, int flag,
1753 offset_t offset, struct flk_callback *flk_cbp, cred_t *cr,
1754 caller_context_t *ct)
1755 {
1756 struct hsnode *hp = VTOH(vp);
1757
1758 /*
1759 * If the file is being mapped, disallow fs_frlock.
1760 * We are not holding the hs_contents_lock while checking
1761 * hs_mapcnt because the current locking strategy drops all
1762 * locks before calling fs_frlock.
1763 * So, hs_mapcnt could change before we enter fs_frlock making
1764 * it meaningless to have held hs_contents_lock in the first place.
1765 */
1766 if (hp->hs_mapcnt > 0 && MANDLOCK(vp, hp->hs_dirent.mode))
1767 return (EAGAIN);
1768
1769 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
1770 }
1771
1772 static int
1773 hsched_deadline_compare(const void *x1, const void *x2)
1774 {
1775 const struct hio *h1 = x1;
1776 const struct hio *h2 = x2;
1777
1778 if (h1->io_timestamp < h2->io_timestamp)
1779 return (-1);
1780 if (h1->io_timestamp > h2->io_timestamp)
1781 return (1);
1782
1783 if (h1->io_lblkno < h2->io_lblkno)
1784 return (-1);
1785 if (h1->io_lblkno > h2->io_lblkno)
1786 return (1);
1787
1788 if (h1 < h2)
1789 return (-1);
1790 if (h1 > h2)
1791 return (1);
1792
1793 return (0);
1794 }
1795
1796 static int
1797 hsched_offset_compare(const void *x1, const void *x2)
1798 {
1799 const struct hio *h1 = x1;
1800 const struct hio *h2 = x2;
1801
1802 if (h1->io_lblkno < h2->io_lblkno)
1803 return (-1);
1804 if (h1->io_lblkno > h2->io_lblkno)
1805 return (1);
1806
1807 if (h1 < h2)
1808 return (-1);
1809 if (h1 > h2)
1810 return (1);
1811
1812 return (0);
1813 }
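/*
 * Note that both comparators fall back to comparing the hio addresses
 * themselves. This guarantees a total order, so two requests with the
 * same block number (or timestamp) can still be kept as distinct
 * entries in the AVL trees.
 */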
1814
1815 void
1816 hsched_init_caches(void)
1817 {
1818 hio_cache = kmem_cache_create("hsfs_hio_cache",
1819 sizeof (struct hio), 0, NULL,
1820 NULL, NULL, NULL, NULL, 0);
1821
1822 hio_info_cache = kmem_cache_create("hsfs_hio_info_cache",
1823 sizeof (struct hio_info), 0, NULL,
1824 NULL, NULL, NULL, NULL, 0);
1825 }
1826
1827 void
1828 hsched_fini_caches(void)
1829 {
1830 kmem_cache_destroy(hio_cache);
1831 kmem_cache_destroy(hio_info_cache);
1832 }
1833
1834 /*
1835 * Initialize I/O scheduling structures. This is called via hsfs_mount
1836 */
1837 void
1838 hsched_init(struct hsfs *fsp, int fsid, struct modlinkage *modlinkage)
1839 {
1840 struct hsfs_queue *hqueue = fsp->hqueue;
1841 struct vnode *vp = fsp->hsfs_devvp;
1842
1843 /* TaskQ name of the form: hsched_task_ + stringof(int) */
1844 char namebuf[23];
1845 int error, err;
1846 struct dk_cinfo info;
1847 ldi_handle_t lh;
1848 ldi_ident_t li;
1849
1850 /*
1851 * Default maxtransfer = 16k chunk
1852 */
1853 hqueue->dev_maxtransfer = 16384;
1854
1855 /*
1856 * Try to fetch the maximum device transfer size. This is used to
1857 * ensure that a coalesced block does not exceed the maxtransfer.
1858 */
1859 err = ldi_ident_from_mod(modlinkage, &li);
1860 if (err) {
1861 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
1862 cmn_err(CE_NOTE, "hsched_init: ldi_ident_from_mod err=%d\n",
1863 err);
1864 goto set_ra;
1865 }
1866
1867 err = ldi_open_by_dev(&(vp->v_rdev), OTYP_CHR, FREAD, CRED(), &lh, li);
1868 ldi_ident_release(li);
1869 if (err) {
1870 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
1871 cmn_err(CE_NOTE, "hsched_init: ldi_open err=%d\n", err);
1872 goto set_ra;
1873 }
1874
1875 error = ldi_ioctl(lh, DKIOCINFO, (intptr_t)&info, FKIOCTL,
1876 CRED(), &err);
1877 err = ldi_close(lh, FREAD, CRED());
1878 if (err) {
1879 cmn_err(CE_NOTE, "hsched_init: Querying device failed");
1880 cmn_err(CE_NOTE, "hsched_init: ldi_close err=%d\n", err);
1881 }
1882
1883 if (error == 0) {
1884 hqueue->dev_maxtransfer = ldbtob(info.dki_maxtransfer);
1885 }
1886
1887 set_ra:
1888 /*
1889  * Max size of data to read ahead for a sequential access pattern.
1890  * Kept conservative to avoid letting the underlying CD drive spin
1891  * down in case the application is reading slowly.
1892  * We read ahead up to a max of 8 pages (PAGESIZE * 8).
1893 */
1894 hqueue->max_ra_bytes = PAGESIZE * 8;
1895
1896 mutex_init(&(hqueue->hsfs_queue_lock), NULL, MUTEX_DEFAULT, NULL);
1897 mutex_init(&(hqueue->strategy_lock), NULL, MUTEX_DEFAULT, NULL);
1898 avl_create(&(hqueue->read_tree), hsched_offset_compare,
1899 sizeof (struct hio), offsetof(struct hio, io_offset_node));
1900 avl_create(&(hqueue->deadline_tree), hsched_deadline_compare,
1901 sizeof (struct hio), offsetof(struct hio, io_deadline_node));
1902
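	/*
	 * The readahead taskq is dynamic, so hsfs_taskq_nthreads is only an
	 * upper bound on the number of worker threads. The maxalloc value of
	 * 104857600 / PAGESIZE bounds the number of pending readahead tasks
	 * to roughly 100MB worth of pages.
	 */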
1903 (void) snprintf(namebuf, sizeof (namebuf), "hsched_task_%d", fsid);
1904 hqueue->ra_task = taskq_create(namebuf, hsfs_taskq_nthreads,
1905 minclsyspri + 2, 1, 104857600 / PAGESIZE, TASKQ_DYNAMIC);
1906
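	/*
	 * "next" is the C-LOOK sentinel (the last request serviced by
	 * hsched_invoke_strategy) and "nbuf" is the single pre-allocated
	 * buf re-used for coalesced transfers; one buf is enough because
	 * hsched_invoke_strategy is serialized on strategy_lock.
	 */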
1907 hqueue->next = NULL;
1908 hqueue->nbuf = kmem_zalloc(sizeof (struct buf), KM_SLEEP);
1909 }
1910
1911 void
1912 hsched_fini(struct hsfs_queue *hqueue)
1913 {
1914 if (hqueue != NULL) {
1915 /*
1916 * Remove the sentinel if there was one.
1917 */
1918 if (hqueue->next != NULL) {
1919 avl_remove(&hqueue->read_tree, hqueue->next);
1920 kmem_cache_free(hio_cache, hqueue->next);
1921 }
1922 avl_destroy(&(hqueue->read_tree));
1923 avl_destroy(&(hqueue->deadline_tree));
1924 mutex_destroy(&(hqueue->hsfs_queue_lock));
1925 mutex_destroy(&(hqueue->strategy_lock));
1926
1927 /*
1928 * If there are any existing readahead threads running
1929 * taskq_destroy will wait for them to finish.
1930 */
1931 taskq_destroy(hqueue->ra_task);
1932 kmem_free(hqueue->nbuf, sizeof (struct buf));
1933 }
1934 }
1935
1936 /*
1937 * Determine if two I/O requests are adjacent to each other so
1938  * that they can be coalesced.
1939 */
1940 #define IS_ADJACENT(io, nio) \
1941 (((io)->io_lblkno + (io)->nblocks == (nio)->io_lblkno) && \
1942 (io)->bp->b_edev == (nio)->bp->b_edev)
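
/*
 * For example, an hio covering logical blocks [100, 103] (io_lblkno == 100,
 * nblocks == 4) is adjacent to one starting at block 104 on the same device,
 * so the two can be merged into a single, larger transfer.
 */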
1943
1944 /*
1945 * This performs the actual I/O scheduling logic. We use the Circular
1946 * Look algorithm here. Sort the I/O requests in ascending order of
1947 * logical block number and process them starting with the lowest
1948 * numbered block and progressing towards higher block numbers in the
1949 * queue. Once there are no more higher numbered blocks, start again
1950  * with the lowest one. This works well for CD/DVD media, as the head
1951  * keeps moving in one direction along the outward spiral track,
1952  * avoiding seeks as much as possible. The re-ordering also allows
1953 * us to coalesce adjacent requests into one larger request.
1954 * This is thus essentially a 1-way Elevator with front merging.
1955 *
1956 * In addition each read request here has a deadline and will be
1957 * processed out of turn if the deadline (500ms) expires.
1958 *
1959 * This function is necessarily serialized via hqueue->strategy_lock.
1960 * This function sits just below hsfs_getapage and processes all read
1961  * requests originating from that function.
1962 */
1963 int
1964 hsched_invoke_strategy(struct hsfs *fsp)
1965 {
1966 struct hsfs_queue *hqueue;
1967 struct buf *nbuf;
1968 struct hio *fio, *nio, *tio, *prev, *last;
1969 size_t bsize, soffset, offset, data;
1970 int bioret, bufcount;
1971 struct vnode *fvp;
1972 ksema_t *io_done;
1973 caddr_t iodata;
1974
1975 hqueue = fsp->hqueue;
1976 mutex_enter(&hqueue->strategy_lock);
1977 mutex_enter(&hqueue->hsfs_queue_lock);
1978
1979 /*
1980 * Check for Deadline expiration first
1981 */
1982 fio = avl_first(&hqueue->deadline_tree);
1983
1984 /*
1985  * Paranoid check for an empty I/O queue. Both the deadline
1986  * and read trees contain the same data sorted in different
1987  * ways, so an empty deadline tree implies an empty read tree.
1988 */
1989 if (fio == NULL) {
1990 /*
1991 * Remove the sentinel if there was one.
1992 */
1993 if (hqueue->next != NULL) {
1994 avl_remove(&hqueue->read_tree, hqueue->next);
1995 kmem_cache_free(hio_cache, hqueue->next);
1996 hqueue->next = NULL;
1997 }
1998 mutex_exit(&hqueue->hsfs_queue_lock);
1999 mutex_exit(&hqueue->strategy_lock);
2000 return (1);
2001 }
2002
2003 if (drv_hztousec(ddi_get_lbolt()) - fio->io_timestamp
2004 < HSFS_READ_DEADLINE) {
2005 /*
2006 * Apply standard scheduling logic. This uses the
2007 * C-LOOK approach. Process I/O requests in ascending
2008 * order of logical block address till no subsequent
2009 * higher numbered block request remains. Then start
2010 * again from the lowest numbered block in the queue.
2011 *
2012 * We do this cheaply here by means of a sentinel.
2013  * The last I/O structure processed by the previous
2014  * invocation of this function is left dangling in the
2015  * read_tree so that we can easily scan to the next
2016  * higher-numbered request and then remove the sentinel.
2017 */
2018 fio = NULL;
2019 if (hqueue->next != NULL) {
2020 fio = AVL_NEXT(&hqueue->read_tree, hqueue->next);
2021 avl_remove(&hqueue->read_tree, hqueue->next);
2022 kmem_cache_free(hio_cache, hqueue->next);
2023 hqueue->next = NULL;
2024 }
2025 if (fio == NULL) {
2026 fio = avl_first(&hqueue->read_tree);
2027 }
2028 } else if (hqueue->next != NULL) {
2029 DTRACE_PROBE1(hsfs_deadline_expiry, struct hio *, fio);
2030
2031 avl_remove(&hqueue->read_tree, hqueue->next);
2032 kmem_cache_free(hio_cache, hqueue->next);
2033 hqueue->next = NULL;
2034 }
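
	/*
	 * At this point fio is either the next request in C-LOOK order
	 * (deadline not yet expired) or the oldest queued request (deadline
	 * expired), and any stale sentinel from the previous invocation has
	 * been removed from the read_tree and freed.
	 */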
2035
2036 /*
2037 * In addition we try to coalesce contiguous
2038 * requests into one bigger request.
2039 */
2040 bufcount = 1;
2041 bsize = ldbtob(fio->nblocks);
2042 fvp = fio->bp->b_file;
2043 nio = AVL_NEXT(&hqueue->read_tree, fio);
2044 tio = fio;
2045 while (nio != NULL && IS_ADJACENT(tio, nio) &&
2046 bsize < hqueue->dev_maxtransfer) {
2047 avl_remove(&hqueue->deadline_tree, tio);
2048 avl_remove(&hqueue->read_tree, tio);
2049 tio->contig_chain = nio;
2050 bsize += ldbtob(nio->nblocks);
2051 prev = tio;
2052 tio = nio;
2053
2054 /*
2055 * This check is required to detect the case where
2056 * we are merging adjacent buffers belonging to
2057 * different files. fvp is used to set the b_file
2058 * parameter in the coalesced buf. b_file is used
2059  * by DTrace, so we do not want requests belonging to
2060  * two different files to be accrued to any one file.
2061 */
2062 if (fvp && tio->bp->b_file != fvp) {
2063 fvp = NULL;
2064 }
2065
2066 nio = AVL_NEXT(&hqueue->read_tree, nio);
2067 bufcount++;
2068 }
2069
2070 /*
2071 * tio is not removed from the read_tree as it serves as a sentinel
2072 * to cheaply allow us to scan to the next higher numbered I/O
2073 * request.
2074 */
2075 hqueue->next = tio;
2076 avl_remove(&hqueue->deadline_tree, tio);
2077 mutex_exit(&hqueue->hsfs_queue_lock);
2078 DTRACE_PROBE3(hsfs_io_dequeued, struct hio *, fio, int, bufcount,
2079 size_t, bsize);
2080
2081 /*
2082  * The benefit of coalescing occurs if the savings in I/O outweigh
2083 * the cost of doing the additional work below.
2084 * It was observed that coalescing 2 buffers results in diminishing
2085 * returns, so we do coalescing if we have >2 adjacent bufs.
2086 */
2087 if (bufcount > hsched_coalesce_min) {
2088 /*
2089 * We have coalesced blocks. First allocate mem and buf for
2090 * the entire coalesced chunk.
2091  * Since we are guaranteed to be single-threaded here, we pre-allocate
2092  * one buf at mount time and re-use it every time. This is a
2093  * synthesized buf structure that uses a kmem_alloc'd chunk, not
2094  * quite a normal buf attached to pages.
2095 */
2096 fsp->coalesced_bytes += bsize;
2097 nbuf = hqueue->nbuf;
2098 bioinit(nbuf);
2099 nbuf->b_edev = fio->bp->b_edev;
2100 nbuf->b_dev = fio->bp->b_dev;
2101 nbuf->b_flags = fio->bp->b_flags;
2102 nbuf->b_iodone = fio->bp->b_iodone;
2103 iodata = kmem_alloc(bsize, KM_SLEEP);
2104 nbuf->b_un.b_addr = iodata;
2105 nbuf->b_lblkno = fio->bp->b_lblkno;
2106 nbuf->b_vp = fvp;
2107 nbuf->b_file = fvp;
2108 nbuf->b_bcount = bsize;
2109 nbuf->b_bufsize = bsize;
2110
2111 DTRACE_PROBE3(hsfs_coalesced_io_start, struct hio *, fio, int,
2112 bufcount, size_t, bsize);
2113
2114 /*
2115 * Perform I/O for the coalesced block.
2116 */
2117 (void) bdev_strategy(nbuf);
2118
2119 /*
2120 * Duplicate the last IO node to leave the sentinel alone.
2121 * The sentinel is freed in the next invocation of this
2122 * function.
2123 */
2124 prev->contig_chain = kmem_cache_alloc(hio_cache, KM_SLEEP);
2125 prev->contig_chain->bp = tio->bp;
2126 prev->contig_chain->sema = tio->sema;
2127 tio = prev->contig_chain;
2128 tio->contig_chain = NULL;
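		/*
		 * soffset is the byte offset, on the device, of the start of
		 * the coalesced transfer; it is used below to locate each
		 * original buf's data within the iodata staging buffer.
		 */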
2129 soffset = ldbtob(fio->bp->b_lblkno);
2130 nio = fio;
2131
2132 bioret = biowait(nbuf);
2133 data = bsize - nbuf->b_resid;
2134 biofini(nbuf);
2135 mutex_exit(&hqueue->strategy_lock);
2136
2137 /*
2138 * We use the b_resid parameter to detect how much
2139  * data was successfully transferred. We signal success
2140  * for all of the original (pre-coalescing) bufs that were
2141  * fully retrieved; the rest, if any, are signaled as
2142  * errors.
2143 */
2144 tio = nio;
2145 DTRACE_PROBE3(hsfs_coalesced_io_done, struct hio *, nio,
2146 int, bioret, size_t, data);
2147
2148 /*
2149 * Copy data and signal success to all the bufs
2150 * which can be fully satisfied from b_resid.
2151 */
2152 while (nio != NULL && data >= nio->bp->b_bcount) {
2153 offset = ldbtob(nio->bp->b_lblkno) - soffset;
2154 bcopy(iodata + offset, nio->bp->b_un.b_addr,
2155 nio->bp->b_bcount);
2156 data -= nio->bp->b_bcount;
2157 bioerror(nio->bp, 0);
2158 biodone(nio->bp);
2159 sema_v(nio->sema);
2160 tio = nio;
2161 nio = nio->contig_chain;
2162 kmem_cache_free(hio_cache, tio);
2163 }
2164
2165 /*
2166 * Signal error to all the leftover bufs (if any)
2167 * after b_resid data is exhausted.
2168 */
2169 while (nio != NULL) {
2170 nio->bp->b_resid = nio->bp->b_bcount - data;
2171 bzero(nio->bp->b_un.b_addr + data, nio->bp->b_resid);
2172 bioerror(nio->bp, bioret);
2173 biodone(nio->bp);
2174 sema_v(nio->sema);
2175 tio = nio;
2176 nio = nio->contig_chain;
2177 kmem_cache_free(hio_cache, tio);
2178 data = 0;
2179 }
2180 kmem_free(iodata, bsize);
2181 } else {
2182
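		/*
		 * Too few adjacent requests to make coalescing worthwhile.
		 * Issue each original buf individually, then wait for each
		 * one and wake up its waiter. The last hio, "last", is the
		 * sentinel left in the read_tree, so it is not freed here.
		 */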
2183 nbuf = tio->bp;
2184 io_done = tio->sema;
2185 nio = fio;
2186 last = tio;
2187
2188 while (nio != NULL) {
2189 (void) bdev_strategy(nio->bp);
2190 nio = nio->contig_chain;
2191 }
2192 nio = fio;
2193 mutex_exit(&hqueue->strategy_lock);
2194
2195 while (nio != NULL) {
2196 if (nio == last) {
2197 (void) biowait(nbuf);
2198 sema_v(io_done);
2199 break;
2200 				/* The sentinel "last" is not freed. See above. */
2201 } else {
2202 (void) biowait(nio->bp);
2203 sema_v(nio->sema);
2204 }
2205 tio = nio;
2206 nio = nio->contig_chain;
2207 kmem_cache_free(hio_cache, tio);
2208 }
2209 }
2210 return (0);
2211 }
2212
2213 /*
2214  * Insert an I/O request into the I/O scheduler's pipeline.
2215  * Using an AVL tree makes it easy to reorder I/O requests
2216  * based on logical block number.
2217 */
2218 static void
2219 hsched_enqueue_io(struct hsfs *fsp, struct hio *hsio, int ra)
2220 {
2221 struct hsfs_queue *hqueue = fsp->hqueue;
2222
2223 mutex_enter(&hqueue->hsfs_queue_lock);
2224
2225 fsp->physical_read_bytes += hsio->bp->b_bcount;
2226 if (ra)
2227 fsp->readahead_bytes += hsio->bp->b_bcount;
2228
2229 avl_add(&hqueue->deadline_tree, hsio);
2230 avl_add(&hqueue->read_tree, hsio);
2231
2232 DTRACE_PROBE3(hsfs_io_enqueued, struct hio *, hsio,
2233 struct hsfs_queue *, hqueue, int, ra);
2234
2235 mutex_exit(&hqueue->hsfs_queue_lock);
2236 }
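
/*
 * Illustrative sketch (not part of this file's code paths) of how the read
 * path is expected to drive the scheduler above. The hio fields (bp, sema,
 * io_lblkno, nblocks, io_timestamp, contig_chain) are the ones used in this
 * file; "bp" and "fsp" stand for a buf prepared by the caller and the mounted
 * hsfs instance, and the exact setup in hsfs_getapage/hsfs_getpage_ra may
 * differ:
 *
 *	struct hio *hsio = kmem_cache_alloc(hio_cache, KM_SLEEP);
 *	ksema_t io_done;
 *
 *	sema_init(&io_done, 0, NULL, SEMA_DEFAULT, NULL);
 *	hsio->bp = bp;
 *	hsio->sema = &io_done;
 *	hsio->io_lblkno = bp->b_lblkno;
 *	hsio->nblocks = btodb(bp->b_bcount);
 *	hsio->io_timestamp = drv_hztousec(ddi_get_lbolt());
 *	hsio->contig_chain = NULL;
 *	hsched_enqueue_io(fsp, hsio, 0);
 *
 *	(void) hsched_invoke_strategy(fsp);
 *	sema_p(&io_done);
 *	sema_destroy(&io_done);
 */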
2237
2238 /* ARGSUSED */
2239 static int
2240 hsfs_pathconf(struct vnode *vp, int cmd, ulong_t *valp, struct cred *cr,
2241 caller_context_t *ct)
2242 {
2243 struct hsfs *fsp;
2244
2245 int error = 0;
2246
2247 switch (cmd) {
2248
2249 case _PC_NAME_MAX:
2250 fsp = VFS_TO_HSFS(vp->v_vfsp);
2251 *valp = fsp->hsfs_namemax;
2252 break;
2253
2254 case _PC_FILESIZEBITS:
2255 *valp = 33; /* Without multi extent support: 4 GB - 2k */
2256 break;
2257
2258 case _PC_TIMESTAMP_RESOLUTION:
2259 /*
2260 * HSFS keeps, at best, 1/100 second timestamp resolution.
2261 */
2262 *valp = 10000000L;
2263 break;
2264
2265 default:
2266 error = fs_pathconf(vp, cmd, valp, cr, ct);
2267 break;
2268 }
2269
2270 return (error);
2271 }
2272
2273
2274
2275 const fs_operation_def_t hsfs_vnodeops_template[] = {
2276 VOPNAME_OPEN, { .vop_open = hsfs_open },
2277 VOPNAME_CLOSE, { .vop_close = hsfs_close },
2278 VOPNAME_READ, { .vop_read = hsfs_read },
2279 VOPNAME_GETATTR, { .vop_getattr = hsfs_getattr },
2280 VOPNAME_ACCESS, { .vop_access = hsfs_access },
2281 VOPNAME_LOOKUP, { .vop_lookup = hsfs_lookup },
2282 VOPNAME_READDIR, { .vop_readdir = hsfs_readdir },
2283 VOPNAME_READLINK, { .vop_readlink = hsfs_readlink },
2284 VOPNAME_FSYNC, { .vop_fsync = hsfs_fsync },
2285 VOPNAME_INACTIVE, { .vop_inactive = hsfs_inactive },
2286 VOPNAME_FID, { .vop_fid = hsfs_fid },
2287 VOPNAME_SEEK, { .vop_seek = hsfs_seek },
2288 VOPNAME_FRLOCK, { .vop_frlock = hsfs_frlock },
2289 VOPNAME_GETPAGE, { .vop_getpage = hsfs_getpage },
2290 VOPNAME_PUTPAGE, { .vop_putpage = hsfs_putpage },
2291 VOPNAME_MAP, { .vop_map = hsfs_map },
2292 VOPNAME_ADDMAP, { .vop_addmap = hsfs_addmap },
2293 VOPNAME_DELMAP, { .vop_delmap = hsfs_delmap },
2294 VOPNAME_PATHCONF, { .vop_pathconf = hsfs_pathconf },
2295 NULL, NULL
2296 };
2297
2298 struct vnodeops *hsfs_vnodeops;
2299