/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>
#include <sys/policy.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>

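/*
 * In-core inode management for udfs: a hash cache of ud_inodes keyed
 * by (device, ICB disk address), a free list of unreferenced inodes,
 * and the routines that read file entries from disk (ud_iget), write
 * them back (ud_iupdat), truncate them (ud_itrunc) and retire them
 * (ud_iinactive, ud_idrop).
 */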
extern struct vnodeops *udf_vnodeops;

kmutex_t ud_sync_busy;
/*
 * udf_vfs list manipulation routines
 */
kmutex_t udf_vfs_mutex;
struct udf_vfs *udf_vfs_instances;
#ifndef	__lint
_NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
#endif

union ihead ud_ihead[UD_HASH_SZ];
kmutex_t ud_icache_lock;

#define	UD_BEGIN	0x0
#define	UD_END		0x1
#define	UD_UNKN		0x2
struct ud_inode *udf_ifreeh, *udf_ifreet;
kmutex_t udf_ifree_lock;
#ifndef	__lint
_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreeh))
_NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreet))
#endif

kmutex_t ud_nino_lock;
int32_t ud_max_inodes = 512;
int32_t ud_cur_inodes = 0;
#ifndef	__lint
_NOTE(MUTEX_PROTECTS_DATA(ud_nino_lock, ud_cur_inodes))
#endif

uid_t ud_default_uid = 0;
gid_t ud_default_gid = 3;

int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
void ud_trunc_ext4(struct ud_inode *, u_offset_t);
void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
void ud_add_to_free_list(struct ud_inode *, uint32_t);
void ud_remove_from_free_list(struct ud_inode *, uint32_t);


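/*
 * Debug-only lookup: translate (prn, ploc) to a device address and
 * walk the corresponding hash chain for a cached inode.  The inode
 * is returned without taking a hold on it.
 */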
#ifdef	DEBUG
struct ud_inode *
ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
{
	int32_t hno;
	union ihead *ih;
	struct ud_inode *ip;
	struct udf_vfs *udf_vfsp;
	uint32_t loc, dummy;

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);

	mutex_enter(&ud_icache_lock);
	hno = UD_INOHASH(vfsp->vfs_dev, loc);
	ih = &ud_ihead[hno];
	for (ip = ih->ih_chain[0];
			ip != (struct ud_inode *)ih;
			ip = ip->i_forw) {
		if ((prn == ip->i_icb_prn) &&
			(ploc == ip->i_icb_block) &&
			(vfsp->vfs_dev == ip->i_dev)) {
			mutex_exit(&ud_icache_lock);
			return (ip);
		}
	}
	mutex_exit(&ud_icache_lock);
	return (0);
}
#endif

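/*
 * Return, in *ipp, the inode for the file entry at (prn, ploc).  If it
 * is in the inode cache a hold is taken and it is returned directly.
 * Otherwise an inode is recycled off the free list (or freshly
 * allocated), the file entry is read from disk, strategy-4096 indirect
 * entries are chased to the latest file entry, and the allocation
 * descriptors are parsed into i_ext/i_con.
 */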
/* ARGSUSED */
int
ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc,
	struct ud_inode **ipp, struct buf *pbp, struct cred *cred)
{
	int32_t hno, nomem = 0, icb_tag_flags;
	union ihead *ih;
	struct ud_inode *ip;
	struct vnode *vp;
	struct buf *bp = NULL;
	struct file_entry *fe;
	struct udf_vfs *udf_vfsp;
	struct ext_attr_hdr *eah;
	struct attr_hdr *ah;
	int32_t ea_len, ea_off;
	daddr_t loc;
	uint64_t offset = 0;
	struct icb_ext *iext, *con;
	uint32_t length, dummy;
	int32_t ndesc, ftype;
	uint16_t old_prn;
	uint32_t old_block, old_lbano;

	ud_printf("ud_iget\n");
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	old_prn = 0;
	old_block = old_lbano = 0;
	ftype = 0;
	loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
loop:
	mutex_enter(&ud_icache_lock);
	hno = UD_INOHASH(vfsp->vfs_dev, loc);

	ih = &ud_ihead[hno];
	for (ip = ih->ih_chain[0];
			ip != (struct ud_inode *)ih;
			ip = ip->i_forw) {

		if ((prn == ip->i_icb_prn) &&
			(ploc == ip->i_icb_block) &&
			(vfsp->vfs_dev == ip->i_dev)) {

			vp = ITOV(ip);
			VN_HOLD(vp);
			mutex_exit(&ud_icache_lock);

			rw_enter(&ip->i_contents, RW_READER);
			mutex_enter(&ip->i_tlock);
			if ((ip->i_flag & IREF) == 0) {
				mutex_enter(&udf_ifree_lock);
				ud_remove_from_free_list(ip, UD_UNKN);
				mutex_exit(&udf_ifree_lock);
			}
			ip->i_flag |= IREF;
			mutex_exit(&ip->i_tlock);
			rw_exit(&ip->i_contents);

			*ipp = ip;

			if (pbp != NULL) {
				brelse(pbp);
			}

			return (0);
		}
	}

	/*
	 * We don't have it in the cache
	 * Allocate a new entry
	 */
tryagain:
	mutex_enter(&udf_ifree_lock);
	mutex_enter(&ud_nino_lock);
	if (ud_cur_inodes > ud_max_inodes) {
		int32_t purged;

		mutex_exit(&ud_nino_lock);
		while (udf_ifreeh == NULL ||
		    vn_has_cached_data(ITOV(udf_ifreeh))) {
			/*
			 * Try to put an inode on the freelist that's
			 * sitting in the dnlc.
			 */
			mutex_exit(&udf_ifree_lock);
			purged = dnlc_fs_purge1(udf_vnodeops);
			mutex_enter(&udf_ifree_lock);
			if (!purged) {
				break;
			}
		}
		mutex_enter(&ud_nino_lock);
	}

	/*
	 * If there's a free one available and it has no pages attached
	 * take it. If we're over the high water mark, take it even if
	 * it has attached pages. Otherwise, make a new one.
	 */
	if (udf_ifreeh &&
		(nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
		ud_cur_inodes >= ud_max_inodes)) {

		mutex_exit(&ud_nino_lock);
		ip = udf_ifreeh;
		vp = ITOV(ip);

		ud_remove_from_free_list(ip, UD_BEGIN);

		mutex_exit(&udf_ifree_lock);
		if (ip->i_flag & IREF) {
			cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
			mutex_exit(&ud_icache_lock);
			if (pbp != NULL) {
				brelse(pbp);
			}
			return (EINVAL);
		}
		rw_enter(&ip->i_contents, RW_WRITER);

		/*
		 * We call ud_syncip() to synchronously destroy all pages
		 * associated with the vnode before re-using it. The pageout
		 * thread may have beat us to this page so our v_count can
		 * be > 0 at this point even though we are on the freelist.
		 */
		mutex_enter(&ip->i_tlock);
		ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
		mutex_exit(&ip->i_tlock);

		VN_HOLD(vp);
		if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
			ud_idrop(ip);
			rw_exit(&ip->i_contents);
			mutex_exit(&ud_icache_lock);
			goto loop;
		}

		mutex_enter(&ip->i_tlock);
		ip->i_flag &= ~IMODTIME;
		mutex_exit(&ip->i_tlock);

		if (ip->i_ext) {
			kmem_free(ip->i_ext,
				sizeof (struct icb_ext) * ip->i_ext_count);
			ip->i_ext = 0;
			ip->i_ext_count = ip->i_ext_used = 0;
		}

		if (ip->i_con) {
			kmem_free(ip->i_con,
				sizeof (struct icb_ext) * ip->i_con_count);
			ip->i_con = 0;
			ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
		}

		/*
		 * The pageout thread may not have had a chance to release
		 * its hold on the vnode (if it was active with this vp),
		 * but the pages should all be invalidated.
		 */
	} else {
		mutex_exit(&ud_nino_lock);
		mutex_exit(&udf_ifree_lock);
		/*
		 * Try to get memory for this inode without blocking.
		 * If we can't and there is something on the freelist,
		 * go ahead and use it, otherwise block waiting for
		 * memory holding the hash_lock. We expose a potential
		 * deadlock if all users of memory have to do a ud_iget()
		 * before releasing memory.
		 */
		ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
				KM_NOSLEEP);
		vp = vn_alloc(KM_NOSLEEP);
		if ((ip == NULL) || (vp == NULL)) {
			mutex_enter(&udf_ifree_lock);
			if (udf_ifreeh) {
				mutex_exit(&udf_ifree_lock);
				if (ip != NULL)
					kmem_free(ip, sizeof (struct ud_inode));
				if (vp != NULL)
					vn_free(vp);
				nomem = 1;
				goto tryagain;
			} else {
				mutex_exit(&udf_ifree_lock);
				if (ip == NULL)
					ip = (struct ud_inode *)
					    kmem_zalloc(
						sizeof (struct ud_inode),
						KM_SLEEP);
				if (vp == NULL)
					vp = vn_alloc(KM_SLEEP);
			}
		}
		ip->i_vnode = vp;

		ip->i_marker1 = (uint32_t)0xAAAAAAAA;
		ip->i_marker2 = (uint32_t)0xBBBBBBBB;
		ip->i_marker3 = (uint32_t)0xCCCCCCCC;

		rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
		rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
		mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);

		ip->i_forw = ip;
		ip->i_back = ip;
		vp->v_data = (caddr_t)ip;
		vn_setops(vp, udf_vnodeops);
		ip->i_flag = IREF;
		cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
		mutex_enter(&ud_nino_lock);
		ud_cur_inodes++;
		mutex_exit(&ud_nino_lock);

		rw_enter(&ip->i_contents, RW_WRITER);
	}

	if (vp->v_count < 1) {
		cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
		mutex_exit(&ud_icache_lock);
		rw_exit(&ip->i_contents);
		if (pbp != NULL) {
			brelse(pbp);
		}
		return (EINVAL);
	}
	if (vn_has_cached_data(vp)) {
		cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
		mutex_exit(&ud_icache_lock);
		rw_exit(&ip->i_contents);
		if (pbp != NULL) {
			brelse(pbp);
		}
		return (EINVAL);
	}

	/*
	 * Move the inode on the chain for its new (ino, dev) pair
	 */
	remque(ip);
	ip->i_forw = ip;
	ip->i_back = ip;
	insque(ip, ih);

	ip->i_dev = vfsp->vfs_dev;
	ip->i_udf = udf_vfsp;
	ip->i_diroff = 0;
	ip->i_devvp = ip->i_udf->udf_devvp;
	ip->i_icb_prn = prn;
	ip->i_icb_block = ploc;
	ip->i_icb_lbano = loc;
	ip->i_nextr = 0;
	ip->i_seq = 0;
	mutex_exit(&ud_icache_lock);

read_de:
	if (pbp != NULL) {
		/*
		 * assumption is that we will not
		 * create a 4096 file
		 */
		bp = pbp;
	} else {
		bp = ud_bread(ip->i_dev,
			ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
			udf_vfsp->udf_lbsize);
	}

	/*
	 * Check I/O errors
	 */
	fe = (struct file_entry *)bp->b_un.b_addr;
	if ((bp->b_flags & B_ERROR) ||
	    (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
	    ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {

		if (((bp->b_flags & B_ERROR) == 0) &&
			(ftype == STRAT_TYPE4096)) {
			if (ud_check_te_unrec(udf_vfsp,
				bp->b_un.b_addr, ip->i_icb_block) == 0) {

				brelse(bp);

				/*
				 * restore old file entry location
				 */
				ip->i_icb_prn = old_prn;
				ip->i_icb_block = old_block;
				ip->i_icb_lbano = old_lbano;

				/*
				 * reread old file entry
				 */
				bp = ud_bread(ip->i_dev,
					old_lbano << udf_vfsp->udf_l2d_shift,
					udf_vfsp->udf_lbsize);
				if ((bp->b_flags & B_ERROR) == 0) {
					fe = (struct file_entry *)
						bp->b_un.b_addr;
					if (ud_verify_tag_and_desc(&fe->fe_tag,
					    UD_FILE_ENTRY, ip->i_icb_block,
					    1,
					    udf_vfsp->udf_lbsize) == 0) {
						goto end_4096;
					}
				}
			}
		}
error_ret:
		brelse(bp);
		/*
		 * The inode may not contain anything useful. Mark it as
		 * having an error and let anyone else who was waiting for
		 * this know there was an error. Callers waiting for
		 * access to this inode in ud_iget will find
		 * the i_icb_lbano == 0, so there won't be a match.
		 * It remains in the cache. Put it back on the freelist.
		 */
		mutex_enter(&vp->v_lock);
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		ip->i_icb_lbano = 0;

		/*
		 * The following two lines make
		 * it impossible for anyone to do
		 * a VN_HOLD and then a VN_RELE,
		 * so avoiding a ud_iinactive.
		 */
		ip->i_icb_prn = 0xffff;
		ip->i_icb_block = 0;

		/*
		 * remove the bad inode from hash chains
		 * so that during unmount we will not
		 * go through this inode
		 */
		mutex_enter(&ud_icache_lock);
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;
		mutex_exit(&ud_icache_lock);

		/* Put the inode at the front of the freelist */
		mutex_enter(&ip->i_tlock);
		mutex_enter(&udf_ifree_lock);
		ud_add_to_free_list(ip, UD_BEGIN);
		mutex_exit(&udf_ifree_lock);
		ip->i_flag = 0;
		mutex_exit(&ip->i_tlock);
		rw_exit(&ip->i_contents);
		return (EIO);
	}

	if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
		struct buf *ibp = NULL;
		struct indirect_entry *ie;

		/*
		 * save old file_entry location
		 */
		old_prn = ip->i_icb_prn;
		old_block = ip->i_icb_block;
		old_lbano = ip->i_icb_lbano;

		ftype = STRAT_TYPE4096;

		/*
		 * If astrat is 4096, different versions
		 * of the file exist on the media.
		 * We are supposed to get to the latest
		 * version of the file.
		 */

		/*
		 * The IE is supposed to be in the block
		 * following the DE.
		 */
		ibp = ud_bread(ip->i_dev, (ip->i_icb_lbano + 1) <<
				udf_vfsp->udf_l2d_shift,
				udf_vfsp->udf_lbsize);
		if (ibp->b_flags & B_ERROR) {
			/*
			 * Get rid of current ibp and
			 * then goto error on DE's bp
			 */
ie_error:
			brelse(ibp);
			goto error_ret;
		}

		ie = (struct indirect_entry *)ibp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&ie->ie_tag,
		    UD_INDIRECT_ENT,
		    ip->i_icb_block + 1,
		    1, udf_vfsp->udf_lbsize) == 0) {
			struct long_ad *lad;


			lad = &ie->ie_indirecticb;
			ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
			ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
			ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
				ip->i_icb_prn, ip->i_icb_block,
				1, &dummy);
			brelse(ibp);
			brelse(bp);
			goto read_de;
		}

		/*
		 * If this block is a TE or unrecorded, we
		 * are at the last entry.
		 */
		if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
				ip->i_icb_block + 1) != 0) {
			/*
			 * This is not a TE or an unrecorded block
			 * (and the IE check above already failed),
			 * so the chain is corrupt; give up.
			 */
			goto ie_error;
		}
		/*
		 * If ud_check_te_unrec returns 0,
		 * this is the last in the chain:
		 * the latest file_entry.
		 */
		brelse(ibp);
	}

end_4096:

	ip->i_uid = SWAP_32(fe->fe_uid);
	if (ip->i_uid == -1) {
		ip->i_uid = ud_default_uid;
	}
	ip->i_gid = SWAP_32(fe->fe_gid);
	if (ip->i_gid == -1) {
		ip->i_gid = ud_default_gid;
	}
	ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
	if (fe->fe_icb_tag.itag_strategy ==
			SWAP_16(STRAT_TYPE4096)) {
		ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
	}

	ip->i_nlink = SWAP_16(fe->fe_lcount);
	ip->i_size = SWAP_64(fe->fe_info_len);
	ip->i_lbr = SWAP_64(fe->fe_lbr);

	ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
	ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
	ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);


	ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
	icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);


	if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
		(fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {

		eah = (struct ext_attr_hdr *)fe->fe_spec;
		ea_off = GET_32(&eah->eah_ial);
		ea_len = GET_32(&fe->fe_len_ear);
		if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
		    UD_EXT_ATTR_HDR,
		    ip->i_icb_block,
		    1,
		    sizeof (struct file_entry) -
		    offsetof(struct file_entry, fe_spec)) == 0)) {

			while (ea_off < ea_len) {
				/*
				 * We now check the validity of ea_off:
				 * (ea_len - ea_off) should be large enough to
				 * hold at least the attribute header.
				 */
				if ((ea_len - ea_off) <
				    sizeof (struct attr_hdr)) {
					cmn_err(CE_NOTE,
"ea_len(0x%x) - ea_off(0x%x) is too small to hold attr. info. blockno 0x%x\n",
					    ea_len, ea_off, ip->i_icb_block);
					goto error_ret;
				}
				ah = (struct attr_hdr *)&fe->fe_spec[ea_off];

				/*
				 * Device Specification EA
				 */
				if ((GET_32(&ah->ahdr_atype) == 12) &&
					(ah->ahdr_astype == 1)) {
					struct dev_spec_ear *ds;

				    if ((ea_len - ea_off) <
					sizeof (struct dev_spec_ear)) {
					cmn_err(CE_NOTE,
"ea_len(0x%x) - ea_off(0x%x) is too small to hold dev_spec_ear. blockno 0x%x\n",
					    ea_len, ea_off, ip->i_icb_block);
					goto error_ret;
				    }
				    ds = (struct dev_spec_ear *)ah;
				    ip->i_major = GET_32(&ds->ds_major_id);
				    ip->i_minor = GET_32(&ds->ds_minor_id);
				}

				/*
				 * Impl Use EA
				 */
				if ((GET_32(&ah->ahdr_atype) == 2048) &&
					(ah->ahdr_astype == 1)) {
					struct iu_ea *iuea;
					struct copy_mgt_info *cmi;

					if ((ea_len - ea_off) <
					    sizeof (struct iu_ea)) {
						cmn_err(CE_NOTE,
"ea_len(0x%x) - ea_off(0x%x) is too small to hold iu_ea. blockno 0x%x\n",
						    ea_len, ea_off,
						    ip->i_icb_block);
						goto error_ret;
					}
					iuea = (struct iu_ea *)ah;
					if (strncmp(iuea->iuea_ii.reg_id,
					    UDF_FREEEASPACE,
					    sizeof (iuea->iuea_ii.reg_id))
					    == 0) {
						/* skip it */
						iuea = iuea;
					} else if (strncmp(iuea->iuea_ii.reg_id,
					    UDF_CGMS_INFO,
					    sizeof (iuea->iuea_ii.reg_id))
					    == 0) {
						cmi = (struct copy_mgt_info *)
							iuea->iuea_iu;
						cmi = cmi;
					}
				}
				/* ??? PARANOIA */
				if (GET_32(&ah->ahdr_length) == 0) {
					break;
				}
				ea_off += GET_32(&ah->ahdr_length);
			}
		}
	}

	ip->i_nextr = 0;

	ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
	ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);

	ip->i_desc_type = icb_tag_flags & 0x7;

	/* Strictly Paranoia */
	ip->i_ext = NULL;
	ip->i_ext_count = ip->i_ext_used = 0;
	ip->i_con = 0;
	ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;

	ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
	ip->i_max_emb =  udf_vfsp->udf_lbsize - ip->i_data_off;
	if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		/* Short allocation desc */
		struct short_ad *sad;

		ip->i_ext_used = 0;
		ip->i_ext_count = ndesc =
			SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
		ip->i_ext_count = ((ip->i_ext_count / EXT_PER_MALLOC) + 1) *
					EXT_PER_MALLOC;
		ip->i_ext = (struct icb_ext  *)kmem_zalloc(ip->i_ext_count *
					sizeof (struct icb_ext), KM_SLEEP);
		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
		ip->i_cur_max_ext --;


		if ((ip->i_astrat != STRAT_TYPE4) &&
			(ip->i_astrat != STRAT_TYPE4096)) {
			goto error_ret;
		}

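		/*
		 * Walk the embedded short_ad descriptors.  The top two
		 * bits of the on-disk extent length encode the extent
		 * type (IB_MASK); the low 30 bits are the byte count.
		 * An IB_CON entry points to a continuation block of
		 * further descriptors and terminates this list.
		 */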
		sad = (struct short_ad *)
				(fe->fe_spec + SWAP_32(fe->fe_len_ear));
		iext = ip->i_ext;
		while (ndesc --) {
			length = SWAP_32(sad->sad_ext_len);
			if ((length & 0x3FFFFFFF) == 0) {
				break;
			}
			if (((length >> 30) & IB_MASK) == IB_CON) {
				if (ip->i_con == NULL) {
					ip->i_con_count = EXT_PER_MALLOC;
					ip->i_con_used = 0;
					ip->i_con_read = 0;
					ip->i_con = kmem_zalloc(
						ip->i_con_count *
						sizeof (struct icb_ext),
						KM_SLEEP);
				}
				con = &ip->i_con[ip->i_con_used];
				con->ib_prn = 0;
				con->ib_block = SWAP_32(sad->sad_ext_loc);
				con->ib_count = length & 0x3FFFFFFF;
				con->ib_flags = (length >> 30) & IB_MASK;
				ip->i_con_used++;
				sad ++;
				break;
			}
			iext->ib_prn = 0;
			iext->ib_block = SWAP_32(sad->sad_ext_loc);
			length = SWAP_32(sad->sad_ext_len);
			iext->ib_count = length & 0x3FFFFFFF;
			iext->ib_offset = offset;
			iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
			iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
			offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
					(~udf_vfsp->udf_lbmask);

			iext->ib_flags = (length >> 30) & IB_MASK;

			ip->i_ext_used++;
			iext++;
			sad ++;
		}
	} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		/* Long allocation desc */
		struct long_ad *lad;

		ip->i_ext_used = 0;
		ip->i_ext_count = ndesc =
			SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
		ip->i_ext_count = ((ip->i_ext_count / EXT_PER_MALLOC) + 1) *
					EXT_PER_MALLOC;
		ip->i_ext = (struct icb_ext  *)kmem_zalloc(ip->i_ext_count *
					sizeof (struct icb_ext), KM_SLEEP);

		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
		ip->i_cur_max_ext --;

		if ((ip->i_astrat != STRAT_TYPE4) &&
			(ip->i_astrat != STRAT_TYPE4096)) {
			goto error_ret;
		}

		lad = (struct long_ad *)
				(fe->fe_spec + SWAP_32(fe->fe_len_ear));
		iext = ip->i_ext;
		while (ndesc --) {
			length = SWAP_32(lad->lad_ext_len);
			if ((length & 0x3FFFFFFF) == 0) {
				break;
			}
			if (((length >> 30) & IB_MASK) == IB_CON) {
				if (ip->i_con == NULL) {
					ip->i_con_count = EXT_PER_MALLOC;
					ip->i_con_used = 0;
					ip->i_con_read = 0;
					ip->i_con = kmem_zalloc(
						ip->i_con_count *
						sizeof (struct icb_ext),
						KM_SLEEP);
				}
				con = &ip->i_con[ip->i_con_used];
				con->ib_prn = SWAP_16(lad->lad_ext_prn);
				con->ib_block = SWAP_32(lad->lad_ext_loc);
				con->ib_count = length & 0x3FFFFFFF;
				con->ib_flags = (length >> 30) & IB_MASK;
				ip->i_con_used++;
				lad ++;
				break;
			}
			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
			iext->ib_block = SWAP_32(lad->lad_ext_loc);
			iext->ib_count = length & 0x3FFFFFFF;
			iext->ib_offset = offset;
			iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
			iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
			offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
				(~udf_vfsp->udf_lbmask);

			iext->ib_flags = (length >> 30) & IB_MASK;

			ip->i_ext_used++;
			iext++;
			lad ++;
		}
	} else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
		ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);

		if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
			goto error_ret;
		}
	} else {
		/* Not to be used in UDF 1.50 */
		cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
				ip->i_desc_type);
		goto error_ret;
	}


	if (icb_tag_flags & ICB_FLAG_SETUID) {
		ip->i_char = ISUID;
	} else {
		ip->i_char = 0;
	}
	if (icb_tag_flags & ICB_FLAG_SETGID) {
		ip->i_char |= ISGID;
	}
	if (icb_tag_flags & ICB_FLAG_STICKY) {
		ip->i_char |= ISVTX;
	}
	switch (fe->fe_icb_tag.itag_ftype) {
		case FTYPE_DIRECTORY :
			ip->i_type = VDIR;
			break;
		case FTYPE_FILE :
			ip->i_type = VREG;
			break;
		case FTYPE_BLOCK_DEV :
			ip->i_type = VBLK;
			break;
		case FTYPE_CHAR_DEV :
			ip->i_type = VCHR;
			break;
		case FTYPE_FIFO :
			ip->i_type = VFIFO;
			break;
		case FTYPE_C_ISSOCK :
			ip->i_type = VSOCK;
			break;
		case FTYPE_SYMLINK :
			ip->i_type = VLNK;
			break;
		default :
			ip->i_type = VNON;
			break;
	}

	if (ip->i_type == VBLK || ip->i_type == VCHR) {
		ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
	}

	/*
	 * Fill in the rest.  Don't bother with the vnode lock because nobody
	 * should be looking at this vnode.  We have already invalidated the
	 * pages if it had any so pageout shouldn't be referencing this vnode
	 * and we are holding the write contents lock so a lookup can't use
	 * the vnode.
	 */
	vp->v_vfsp = vfsp;
	vp->v_type = ip->i_type;
	vp->v_rdev = ip->i_rdev;
	if (ip->i_udf->udf_root_blkno == loc) {
		vp->v_flag = VROOT;
	} else {
		vp->v_flag = 0;
	}

	brelse(bp);
	*ipp = ip;
	rw_exit(&ip->i_contents);
	vn_exists(vp);
	return (0);
}

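/*
 * Called when the last reference to an inode is released.  Unlinked
 * inodes are truncated and freed; otherwise the inode is written back
 * if dirty and then either parked on the free list or, when the cache
 * is over ud_max_inodes, destroyed outright.
 */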
void
ud_iinactive(struct ud_inode *ip, struct cred *cr)
{
	int32_t busy = 0;
	struct vnode *vp;
	vtype_t type;
	caddr_t addr, addr1;
	size_t size, size1;


	ud_printf("ud_iinactive\n");

	/*
	 * Get exclusive access to inode data.
	 */
	rw_enter(&ip->i_contents, RW_WRITER);

	/*
	 * Make sure no one reclaimed the inode before we put
	 * it on the freelist or destroy it. We keep our 'hold'
	 * on the vnode from vn_rele until we are ready to
	 * do something with the inode (freelist/destroy).
	 *
	 * Pageout may put a VN_HOLD/VN_RELE at any time during this
	 * operation via an async putpage, so we must make sure
	 * we don't free/destroy the inode more than once. ud_iget
	 * may also put a VN_HOLD on the inode before it grabs
	 * the i_contents lock. This is done so we don't kmem_free
	 * an inode that a thread is waiting on.
	 */
	vp = ITOV(ip);

	mutex_enter(&vp->v_lock);
	if (vp->v_count < 1) {
		cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
		mutex_exit(&vp->v_lock);
		rw_exit(&ip->i_contents);
		return;
	}
	if ((vp->v_count > 1) ||
		((ip->i_flag & IREF) == 0)) {
		vp->v_count--;		/* release our hold from vn_rele */
		mutex_exit(&vp->v_lock);
		rw_exit(&ip->i_contents);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * For forced umount case: if i_udf is NULL, the contents of
	 * the inode and all the pages have already been pushed back
	 * to disk. It can be safely destroyed.
	 */
	if (ip->i_udf == NULL) {
		addr = (caddr_t)ip->i_ext;
		size = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext = 0;
		ip->i_ext_count = ip->i_ext_used = 0;
		addr1 = (caddr_t)ip->i_con;
		size1 = sizeof (struct icb_ext) * ip->i_con_count;
		ip->i_con = 0;
		ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
		rw_exit(&ip->i_contents);
		vn_invalid(vp);

		mutex_enter(&ud_nino_lock);
		ud_cur_inodes--;
		mutex_exit(&ud_nino_lock);

		cv_destroy(&ip->i_wrcv);  /* throttling */
		rw_destroy(&ip->i_rwlock);
		rw_destroy(&ip->i_contents);
		kmem_free(addr, size);
		kmem_free(addr1, size1);
		vn_free(vp);
		kmem_free(ip, sizeof (struct ud_inode));
		return;
	}

	if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
		if (ip->i_nlink <= 0) {
			ip->i_marker3 = (uint32_t)0xDDDD0000;
			ip->i_nlink = 1;	/* prevent free-ing twice */
			(void) ud_itrunc(ip, 0, 0, cr);
			type = ip->i_type;
			ip->i_perm = 0;
			ip->i_uid = 0;
			ip->i_gid = 0;
			ip->i_rdev = 0;	/* Zero in core version of rdev */
			mutex_enter(&ip->i_tlock);
			ip->i_flag |= IUPD|ICHG;
			mutex_exit(&ip->i_tlock);
			ud_ifree(ip, type);
			ip->i_icb_prn = 0xFFFF;
		} else if (!IS_SWAPVP(vp)) {
			/*
			 * Write the inode out if dirty. Pages are
			 * written back and put on the freelist.
			 */
			(void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
			/*
			 * Do nothing if inode is now busy -- inode may
			 * have gone busy because ud_syncip
			 * releases/reacquires the i_contents lock
			 */
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&ip->i_contents);
				return;
			}
			mutex_exit(&vp->v_lock);
		} else {
			ud_iupdat(ip, 0);
		}
	}


	/*
	 * Put the inode on the end of the free list.
	 * Possibly in some cases it would be better to
	 * put the inode at the head of the free list,
	 * (e.g.: where i_perm == 0 || i_number == 0)
	 * but I will think about that later.
	 * (i_number is rarely 0 - only after an i/o error in ud_iget,
	 * where i_perm == 0, the inode will probably be wanted
	 * again soon for an ialloc, so possibly we should keep it)
	 */
	/*
	 * If inode is invalid or there is no page associated with
	 * this inode, put the inode in the front of the free list.
	 * Since we have a VN_HOLD on the vnode, and checked that it
	 * wasn't already on the freelist when we entered, we can safely
	 * put it on the freelist even if another thread puts a VN_HOLD
	 * on it (pageout/ud_iget).
	 */
tryagain:
	mutex_enter(&ud_nino_lock);
	if (vn_has_cached_data(vp)) {
		mutex_exit(&ud_nino_lock);
		mutex_enter(&vp->v_lock);
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		mutex_enter(&ip->i_tlock);
		mutex_enter(&udf_ifree_lock);
		ud_add_to_free_list(ip, UD_END);
		mutex_exit(&udf_ifree_lock);
		ip->i_flag &= IMODTIME;
		mutex_exit(&ip->i_tlock);
		rw_exit(&ip->i_contents);
	} else if (busy || ud_cur_inodes < ud_max_inodes) {
		mutex_exit(&ud_nino_lock);
		/*
		 * We're not over our high water mark, or it's
		 * not safe to kmem_free the inode, so put it
		 * on the freelist.
		 */
		mutex_enter(&vp->v_lock);
		if (vn_has_cached_data(vp)) {
			cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
		}
		vp->v_count--;
		mutex_exit(&vp->v_lock);

		mutex_enter(&ip->i_tlock);
		mutex_enter(&udf_ifree_lock);
		ud_add_to_free_list(ip, UD_BEGIN);
		mutex_exit(&udf_ifree_lock);
		ip->i_flag &= IMODTIME;
		mutex_exit(&ip->i_tlock);
		rw_exit(&ip->i_contents);
	} else {
		mutex_exit(&ud_nino_lock);
		if (vn_has_cached_data(vp)) {
			cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
		}
		/*
		 * Try to free the inode. We must make sure
		 * it's o.k. to destroy this inode. We can't destroy
		 * if a thread is waiting for this inode. If we can't get the
		 * cache now, put it back on the freelist.
		 */
		if (!mutex_tryenter(&ud_icache_lock)) {
			busy = 1;
			goto tryagain;
		}
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			/* inode is wanted in ud_iget */
			busy = 1;
			mutex_exit(&vp->v_lock);
			mutex_exit(&ud_icache_lock);
			goto tryagain;
		}
		mutex_exit(&vp->v_lock);
		remque(ip);
		ip->i_forw = ip;
		ip->i_back = ip;
		mutex_enter(&ud_nino_lock);
		ud_cur_inodes--;
		mutex_exit(&ud_nino_lock);
		mutex_exit(&ud_icache_lock);
		if (ip->i_icb_prn != 0xFFFF) {
			ud_iupdat(ip, 0);
		}
		addr = (caddr_t)ip->i_ext;
		size = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext = 0;
		ip->i_ext_count = ip->i_ext_used = 0;
		addr1 = (caddr_t)ip->i_con;
		size1 = sizeof (struct icb_ext) * ip->i_con_count;
		ip->i_con = 0;
		ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
		cv_destroy(&ip->i_wrcv);  /* throttling */
		rw_destroy(&ip->i_rwlock);
		rw_exit(&ip->i_contents);
		rw_destroy(&ip->i_contents);
		kmem_free(addr, size);
		kmem_free(addr1, size1);
		ip->i_marker3 = (uint32_t)0xDDDDDDDD;
		vn_free(vp);
		kmem_free(ip, sizeof (struct ud_inode));
	}
}


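/*
 * Write the in-core inode back to its on-disk file entry.  If any of
 * IUPD/IACC/ICHG/IMOD/IMODACC is set, the file entry is reread,
 * updated and written out - synchronously when waitfor is set,
 * otherwise as a delayed write remembered through IBDWRITE.
 */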
void
ud_iupdat(struct ud_inode *ip, int32_t waitfor)
{
	uint16_t flag, tag_flags;
	int32_t error, crc_len = 0;
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	struct file_entry *fe;

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	ud_printf("ud_iupdat\n");
	/*
	 * Return if file system has been forcibly umounted.
	 */
	if (ip->i_udf == NULL) {
		return;
	}

	udf_vfsp = ip->i_udf;
	flag = ip->i_flag;	/* Atomic read */
	if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
			ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
			return;
		}

		bp = ud_bread(ip->i_dev,
			ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
			ip->i_udf->udf_lbsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return;
		}
		fe = (struct file_entry *)bp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
		    ip->i_icb_block,
		    1, ip->i_udf->udf_lbsize) != 0) {
			brelse(bp);
			return;
		}

		mutex_enter(&ip->i_tlock);
		if (ip->i_flag & (IUPD|IACC|ICHG)) {
			IMARK(ip);
		}
		ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
		mutex_exit(&ip->i_tlock);

		fe->fe_uid = SWAP_32(ip->i_uid);
		fe->fe_gid = SWAP_32(ip->i_gid);

		fe->fe_perms = SWAP_32(ip->i_perm);

		fe->fe_lcount = SWAP_16(ip->i_nlink);
		fe->fe_info_len = SWAP_64(ip->i_size);
		fe->fe_lbr = SWAP_64(ip->i_lbr);

		ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
		ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
		ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);

		if (ip->i_char & ISUID) {
			tag_flags = ICB_FLAG_SETUID;
		} else {
			tag_flags = 0;
		}
		if (ip->i_char & ISGID) {
			tag_flags |= ICB_FLAG_SETGID;
		}
		if (ip->i_char & ISVTX) {
			tag_flags |= ICB_FLAG_STICKY;
		}
		tag_flags |= ip->i_desc_type;

		/*
		 * Removed the following; the file is no longer contiguous:
		 * if (ip->i_astrat  == STRAT_TYPE4) {
		 *	tag_flags |= ICB_FLAG_CONTIG;
		 * }
		 */

		fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
		fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
		fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);

		ud_update_regid(&fe->fe_impl_id);

		crc_len = ((uint32_t)&((struct file_entry *)0)->fe_spec) +
				SWAP_32(fe->fe_len_ear);
		if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
			crc_len += ip->i_size;
			fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
		} else if ((ip->i_size != 0) &&
				(ip->i_ext != NULL) &&
				(ip->i_ext_used != 0)) {

			if ((error = ud_read_icb_till_off(ip,
					ip->i_size)) == 0) {
				if (ip->i_astrat == STRAT_TYPE4) {
					error = ud_updat_ext4(ip, fe);
				} else if (ip->i_astrat == STRAT_TYPE4096) {
					error = ud_updat_ext4096(ip, fe);
				}
				if (error) {
					udf_vfsp->udf_mark_bad = 1;
				}
			}
			crc_len += SWAP_32(fe->fe_len_adesc);
		} else {
			fe->fe_len_adesc = 0;
		}

		/*
		 * Zero out the rest of the block
		 */
		bzero(bp->b_un.b_addr + crc_len,
			ip->i_udf->udf_lbsize - crc_len);

		ud_make_tag(ip->i_udf, &fe->fe_tag,
			UD_FILE_ENTRY, ip->i_icb_block, crc_len);


		if (waitfor) {
			BWRITE(bp);

			/*
			 * Synchronous write has guaranteed that inode
			 * has been written on disk so clear the flag
			 */
			ip->i_flag &= ~(IBDWRITE);
		} else {
			bdwrite(bp);

			/*
			 * This write hasn't guaranteed that inode has been
			 * written on the disk.
			 * Since all update flags on the inode are cleared,
			 * we must remember the condition in case the inode
			 * is to be updated synchronously later (e.g. by
			 * fsync()/fdatasync()) and the inode has not been
			 * modified yet.
			 */
			ip->i_flag |= (IBDWRITE);
		}
	} else {
		/*
		 * In case previous inode update was done asynchronously
		 * (IBDWRITE) and this inode update request wants guaranteed
		 * (synchronous) disk update, flush the inode.
		 */
		if (waitfor && (flag & IBDWRITE)) {
			blkflush(ip->i_dev, (daddr_t)
				fsbtodb(udf_vfsp, ip->i_icb_lbano));
			ip->i_flag &= ~(IBDWRITE);
		}
	}
}

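/*
 * Rewrite the allocation descriptors of a strategy type 4 inode.
 * The first ndent - 1 extents live in the file entry itself; the
 * rest go into continuation blocks (alloc extent descriptors)
 * chained through i_con.  Continuation blocks that are no longer
 * needed are freed and i_con_used is trimmed to match.
 */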
int32_t
ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
{
	uint32_t dummy;
	int32_t elen, ndent, index, count, con_index;
	daddr_t bno;
	struct buf *bp;
	struct short_ad *sad;
	struct long_ad *lad;
	struct icb_ext *iext, *icon;


	ASSERT(ip);
	ASSERT(fe);
	ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
			(ip->i_desc_type == ICB_FLAG_LONG_AD));

	if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		elen = sizeof (struct short_ad);
		sad = (struct short_ad *)
			(fe->fe_spec + SWAP_32(fe->fe_len_ear));
	} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		elen = sizeof (struct long_ad);
		lad = (struct long_ad *)
			(fe->fe_spec + SWAP_32(fe->fe_len_ear));
	} else {
		/* This cannot happen; return an error */
		return (EINVAL);
	}

	ndent = ip->i_max_emb / elen;

	if (ip->i_ext_used < ndent) {

		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
			ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
		} else {
			ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
		}
		fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
		con_index = 0;
	} else {

		con_index = index = 0;

		while (index < ip->i_ext_used) {
			if (index == 0) {
				/*
				 * bp is already read.
				 * The first few extents go
				 * into the file entry.
				 */
				count = ndent - 1;
				fe->fe_len_adesc =
					SWAP_32(ndent * elen);
				bp = NULL;

				/*
				 * The last entry is to be a
				 * continuation extent.
				 */
				icon = &ip->i_con[con_index];
			} else {
				/*
				 * Read the buffer
				 */
				icon = &ip->i_con[con_index];

				bno = ud_xlate_to_daddr(ip->i_udf,
					icon->ib_prn, icon->ib_block,
					icon->ib_count >>
					ip->i_udf->udf_l2d_shift, &dummy);
				bp = ud_bread(ip->i_dev, bno  <<
						ip->i_udf->udf_l2d_shift,
						ip->i_udf->udf_lbsize);
				if (bp->b_flags & B_ERROR) {
					brelse(bp);
					return (EIO);
				}

				/*
				 * Figure out how many extents
				 * to write this time.
				 */
				count = (bp->b_bcount -
					sizeof (struct alloc_ext_desc)) / elen;
				if (count > (ip->i_ext_used - index)) {
					count = ip->i_ext_used - index;
				} else {
					count --;
				}
				con_index++;
				if (con_index >= ip->i_con_used) {
					icon = NULL;
				} else {
					icon = &ip->i_con[con_index];
				}
			}


			/*
			 * convert to on disk form and
			 * update
			 */
			iext = &ip->i_ext[index];
			if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
				if (index != 0) {
					sad = (struct short_ad *)
						(bp->b_un.b_addr +
						sizeof (struct alloc_ext_desc));
				}
				ud_make_sad(iext, sad, count);
				sad += count;
				if (icon != NULL) {
					ud_make_sad(icon, sad, 1);
				}
			} else {
				if (index != 0) {
					lad = (struct long_ad *)
						(bp->b_un.b_addr +
						sizeof (struct alloc_ext_desc));
				}
				ud_make_lad(iext, lad, count);
				lad += count;
				if (icon != NULL) {
					ud_make_lad(icon, lad, 1);
				}
			}

			if (con_index != 0) {
				struct alloc_ext_desc *aed;
				int32_t sz;
				struct icb_ext *oicon;

				oicon = &ip->i_con[con_index - 1];
				sz = count * elen;
				if (icon != NULL) {
					sz += elen;
				}
				aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
				aed->aed_len_aed = SWAP_32(sz);
				if (con_index == 1) {
					aed->aed_rev_ael =
						SWAP_32(ip->i_icb_block);
				} else {
					aed->aed_rev_ael =
						SWAP_32(oicon->ib_block);
				}
				sz += sizeof (struct alloc_ext_desc);
				ud_make_tag(ip->i_udf, &aed->aed_tag,
					UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
			}

			/*
			 * Write back to disk
			 */
			if (bp != NULL) {
				BWRITE(bp);
			}
			index += count;
		}

	}

	if (con_index != ip->i_con_used) {
		int32_t lbmask, l2b, temp;

		temp = con_index;
		lbmask = ip->i_udf->udf_lbmask;
		l2b = ip->i_udf->udf_l2b_shift;
		/*
		 * Free unused continuation extents
		 */
		for (; con_index < ip->i_con_used; con_index++) {
			icon = &ip->i_con[con_index];
			count = (icon->ib_count + lbmask) >> l2b;
			ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
					icon->ib_block, count);
			count = (count << l2b) - sizeof (struct alloc_ext_desc);
			ip->i_cur_max_ext -= (count / elen) - 1;
		}
		ip->i_con_used = temp;
	}
	return (0);
}

/* ARGSUSED */
int32_t
ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
{
	return (ENXIO);
}

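/*
 * Convert an array of in-core icb_ext entries to the on-disk
 * short_ad/long_ad form.  The extent type flags are packed into the
 * top two bits of the 32-bit extent length field.
 */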
void
ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
{
	int32_t index = 0, scount;

	ASSERT(iext);
	ASSERT(sad);

	if (count != 0) {
		ASSERT(count > 0);
		while (index < count) {
			scount = (iext->ib_count & 0x3FFFFFFF) |
					(iext->ib_flags << 30);
			sad->sad_ext_len = SWAP_32(scount);
			sad->sad_ext_loc = SWAP_32(iext->ib_block);
			sad++;
			iext++;
			index++;
		}
	}
}

void
ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
{
	int32_t index = 0, scount;

	ASSERT(iext);
	ASSERT(lad);

	if (count != 0) {
		ASSERT(count > 0);

		while (index < count) {
			lad->lad_ext_prn = SWAP_16(iext->ib_prn);
			scount = (iext->ib_count & 0x3FFFFFFF) |
				(iext->ib_flags << 30);
			lad->lad_ext_len = SWAP_32(scount);
			lad->lad_ext_loc = SWAP_32(iext->ib_block);
			lad++;
			iext++;
			index++;
		}
	}
}

/*
 * Truncate the inode ip to at most length size.
 * Free affected disk blocks -- the blocks of the
 * file are removed in reverse order.
 */
/* ARGSUSED */
int
ud_itrunc(struct ud_inode *oip, u_offset_t length,
	int32_t flags, struct cred *cr)
{
	int32_t error, boff;
	off_t bsize;
	mode_t mode;
	struct udf_vfs *udf_vfsp;

	ud_printf("ud_itrunc\n");

	ASSERT(RW_WRITE_HELD(&oip->i_contents));
	udf_vfsp = oip->i_udf;
	bsize = udf_vfsp->udf_lbsize;

	/*
	 * We only allow truncation of regular files and directories
	 * to arbitrary lengths here.  In addition, we allow symbolic
	 * links to be truncated only to zero length.  Other inode
	 * types cannot have their length set here.
	 */
	mode = oip->i_type;
	if (mode == VFIFO) {
		return (0);
	}
	if ((mode != VREG) && (mode != VDIR) &&
		(!(mode == VLNK && length == 0))) {
		return (EINVAL);
	}
	if (length == oip->i_size) {
		/* update ctime and mtime to please POSIX tests */
		mutex_enter(&oip->i_tlock);
		oip->i_flag |= ICHG |IUPD;
		mutex_exit(&oip->i_tlock);
		return (0);
	}

	boff = blkoff(udf_vfsp, length);

	if (length > oip->i_size) {
		/*
		 * Trunc up case.  ud_bmap_write will ensure that the right
		 * blocks are allocated.  This includes doing any work needed
		 * for allocating the last block.
		 */
		if (boff == 0) {
			error = ud_bmap_write(oip, length - 1,
				(int)bsize, 0, cr);
		} else {
			error = ud_bmap_write(oip, length - 1, boff, 0, cr);
		}
		if (error == 0) {
			u_offset_t osize = oip->i_size;
			oip->i_size  = length;

			/*
			 * Make sure we zero out the remaining bytes of
			 * the page in case a mmap scribbled on it. We
			 * can't prevent a mmap from writing beyond EOF
			 * on the last page of a file.
			 */
			if ((boff = blkoff(udf_vfsp, osize)) != 0) {
				pvn_vpzero(ITOV(oip), osize,
						(uint32_t)(bsize - boff));
			}
			mutex_enter(&oip->i_tlock);
			oip->i_flag |= ICHG;
			ITIMES_NOLOCK(oip);
			mutex_exit(&oip->i_tlock);
		}
		return (error);
	}

	/*
	 * Update the pages of the file.  If the file is not being
	 * truncated to a block boundary, the contents of the
	 * pages following the end of the file must be zero'ed
	 * in case they ever become accessible again because
	 * of subsequent file growth.
	 */
	if (boff == 0) {
		(void) pvn_vplist_dirty(ITOV(oip), length,
				ud_putapage, B_INVAL | B_TRUNC, CRED());
	} else {
		/*
		 * Make sure that the last block is properly allocated.
		 * We only really have to do this if the last block is
		 * actually allocated.  Just to be sure, we do it now
		 * independent of current allocation.
		 */
		error = ud_bmap_write(oip, length - 1, boff, 0, cr);
		if (error) {
			return (error);
		}

		pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));

		(void) pvn_vplist_dirty(ITOV(oip), length,
				ud_putapage, B_INVAL | B_TRUNC, CRED());
	}


	/* Free the blocks */
	if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
		if (length > oip->i_max_emb) {
			return (EFBIG);
		}
		oip->i_size = length;
		mutex_enter(&oip->i_tlock);
		oip->i_flag |= ICHG|IUPD;
		mutex_exit(&oip->i_tlock);
		ud_iupdat(oip, 1);
	} else {
		if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
			return (error);
		}

		if (oip->i_astrat == STRAT_TYPE4) {
			ud_trunc_ext4(oip, length);
		} else if (oip->i_astrat == STRAT_TYPE4096) {
			ud_trunc_ext4096(oip, length);
		}
	}

done:
	return (0);
}

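/*
 * Truncate the extent list of a strategy type 4 inode to "length":
 * locate the extent containing the new EOF, clamp its byte count,
 * free the blocks of all later extents and of any continuation
 * blocks that are no longer needed, and recompute i_lbr.
 */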
void
ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
{
	int32_t index, l2b, count, ecount;
	int32_t elen, ndent, nient;
	u_offset_t ext_beg, ext_end;
	struct icb_ext *iext, *icon;
	int32_t lbmask, ext_used;
	uint32_t loc;
	struct icb_ext text;
	uint32_t con_freed;

	ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
			(ip->i_desc_type == ICB_FLAG_LONG_AD));

	if (ip->i_ext_used == 0) {
		return;
	}

	ext_used = ip->i_ext_used;

	lbmask = ip->i_udf->udf_lbmask;
	l2b = ip->i_udf->udf_l2b_shift;

	ASSERT(ip->i_ext);

	ip->i_lbr = 0;
	for (index = 0; index < ext_used; index++) {
		iext = &ip->i_ext[index];

		/*
		 * Find the beginning and end
		 * of the current extent.
		 */
		ext_beg = iext->ib_offset;
		ext_end = iext->ib_offset +
			((iext->ib_count + lbmask) & ~lbmask);

		/*
		 * This is the extent that contains offset "length".
		 * Make a copy of this extent and
		 * remember the index; we can use
		 * it to free blocks.
		 */
		if ((length <= ext_end) &&
			(length >= ext_beg)) {
			text = *iext;

			iext->ib_count = length - ext_beg;
			ip->i_ext_used = index + 1;
			break;
		}
		if (iext->ib_flags != IB_UN_RE_AL) {
			ip->i_lbr += iext->ib_count >> l2b;
		}
	}
	if (ip->i_ext_used != index) {
		if (iext->ib_flags != IB_UN_RE_AL) {
			ip->i_lbr +=
			((iext->ib_count + lbmask) & ~lbmask) >> l2b;
		}
	}

	ip->i_size = length;
	mutex_enter(&ip->i_tlock);
	ip->i_flag |= ICHG|IUPD;
	mutex_exit(&ip->i_tlock);
	ud_iupdat(ip, 1);

	/*
	 * Free the unused space
	 */
	if (text.ib_flags != IB_UN_RE_AL) {
		count = (ext_end - length) >> l2b;
		if (count) {
			loc = text.ib_block +
			(((length - text.ib_offset) + lbmask) >> l2b);
			ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
					loc, count);
		}
	}
	for (index = ip->i_ext_used; index < ext_used; index++) {
		iext = &ip->i_ext[index];
		if (iext->ib_flags != IB_UN_RE_AL) {
			count = (iext->ib_count + lbmask) >> l2b;
			ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
					iext->ib_block, count);
		}
		bzero(iext, sizeof (struct icb_ext));
		continue;
	}

	/*
	 * release any continuation blocks
	 */
	if (ip->i_con) {

		ASSERT(ip->i_con_count >= ip->i_con_used);

		/*
		 * Find out how many indirect blocks
		 * are required and release the rest
		 */
		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
			elen = sizeof (struct short_ad);
		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			elen = sizeof (struct long_ad);
		}
		ndent = ip->i_max_emb / elen;
		if (ip->i_ext_used > ndent) {
			ecount = ip->i_ext_used - ndent;
		} else {
			ecount = 0;
		}
		con_freed = 0;
		for (index = 0; index < ip->i_con_used; index++) {
			icon = &ip->i_con[index];
			nient = icon->ib_count -
				(sizeof (struct alloc_ext_desc) + elen);
				/* Header + 1 indirect extent */
			nient /= elen;
			if (ecount) {
				if (ecount > nient) {
					ecount -= nient;
				} else {
					ecount = 0;
				}
			} else {
				count = ((icon->ib_count + lbmask) &
						~lbmask) >> l2b;
				ud_free_space(ip->i_udf->udf_vfs,
					icon->ib_prn, icon->ib_block,
					count);
				con_freed++;
				ip->i_cur_max_ext -= nient;
			}
		}
		/*
		 * Set the continuation extents used (i_con_used) to the
		 * correct value.  It is possible for i_con_used to be zero,
		 * if we free up all continuation extents.  This happens
		 * when ecount is 0 before entering the for loop above.
		 */
		ip->i_con_used -= con_freed;
		if (ip->i_con_read > ip->i_con_used) {
			ip->i_con_read = ip->i_con_used;
		}
	}
}

void
ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
{
	/*
	 * The truncate code is the same for
	 * both type 4 and type 4096 files.
	 */
	ud_trunc_ext4(ip, length);
}

/*
 * Remove any inodes in the inode cache belonging to dev
 *
 * There should not be any active ones, return error if any are found but
 * still invalidate others (N.B.: this is a user error, not a system error).
 *
 * Also, count the references to dev by block devices - this really
 * has nothing to do with the object of the procedure, but as we have
 * to scan the inode table here anyway, we might as well get the
 * extra benefit.
 */
int32_t
ud_iflush(struct vfs *vfsp)
{
	int32_t index, busy = 0;
	union ihead *ih;
	struct udf_vfs *udf_vfsp;
	dev_t dev;
	struct vnode *rvp, *vp;
	struct ud_inode *ip, *next;

	ud_printf("ud_iflush\n");
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	rvp = udf_vfsp->udf_root;
	dev = vfsp->vfs_dev;

	mutex_enter(&ud_icache_lock);
	for (index = 0; index < UD_HASH_SZ; index++) {
		ih = &ud_ihead[index];

		next = ih->ih_chain[0];
		while (next != (struct ud_inode *)ih) {
			ip = next;
			next = ip->i_forw;
			if (ip->i_dev != dev) {
				continue;
			}
			vp = ITOV(ip);
			/*
			 * root inode is processed by the caller
			 */
			if (vp == rvp) {
				if (vp->v_count > 1) {
					busy = -1;
				}
				continue;
			}
			if (ip->i_flag & IREF) {
				/*
				 * Set error indicator for return value,
				 * but continue invalidating other
				 * inodes.
				 */
				busy = -1;
				continue;
			}

			rw_enter(&ip->i_contents, RW_WRITER);
			remque(ip);
			ip->i_forw = ip;
			ip->i_back = ip;
			/*
			 * Hold the vnode since it's not done
			 * in VOP_PUTPAGE anymore.
			 */
			VN_HOLD(vp);
			/*
			 * XXX Synchronous write holding
			 * cache lock
			 */
			(void) ud_syncip(ip, B_INVAL, I_SYNC);
			rw_exit(&ip->i_contents);
			VN_RELE(vp);
		}
	}
	mutex_exit(&ud_icache_lock);

	return (busy);
}


/*
 * Check mode permission on inode.  Mode is READ, WRITE or EXEC.
 * In the case of WRITE, the read-only status of the file system
 * is checked.  The applicable mode bits are compared with the
 * requested form of access.  If bits are missing, the secpolicy
 * function will check for privileges.
 */
int
ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr)
{
	int shift = 0;
/*
 *	ASSERT(RW_READ_HELD(&ip->i_contents));
 */

	ud_printf("ud_iaccess\n");
	if (mode & IWRITE) {
		/*
		 * Disallow write attempts on read-only
		 * file systems, unless the file is a block
		 * or character device or a FIFO.
		 */
		if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
			if ((ip->i_type != VCHR) &&
			    (ip->i_type != VBLK) &&
			    (ip->i_type != VFIFO)) {
				return (EROFS);
			}
		}
	}

	/*
	 * Access check is based on only
	 * one of owner, group, public.
	 * If not owner, then check group.
	 * If not a member of the group, then
	 * check public access.
	 */
	if (crgetuid(cr) != ip->i_uid) {
		shift += 5;
		if (!groupmember((uid_t)ip->i_gid, cr))
			shift += 5;
	}
	mode &= ~(ip->i_perm << shift);

	if (mode == 0)
		return (0);

	return (secpolicy_vnode_access(cr, ITOV(ip), ip->i_uid,
							UD2VA_PERM(mode)));
}

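/*
 * Stamp the inode times selected by IACC/IUPD/ICHG with a single
 * gethrestime() sample.  ICHG also invalidates the cached directory
 * offset (i_diroff).
 */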
void
ud_imark(struct ud_inode *ip)
{
	timestruc_t	now;

	gethrestime(&now);
	ud_printf("ud_imark\n");
	if (ip->i_flag & IACC) {
		ip->i_atime.tv_sec = now.tv_sec;
		ip->i_atime.tv_nsec = now.tv_nsec;
	}
	if (ip->i_flag & IUPD) {
		ip->i_mtime.tv_sec = now.tv_sec;
		ip->i_mtime.tv_nsec = now.tv_nsec;
		ip->i_flag |= IMODTIME;
	}
	if (ip->i_flag & ICHG) {
		ip->i_diroff = 0;
		ip->i_ctime.tv_sec = now.tv_sec;
		ip->i_ctime.tv_nsec = now.tv_nsec;
	}
}


void
ud_itimes_nolock(struct ud_inode *ip)
{
	ud_printf("ud_itimes_nolock\n");

	if (ip->i_flag & (IUPD|IACC|ICHG)) {
		if (ip->i_flag & ICHG) {
			ip->i_flag |= IMOD;
		} else {
			ip->i_flag |= IMODACC;
		}
		ud_imark(ip);
		ip->i_flag &= ~(IACC|IUPD|ICHG);
	}
}

void
ud_delcache(struct ud_inode *ip)
{
	ud_printf("ud_delcache\n");

	mutex_enter(&ud_icache_lock);
	remque(ip);
	ip->i_forw = ip;
	ip->i_back = ip;
	mutex_exit(&ud_icache_lock);
}

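/*
 * Release a hold on an inode.  When the last hold goes away the inode
 * is put on the free list - at the front if it is invalid or has no
 * cached pages, otherwise at the end.
 */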
void
ud_idrop(struct ud_inode *ip)
{
	struct vnode *vp = ITOV(ip);

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	ud_printf("ud_idrop\n");

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		return;
	}
	vp->v_count = 0;
	mutex_exit(&vp->v_lock);


	/*
	 *  if inode is invalid or there is no page associated with
	 *  this inode, put the inode in the front of the free list
	 */
	mutex_enter(&ip->i_tlock);
	mutex_enter(&udf_ifree_lock);
	if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
		ud_add_to_free_list(ip, UD_BEGIN);
	} else {
		/*
		 * Otherwise, put the inode back on the end of the free list.
		 */
		ud_add_to_free_list(ip, UD_END);
	}
	mutex_exit(&udf_ifree_lock);
	ip->i_flag &= IMODTIME;
	mutex_exit(&ip->i_tlock);
}

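/*
 * The free list is a doubly linked list of unreferenced inodes,
 * threaded through i_freef/i_freeb with udf_ifreeh and udf_ifreet as
 * head and tail.  Callers must hold udf_ifree_lock.
 */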
void
ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
{
	ASSERT(ip);
	ASSERT(mutex_owned(&udf_ifree_lock));

#ifdef	DEBUG
	/* Search if the element is already in the list */
	if (udf_ifreeh != NULL) {
		struct ud_inode *iq;

		iq = udf_ifreeh;
		while (iq) {
			if (iq == ip) {
				cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
			}
			iq = iq->i_freef;
		}
	}
#endif

	ip->i_freef = NULL;
	ip->i_freeb = NULL;
	if (udf_ifreeh == NULL) {
		/*
		 * Nothing on the list; just add it.
		 */
		udf_ifreeh = ip;
		udf_ifreet = ip;
	} else {
		if (at == UD_BEGIN) {
			/*
			 * Add at the beginning of the list.
			 */
			ip->i_freef = udf_ifreeh;
			udf_ifreeh->i_freeb = ip;
			udf_ifreeh = ip;
		} else {
			/*
			 * Add at the end of the list.
			 */
			ip->i_freeb = udf_ifreet;
			udf_ifreet->i_freef = ip;
			udf_ifreet = ip;
		}
	}
}

void
ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
{
	ASSERT(ip);
	ASSERT(mutex_owned(&udf_ifree_lock));

#ifdef	DEBUG
	{
		struct ud_inode *iq;
		uint32_t found = 0;

		iq = udf_ifreeh;
		while (iq) {
			if (iq == ip) {
				found++;
			}
			iq = iq->i_freef;
		}
		if (found != 1) {
			cmn_err(CE_WARN, "ip %p is found %x times\n",
				(void *)ip,  found);
		}
	}
#endif

	if ((ip->i_freef == NULL) &&
		(ip->i_freeb == NULL)) {
		if (ip != udf_ifreeh) {
			return;
		}
	}

	if ((at == UD_BEGIN) ||
		(ip == udf_ifreeh)) {
		udf_ifreeh = ip->i_freef;
		if (ip->i_freef == NULL) {
			udf_ifreet = NULL;
		} else {
			udf_ifreeh->i_freeb = NULL;
		}
	} else {
		ip->i_freeb->i_freef = ip->i_freef;
		if (ip->i_freef) {
			ip->i_freef->i_freeb = ip->i_freeb;
		} else {
			udf_ifreet = ip->i_freeb;
		}
	}
	ip->i_freef = NULL;
	ip->i_freeb = NULL;
}

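/*
 * One-time initialization of the inode hash heads (each head is a
 * self-referencing sentinel), the free list and the global locks.
 * Runs before any other thread can enter the file system.
 */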
void
ud_init_inodes(void)
{
	union ihead *ih = ud_ihead;
	int index;

#ifndef	__lint
	_NOTE(NO_COMPETING_THREADS_NOW);
#endif
	for (index = 0; index < UD_HASH_SZ; index++, ih++) {
		ih->ih_head[0] = ih;
		ih->ih_head[1] = ih;
	}
	mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);

	udf_ifreeh = NULL;
	udf_ifreet = NULL;
	mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
	udf_vfs_instances = NULL;
	mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);

#ifndef	__lint
	_NOTE(COMPETING_THREADS_NOW);
#endif
}