xref: /titanic_52/usr/src/uts/common/fs/udfs/udf_inode.c (revision c0dd49bdd68c0d758a67d56f07826f3b45cfc664)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23  */
24 
25 #include <sys/types.h>
26 #include <sys/t_lock.h>
27 #include <sys/param.h>
28 #include <sys/time.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/resource.h>
32 #include <sys/signal.h>
33 #include <sys/cred.h>
34 #include <sys/user.h>
35 #include <sys/buf.h>
36 #include <sys/vfs.h>
37 #include <sys/stat.h>
38 #include <sys/vnode.h>
39 #include <sys/mode.h>
40 #include <sys/proc.h>
41 #include <sys/disp.h>
42 #include <sys/file.h>
43 #include <sys/fcntl.h>
44 #include <sys/flock.h>
45 #include <sys/kmem.h>
46 #include <sys/uio.h>
47 #include <sys/dnlc.h>
48 #include <sys/conf.h>
49 #include <sys/errno.h>
50 #include <sys/mman.h>
51 #include <sys/fbuf.h>
52 #include <sys/pathname.h>
53 #include <sys/debug.h>
54 #include <sys/vmsystm.h>
55 #include <sys/cmn_err.h>
56 #include <sys/dirent.h>
57 #include <sys/errno.h>
58 #include <sys/modctl.h>
59 #include <sys/statvfs.h>
60 #include <sys/mount.h>
61 #include <sys/sunddi.h>
62 #include <sys/bootconf.h>
63 #include <sys/policy.h>
64 
65 #include <vm/hat.h>
66 #include <vm/page.h>
67 #include <vm/pvn.h>
68 #include <vm/as.h>
69 #include <vm/seg.h>
70 #include <vm/seg_map.h>
71 #include <vm/seg_kmem.h>
72 #include <vm/seg_vn.h>
73 #include <vm/rm.h>
74 #include <vm/page.h>
75 #include <sys/swap.h>
76 
77 
78 #include <fs/fs_subr.h>
79 
80 
81 #include <sys/fs/udf_volume.h>
82 #include <sys/fs/udf_inode.h>
83 
84 extern struct vnodeops *udf_vnodeops;
85 
86 kmutex_t ud_sync_busy;
87 /*
88  * udf_vfs list manipulation routines
89  */
90 kmutex_t udf_vfs_mutex;
91 struct udf_vfs *udf_vfs_instances;
92 #ifndef	__lint
93 _NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
94 #endif
95 
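/*
 * In-core inode cache: inodes hang off doubly linked hash chains
 * anchored in ud_ihead[], keyed by UD_INOHASH on the (device, on-disk
 * ICB address) pair and protected by ud_icache_lock.
 */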
96 union ihead ud_ihead[UD_HASH_SZ];
97 kmutex_t ud_icache_lock;
98 
99 #define	UD_BEGIN	0x0
100 #define	UD_END		0x1
101 #define	UD_UNKN		0x2
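/*
 * Free-list positions passed to ud_add_to_free_list() and
 * ud_remove_from_free_list(): UD_BEGIN works on the head of the list,
 * UD_END on the tail, and UD_UNKN tells the remove code to unlink the
 * inode from wherever it happens to be.
 */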
102 struct ud_inode *udf_ifreeh, *udf_ifreet;
103 kmutex_t udf_ifree_lock;
104 #ifndef	__lint
105 _NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreeh))
106 _NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreet))
107 #endif
108 
109 kmutex_t ud_nino_lock;
110 int32_t ud_max_inodes = 512;
111 int32_t ud_cur_inodes = 0;
112 #ifndef	__lint
113 _NOTE(MUTEX_PROTECTS_DATA(ud_nino_lock, ud_cur_inodes))
114 #endif
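/*
 * ud_max_inodes is a soft high-water mark on the in-core inode count:
 * once ud_cur_inodes goes over it, ud_iget() recycles inodes from the
 * free list (purging the DNLC if needed) instead of allocating fresh
 * ones, and ud_iinactive() destroys inodes instead of caching them.
 */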
115 
116 uid_t ud_default_uid = 0;
117 gid_t ud_default_gid = 3;
118 
119 int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
120 int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
121 void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
122 void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
123 void ud_trunc_ext4(struct ud_inode *, u_offset_t);
124 void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
125 void ud_add_to_free_list(struct ud_inode *, uint32_t);
126 void ud_remove_from_free_list(struct ud_inode *, uint32_t);
127 
128 
129 #ifdef	DEBUG
130 struct ud_inode *
131 ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
132 {
133 	int32_t hno;
134 	union ihead *ih;
135 	struct ud_inode *ip;
136 	struct udf_vfs *udf_vfsp;
137 	uint32_t loc, dummy;
138 
139 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
140 	loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
141 
142 	mutex_enter(&ud_icache_lock);
143 	hno = UD_INOHASH(vfsp->vfs_dev, loc);
144 	ih = &ud_ihead[hno];
145 	for (ip = ih->ih_chain[0];
146 	    ip != (struct ud_inode *)ih;
147 	    ip = ip->i_forw) {
148 		if ((prn == ip->i_icb_prn) && (ploc == ip->i_icb_block) &&
149 		    (vfsp->vfs_dev == ip->i_dev)) {
150 			mutex_exit(&ud_icache_lock);
151 			return (ip);
152 		}
153 	}
154 	mutex_exit(&ud_icache_lock);
155 	return (0);
156 }
157 #endif
158 
159 /* ARGSUSED */
160 int
161 ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc,
162 	struct ud_inode **ipp, struct buf *pbp, struct cred *cred)
163 {
164 	int32_t hno, nomem = 0, icb_tag_flags;
165 	union ihead *ih;
166 	struct ud_inode *ip;
167 	struct vnode *vp;
168 	struct buf *bp = NULL;
169 	struct file_entry *fe;
170 	struct udf_vfs *udf_vfsp;
171 	struct ext_attr_hdr *eah;
172 	struct attr_hdr *ah;
173 	int32_t ea_len, ea_off;
174 	daddr_t loc;
175 	uint64_t offset = 0;
176 	struct icb_ext *iext, *con;
177 	uint32_t length, dummy;
178 	int32_t ndesc, ftype;
179 	uint16_t old_prn;
180 	uint32_t old_block, old_lbano;
181 
182 	ud_printf("ud_iget\n");
183 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
184 	old_prn = 0;
185 	old_block = old_lbano = 0;
186 	ftype = 0;
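	/*
	 * Translate the ICB's (partition, logical block) pair to a device
	 * block address; the hash chains are keyed on this and vfs_dev.
	 */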
187 	loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
188 loop:
189 	mutex_enter(&ud_icache_lock);
190 	hno = UD_INOHASH(vfsp->vfs_dev, loc);
191 
192 	ih = &ud_ihead[hno];
193 	for (ip = ih->ih_chain[0];
194 	    ip != (struct ud_inode *)ih;
195 	    ip = ip->i_forw) {
196 
197 		if ((prn == ip->i_icb_prn) &&
198 		    (ploc == ip->i_icb_block) &&
199 		    (vfsp->vfs_dev == ip->i_dev)) {
200 
201 			vp = ITOV(ip);
202 			VN_HOLD(vp);
203 			mutex_exit(&ud_icache_lock);
204 
205 			rw_enter(&ip->i_contents, RW_READER);
206 			mutex_enter(&ip->i_tlock);
207 			if ((ip->i_flag & IREF) == 0) {
208 				mutex_enter(&udf_ifree_lock);
209 				ud_remove_from_free_list(ip, UD_UNKN);
210 				mutex_exit(&udf_ifree_lock);
211 			}
212 			ip->i_flag |= IREF;
213 			mutex_exit(&ip->i_tlock);
214 			rw_exit(&ip->i_contents);
215 
216 			*ipp = ip;
217 
218 			if (pbp != NULL) {
219 				brelse(pbp);
220 			}
221 
222 			return (0);
223 		}
224 	}
225 
226 	/*
227 	 * We don't have it in the cache
228 	 * Allocate a new entry
229 	 */
230 tryagain:
231 	mutex_enter(&udf_ifree_lock);
232 	mutex_enter(&ud_nino_lock);
233 	if (ud_cur_inodes > ud_max_inodes) {
234 		int32_t purged;
235 
236 		mutex_exit(&ud_nino_lock);
237 		while (udf_ifreeh == NULL ||
238 		    vn_has_cached_data(ITOV(udf_ifreeh))) {
239 			/*
240 			 * Try to put an inode on the freelist that's
241 			 * sitting in the dnlc.
242 			 */
243 			mutex_exit(&udf_ifree_lock);
244 			purged = dnlc_fs_purge1(udf_vnodeops);
245 			mutex_enter(&udf_ifree_lock);
246 			if (!purged) {
247 				break;
248 			}
249 		}
250 		mutex_enter(&ud_nino_lock);
251 	}
252 
253 	/*
254 	 * If there's a free one available and it has no pages attached
255 	 * take it. If we're over the high water mark or low on memory,
256 	 * take it even if it has attached pages. Otherwise, make a new one.
257 	 */
258 	if (udf_ifreeh &&
259 	    (nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
260 	    ud_cur_inodes >= ud_max_inodes)) {
261 
262 		mutex_exit(&ud_nino_lock);
263 		ip = udf_ifreeh;
264 		vp = ITOV(ip);
265 
266 		ud_remove_from_free_list(ip, UD_BEGIN);
267 
268 		mutex_exit(&udf_ifree_lock);
269 		if (ip->i_flag & IREF) {
270 			cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
271 			mutex_exit(&ud_icache_lock);
272 			if (pbp != NULL) {
273 				brelse(pbp);
274 			}
275 			return (EINVAL);
276 		}
277 		rw_enter(&ip->i_contents, RW_WRITER);
278 
279 		/*
280 		 * We call udf_syncip() to synchronously destroy all pages
281 		 * associated with the vnode before re-using it. The pageout
282 		 * thread may have beaten us to this page so our v_count can
283 		 * be > 0 at this point even though we are on the freelist.
284 		 */
285 		mutex_enter(&ip->i_tlock);
286 		ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
287 		mutex_exit(&ip->i_tlock);
288 
289 		VN_HOLD(vp);
290 		if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
291 			ud_idrop(ip);
292 			rw_exit(&ip->i_contents);
293 			mutex_exit(&ud_icache_lock);
294 			goto loop;
295 		}
296 
297 		mutex_enter(&ip->i_tlock);
298 		ip->i_flag &= ~IMODTIME;
299 		mutex_exit(&ip->i_tlock);
300 
301 		if (ip->i_ext) {
302 			kmem_free(ip->i_ext,
303 			    sizeof (struct icb_ext) * ip->i_ext_count);
304 			ip->i_ext = 0;
305 			ip->i_ext_count = ip->i_ext_used = 0;
306 		}
307 
308 		if (ip->i_con) {
309 			kmem_free(ip->i_con,
310 			    sizeof (struct icb_ext) * ip->i_con_count);
311 			ip->i_con = 0;
312 			ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
313 		}
314 
315 		/*
316 		 * The pageout thread may not have had a chance to release
317 		 * its hold on the vnode (if it was active with this vp),
318 		 * but the pages should all be invalidated.
319 		 */
320 	} else {
321 		mutex_exit(&ud_nino_lock);
322 		mutex_exit(&udf_ifree_lock);
323 		/*
324 		 * Try to get memory for this inode without blocking.
325 		 * If we can't and there is something on the freelist,
326 		 * go ahead and use it, otherwise block waiting for
327 		 * memory holding the hash_lock. We expose a potential
328 		 * deadlock if all users of memory have to do a ud_iget()
329 		 * before releasing memory.
330 		 */
331 		ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
332 		    KM_NOSLEEP);
333 		vp = vn_alloc(KM_NOSLEEP);
334 		if ((ip == NULL) || (vp == NULL)) {
335 			mutex_enter(&udf_ifree_lock);
336 			if (udf_ifreeh) {
337 				mutex_exit(&udf_ifree_lock);
338 				if (ip != NULL)
339 					kmem_free(ip, sizeof (struct ud_inode));
340 				if (vp != NULL)
341 					vn_free(vp);
342 				nomem = 1;
343 				goto tryagain;
344 			} else {
345 				mutex_exit(&udf_ifree_lock);
346 				if (ip == NULL)
347 					ip = (struct ud_inode *)
348 					    kmem_zalloc(
349 					    sizeof (struct ud_inode),
350 					    KM_SLEEP);
351 				if (vp == NULL)
352 					vp = vn_alloc(KM_SLEEP);
353 			}
354 		}
355 		ip->i_vnode = vp;
356 
357 		ip->i_marker1 = (uint32_t)0xAAAAAAAA;
358 		ip->i_marker2 = (uint32_t)0xBBBBBBBB;
359 		ip->i_marker3 = (uint32_t)0xCCCCCCCC;
360 
361 		rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
362 		rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
363 		mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);
364 
365 		ip->i_forw = ip;
366 		ip->i_back = ip;
367 		vp->v_data = (caddr_t)ip;
368 		vn_setops(vp, udf_vnodeops);
369 		ip->i_flag = IREF;
370 		cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
371 		mutex_enter(&ud_nino_lock);
372 		ud_cur_inodes++;
373 		mutex_exit(&ud_nino_lock);
374 
375 		rw_enter(&ip->i_contents, RW_WRITER);
376 	}
377 
378 	if (vp->v_count < 1) {
379 		cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
380 		mutex_exit(&ud_icache_lock);
381 		rw_exit(&ip->i_contents);
382 		if (pbp != NULL) {
383 			brelse(pbp);
384 		}
385 		return (EINVAL);
386 	}
387 	if (vn_has_cached_data(vp)) {
388 		cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
389 		mutex_exit(&ud_icache_lock);
390 		rw_exit(&ip->i_contents);
391 		if (pbp != NULL) {
392 			brelse(pbp);
393 		}
394 		return (EINVAL);
395 	}
396 
397 	/*
398 	 * Move the inode on the chain for its new (ino, dev) pair
399 	 */
400 	remque(ip);
401 	ip->i_forw = ip;
402 	ip->i_back = ip;
403 	insque(ip, ih);
404 
405 	ip->i_dev = vfsp->vfs_dev;
406 	ip->i_udf = udf_vfsp;
407 	ip->i_diroff = 0;
408 	ip->i_devvp = ip->i_udf->udf_devvp;
409 	ip->i_icb_prn = prn;
410 	ip->i_icb_block = ploc;
411 	ip->i_icb_lbano = loc;
412 	ip->i_nextr = 0;
413 	ip->i_seq = 0;
414 	mutex_exit(&ud_icache_lock);
415 
416 read_de:
417 	if (pbp != NULL) {
418 		/*
419 		 * The assumption is that we will not
420 		 * create a strategy-type-4096 file here.
421 		 */
422 		bp = pbp;
423 	} else {
424 		bp = ud_bread(ip->i_dev,
425 		    ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
426 		    udf_vfsp->udf_lbsize);
427 	}
428 
429 	/*
430 	 * Check I/O errors
431 	 */
432 	fe = (struct file_entry *)bp->b_un.b_addr;
433 	if ((bp->b_flags & B_ERROR) ||
434 	    (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
435 	    ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {
436 
437 		if (((bp->b_flags & B_ERROR) == 0) &&
438 		    (ftype == STRAT_TYPE4096)) {
439 			if (ud_check_te_unrec(udf_vfsp,
440 			    bp->b_un.b_addr, ip->i_icb_block) == 0) {
441 
442 				brelse(bp);
443 
444 				/*
445 				 * restore old file entry location
446 				 */
447 				ip->i_icb_prn = old_prn;
448 				ip->i_icb_block = old_block;
449 				ip->i_icb_lbano = old_lbano;
450 
451 				/*
452 				 * reread old file entry
453 				 */
454 				bp = ud_bread(ip->i_dev,
455 				    old_lbano << udf_vfsp->udf_l2d_shift,
456 				    udf_vfsp->udf_lbsize);
457 				if ((bp->b_flags & B_ERROR) == 0) {
458 					fe = (struct file_entry *)
459 					    bp->b_un.b_addr;
460 					if (ud_verify_tag_and_desc(&fe->fe_tag,
461 					    UD_FILE_ENTRY, ip->i_icb_block, 1,
462 					    udf_vfsp->udf_lbsize) == 0) {
463 						goto end_4096;
464 					}
465 				}
466 			}
467 		}
468 error_ret:
469 		brelse(bp);
470 		/*
471 		 * The inode may not contain anything useful. Mark it as
472 		 * having an error and let anyone else who was waiting for
473 		 * this know there was an error. Callers waiting for
474 		 * access to this inode in ud_iget will find
475 		 * the i_icb_lbano == 0, so there won't be a match.
476 		 * It remains in the cache. Put it back on the freelist.
477 		 */
478 		mutex_enter(&vp->v_lock);
479 		vp->v_count--;
480 		mutex_exit(&vp->v_lock);
481 		ip->i_icb_lbano = 0;
482 
483 		/*
484 		 * The following two lines make it
485 		 * impossible for anyone to do a
486 		 * VN_HOLD and then a VN_RELE,
487 		 * thereby avoiding a ud_iinactive.
488 		 */
489 		ip->i_icb_prn = 0xffff;
490 		ip->i_icb_block = 0;
491 
492 		/*
493 		 * remove the bad inode from hash chains
494 		 * so that during unmount we will not
495 		 * go through this inode
496 		 */
497 		mutex_enter(&ud_icache_lock);
498 		remque(ip);
499 		ip->i_forw = ip;
500 		ip->i_back = ip;
501 		mutex_exit(&ud_icache_lock);
502 
503 		/* Put the inode at the front of the freelist */
504 		mutex_enter(&ip->i_tlock);
505 		mutex_enter(&udf_ifree_lock);
506 		ud_add_to_free_list(ip, UD_BEGIN);
507 		mutex_exit(&udf_ifree_lock);
508 		ip->i_flag = 0;
509 		mutex_exit(&ip->i_tlock);
510 		rw_exit(&ip->i_contents);
511 		return (EIO);
512 	}
513 
514 	if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
515 		struct buf *ibp = NULL;
516 		struct indirect_entry *ie;
517 
518 		/*
519 		 * save old file_entry location
520 		 */
521 		old_prn = ip->i_icb_prn;
522 		old_block = ip->i_icb_block;
523 		old_lbano = ip->i_icb_lbano;
524 
525 		ftype = STRAT_TYPE4096;
526 
527 		/*
528 		 * If astrat is 4096, different versions
529 		 * of the file exist on the media and
530 		 * we are supposed to get to the latest
531 		 * version of the file.
532 		 */
533 
534 		/*
535 		 * The IE is supposed to be in the block
536 		 * following the DE.
537 		 */
538 		ibp = ud_bread(ip->i_dev,
539 		    (ip->i_icb_lbano + 1) << udf_vfsp->udf_l2d_shift,
540 		    udf_vfsp->udf_lbsize);
541 		if (ibp->b_flags & B_ERROR) {
542 			/*
543 			 * Get rid of current ibp and
544 			 * then goto error on DE's bp
545 			 */
546 ie_error:
547 			brelse(ibp);
548 			goto error_ret;
549 		}
550 
551 		ie = (struct indirect_entry *)ibp->b_un.b_addr;
552 		if (ud_verify_tag_and_desc(&ie->ie_tag,
553 		    UD_INDIRECT_ENT, ip->i_icb_block + 1,
554 		    1, udf_vfsp->udf_lbsize) == 0) {
555 			struct long_ad *lad;
556 
557 			lad = &ie->ie_indirecticb;
558 			ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
559 			ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
560 			ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
561 			    ip->i_icb_prn, ip->i_icb_block,
562 			    1, &dummy);
563 			brelse(ibp);
564 			brelse(bp);
565 			goto read_de;
566 		}
567 
568 		/*
569 		 * If this block is TE or unrecorded we
570 		 * are at the last entry
571 		 */
572 		if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
573 		    ip->i_icb_block + 1) != 0) {
574 			/*
575 			 * This is neither a TE nor an
576 			 * unrecorded block, and the check
577 			 * above showed that it is not a
578 			 * valid IE either, so treat it
579 			 * as an error.
579 			 */
580 			goto ie_error;
581 		}
582 		/*
583 		 * If ud_check_unrec returns "0"
584 		 * this is the last in the chain
585 		 * Latest file_entry
586 		 */
587 		brelse(ibp);
588 	}
589 
590 end_4096:
591 
592 	ip->i_uid = SWAP_32(fe->fe_uid);
593 	if (ip->i_uid == -1) {
594 		ip->i_uid = ud_default_uid;
595 	}
596 	ip->i_gid = SWAP_32(fe->fe_gid);
597 	if (ip->i_gid == -1) {
598 		ip->i_gid = ud_default_gid;
599 	}
600 	ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
601 	if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
602 		ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
603 	}
604 
605 	ip->i_nlink = SWAP_16(fe->fe_lcount);
606 	ip->i_size = SWAP_64(fe->fe_info_len);
607 	ip->i_lbr = SWAP_64(fe->fe_lbr);
608 
609 	ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
610 	ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
611 	ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);
612 
613 
614 	ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
615 	icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);
616 
617 	if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
618 	    (fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {
619 
620 		eah = (struct ext_attr_hdr *)fe->fe_spec;
621 		ea_off = GET_32(&eah->eah_ial);
622 		ea_len = GET_32(&fe->fe_len_ear);
623 		if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
624 		    UD_EXT_ATTR_HDR, ip->i_icb_block, 1,
625 		    sizeof (struct file_entry) -
626 		    offsetof(struct file_entry, fe_spec)) == 0)) {
627 
628 			while (ea_off < ea_len) {
629 				/*
630 				 * We now check the validity of ea_off.
631 				 * (ea_len - ea_off) should be large enough to
632 				 * hold at least the attribute header.
633 				 */
634 				if ((ea_len - ea_off) <
635 				    sizeof (struct attr_hdr)) {
636 					cmn_err(CE_NOTE,
637 					    "ea_len(0x%x) - ea_off(0x%x) is "
638 					    "too small to hold attr. info. "
639 					    "blockno 0x%x\n",
640 					    ea_len, ea_off, ip->i_icb_block);
641 					goto error_ret;
642 				}
643 				ah = (struct attr_hdr *)&fe->fe_spec[ea_off];
644 
645 				/*
646 				 * Device Specification EA
647 				 */
648 				if ((GET_32(&ah->ahdr_atype) == 12) &&
649 				    (ah->ahdr_astype == 1)) {
650 					struct dev_spec_ear *ds;
651 
652 					if ((ea_len - ea_off) <
653 					    sizeof (struct dev_spec_ear)) {
654 						cmn_err(CE_NOTE,
655 						    "ea_len(0x%x) - "
656 						    "ea_off(0x%x) is too small "
657 						    "to hold dev_spec_ear."
658 						    " blockno 0x%x\n",
659 						    ea_len, ea_off,
660 						    ip->i_icb_block);
661 						goto error_ret;
662 					}
663 					ds = (struct dev_spec_ear *)ah;
664 					ip->i_major = GET_32(&ds->ds_major_id);
665 					ip->i_minor = GET_32(&ds->ds_minor_id);
666 				}
667 
668 				/*
669 				 * Impl Use EA
670 				 */
671 				if ((GET_32(&ah->ahdr_atype) == 2048) &&
672 				    (ah->ahdr_astype == 1)) {
673 					struct iu_ea *iuea;
674 					struct copy_mgt_info *cmi;
675 
676 					if ((ea_len - ea_off) <
677 					    sizeof (struct iu_ea)) {
678 						cmn_err(CE_NOTE,
679 "ea_len(0x%x) - ea_off(0x%x) is too small to hold iu_ea. blockno 0x%x\n",
680 						    ea_len, ea_off,
681 						    ip->i_icb_block);
682 						goto error_ret;
683 					}
684 					iuea = (struct iu_ea *)ah;
685 					if (strncmp(iuea->iuea_ii.reg_id,
686 					    UDF_FREEEASPACE,
687 					    sizeof (iuea->iuea_ii.reg_id))
688 					    == 0) {
689 						/* skip it */
690 						iuea = iuea;
691 					} else if (strncmp(iuea->iuea_ii.reg_id,
692 					    UDF_CGMS_INFO,
693 					    sizeof (iuea->iuea_ii.reg_id))
694 					    == 0) {
695 						cmi = (struct copy_mgt_info *)
696 						    iuea->iuea_iu;
697 						cmi = cmi;
698 					}
699 				}
700 				/* ??? PARANOIA */
701 				if (GET_32(&ah->ahdr_length) == 0) {
702 					break;
703 				}
704 				ea_off += GET_32(&ah->ahdr_length);
705 			}
706 		}
707 	}
708 
709 	ip->i_nextr = 0;
710 
711 	ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
712 	ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);
713 
714 	ip->i_desc_type = icb_tag_flags & 0x7;
715 
716 	/* Strictly Paranoia */
717 	ip->i_ext = NULL;
718 	ip->i_ext_count = ip->i_ext_used = 0;
719 	ip->i_con = 0;
720 	ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
721 
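	/*
	 * Per ECMA-167, 0xB0 (176) is the size of the fixed portion of a
	 * file entry, i.e. the offset of fe_spec[]; embedded data or
	 * allocation descriptors begin after the extended attributes
	 * stored there.
	 */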
722 	ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
723 	ip->i_max_emb =  udf_vfsp->udf_lbsize - ip->i_data_off;
724 	if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
725 		/* Short allocation desc */
726 		struct short_ad *sad;
727 
728 		ip->i_ext_used = 0;
729 		ip->i_ext_count = ndesc =
730 		    SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
731 		ip->i_ext_count =
732 		    ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
733 		ip->i_ext = (struct icb_ext  *)kmem_zalloc(ip->i_ext_count *
734 		    sizeof (struct icb_ext), KM_SLEEP);
735 		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
736 		ip->i_cur_max_ext--;
737 
738 		if ((ip->i_astrat != STRAT_TYPE4) &&
739 		    (ip->i_astrat != STRAT_TYPE4096)) {
740 			goto error_ret;
741 		}
742 
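		/*
		 * Per ECMA-167, the top two bits of an on-disk extent
		 * length hold the extent type (recorded, allocated but
		 * unrecorded, unallocated, or IB_CON for a continuation
		 * of the allocation descriptors); the low 30 bits hold
		 * the byte count, hence the 0x3FFFFFFF masks below.
		 */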
743 		sad = (struct short_ad *)
744 		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
745 		iext = ip->i_ext;
746 		while (ndesc--) {
747 			length = SWAP_32(sad->sad_ext_len);
748 			if ((length & 0x3FFFFFFF) == 0) {
749 				break;
750 			}
751 			if (((length >> 30) & IB_MASK) == IB_CON) {
752 				if (ip->i_con == NULL) {
753 					ip->i_con_count = EXT_PER_MALLOC;
754 					ip->i_con_used = 0;
755 					ip->i_con_read = 0;
756 					ip->i_con = kmem_zalloc(
757 					    ip->i_con_count *
758 					    sizeof (struct icb_ext),
759 					    KM_SLEEP);
760 				}
761 				con = &ip->i_con[ip->i_con_used];
762 				con->ib_prn = 0;
763 				con->ib_block = SWAP_32(sad->sad_ext_loc);
764 				con->ib_count = length & 0x3FFFFFFF;
765 				con->ib_flags = (length >> 30) & IB_MASK;
766 				ip->i_con_used++;
767 				sad++;
768 				break;
769 			}
770 			iext->ib_prn = 0;
771 			iext->ib_block = SWAP_32(sad->sad_ext_loc);
772 			length = SWAP_32(sad->sad_ext_len);
773 			iext->ib_count = length & 0x3FFFFFFF;
774 			iext->ib_offset = offset;
775 			iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
776 			iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
777 			offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
778 			    (~udf_vfsp->udf_lbmask);
779 
780 			iext->ib_flags = (length >> 30) & IB_MASK;
781 
782 			ip->i_ext_used++;
783 			iext++;
784 			sad++;
785 		}
786 	} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
787 		/* Long allocation desc */
788 		struct long_ad *lad;
789 
790 		ip->i_ext_used = 0;
791 		ip->i_ext_count = ndesc =
792 		    SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
793 		ip->i_ext_count =
794 		    ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
795 		ip->i_ext = (struct icb_ext  *)kmem_zalloc(ip->i_ext_count *
796 		    sizeof (struct icb_ext), KM_SLEEP);
797 
798 		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
799 		ip->i_cur_max_ext--;
800 
801 		if ((ip->i_astrat != STRAT_TYPE4) &&
802 		    (ip->i_astrat != STRAT_TYPE4096)) {
803 			goto error_ret;
804 		}
805 
806 		lad = (struct long_ad *)
807 		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
808 		iext = ip->i_ext;
809 		while (ndesc--) {
810 			length = SWAP_32(lad->lad_ext_len);
811 			if ((length & 0x3FFFFFFF) == 0) {
812 				break;
813 			}
814 			if (((length >> 30) & IB_MASK) == IB_CON) {
815 				if (ip->i_con == NULL) {
816 					ip->i_con_count = EXT_PER_MALLOC;
817 					ip->i_con_used = 0;
818 					ip->i_con_read = 0;
819 					ip->i_con = kmem_zalloc(
820 					    ip->i_con_count *
821 					    sizeof (struct icb_ext),
822 					    KM_SLEEP);
823 				}
824 				con = &ip->i_con[ip->i_con_used];
825 				con->ib_prn = SWAP_16(lad->lad_ext_prn);
826 				con->ib_block = SWAP_32(lad->lad_ext_loc);
827 				con->ib_count = length & 0x3FFFFFFF;
828 				con->ib_flags = (length >> 30) & IB_MASK;
829 				ip->i_con_used++;
830 				lad++;
831 				break;
832 			}
833 			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
834 			iext->ib_block = SWAP_32(lad->lad_ext_loc);
835 			iext->ib_count = length & 0x3FFFFFFF;
836 			iext->ib_offset = offset;
837 			iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
838 			iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
839 			offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
840 			    (~udf_vfsp->udf_lbmask);
841 
842 			iext->ib_flags = (length >> 30) & IB_MASK;
843 
844 			ip->i_ext_used++;
845 			iext++;
846 			lad++;
847 		}
848 	} else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
849 		ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);
850 
851 		if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
852 			goto error_ret;
853 		}
854 	} else {
855 		/* Not to be used in UDF 1.50 */
856 		cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
857 		    ip->i_desc_type);
858 		goto error_ret;
859 	}
860 
861 
862 	if (icb_tag_flags & ICB_FLAG_SETUID) {
863 		ip->i_char = ISUID;
864 	} else {
865 		ip->i_char = 0;
866 	}
867 	if (icb_tag_flags & ICB_FLAG_SETGID) {
868 		ip->i_char |= ISGID;
869 	}
870 	if (icb_tag_flags & ICB_FLAG_STICKY) {
871 		ip->i_char |= ISVTX;
872 	}
873 	switch (fe->fe_icb_tag.itag_ftype) {
874 		case FTYPE_DIRECTORY :
875 			ip->i_type = VDIR;
876 			break;
877 		case FTYPE_FILE :
878 			ip->i_type = VREG;
879 			break;
880 		case FTYPE_BLOCK_DEV :
881 			ip->i_type = VBLK;
882 			break;
883 		case FTYPE_CHAR_DEV :
884 			ip->i_type = VCHR;
885 			break;
886 		case FTYPE_FIFO :
887 			ip->i_type = VFIFO;
888 			break;
889 		case FTYPE_C_ISSOCK :
890 			ip->i_type = VSOCK;
891 			break;
892 		case FTYPE_SYMLINK :
893 			ip->i_type = VLNK;
894 			break;
895 		default :
896 			ip->i_type = VNON;
897 			break;
898 	}
899 
900 	if (ip->i_type == VBLK || ip->i_type == VCHR) {
901 		ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
902 	}
903 
904 	/*
905 	 * Fill in the rest.  Don't bother with the vnode lock because nobody
906 	 * should be looking at this vnode.  We have already invalidated the
907 	 * pages if it had any so pageout shouldn't be referencing this vnode
908 	 * and we are holding the write contents lock so a look up can't use
909 	 * the vnode.
910 	 */
911 	vp->v_vfsp = vfsp;
912 	vp->v_type = ip->i_type;
913 	vp->v_rdev = ip->i_rdev;
914 	if (ip->i_udf->udf_root_blkno == loc) {
915 		vp->v_flag = VROOT;
916 	} else {
917 		vp->v_flag = 0;
918 	}
919 
920 	brelse(bp);
921 	*ipp = ip;
922 	rw_exit(&ip->i_contents);
923 	vn_exists(vp);
924 	return (0);
925 }
926 
927 void
928 ud_iinactive(struct ud_inode *ip, struct cred *cr)
929 {
930 	int32_t busy = 0;
931 	struct vnode *vp;
932 	vtype_t type;
933 	caddr_t addr, addr1;
934 	size_t size, size1;
935 
936 
937 	ud_printf("ud_iinactive\n");
938 
939 	/*
940 	 * Get exclusive access to inode data.
941 	 */
942 	rw_enter(&ip->i_contents, RW_WRITER);
943 
944 	/*
945 	 * Make sure no one reclaimed the inode before we put
946 	 * it on the freelist or destroy it. We keep our 'hold'
947 	 * on the vnode from vn_rele until we are ready to
948 	 * do something with the inode (freelist/destroy).
949 	 *
950 	 * Pageout may put a VN_HOLD/VN_RELE at anytime during this
951 	 * operation via an async putpage, so we must make sure
952 	 * we don't free/destroy the inode more than once. ud_iget
953 	 * may also put a VN_HOLD on the inode before it grabs
954 	 * the i_contents lock. This is done so we don't kmem_free
955 	 * an inode that a thread is waiting on.
956 	 */
957 	vp = ITOV(ip);
958 
959 	mutex_enter(&vp->v_lock);
960 	if (vp->v_count < 1) {
961 		cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
		mutex_exit(&vp->v_lock);
		rw_exit(&ip->i_contents);
962 		return;
963 	}
964 	if ((vp->v_count > 1) || ((ip->i_flag & IREF) == 0)) {
965 		vp->v_count--;		/* release our hold from vn_rele */
966 		mutex_exit(&vp->v_lock);
967 		rw_exit(&ip->i_contents);
968 		return;
969 	}
970 	mutex_exit(&vp->v_lock);
971 
972 	/*
973 	 * For forced umount case: if i_udf is NULL, the contents of
974 	 * the inode and all the pages have already been pushed back
975 	 * to disk. It can be safely destroyed.
976 	 */
977 	if (ip->i_udf == NULL) {
978 		addr = (caddr_t)ip->i_ext;
979 		size = sizeof (struct icb_ext) * ip->i_ext_count;
980 		ip->i_ext = 0;
981 		ip->i_ext_count = ip->i_ext_used = 0;
982 		addr1 = (caddr_t)ip->i_con;
983 		size1 = sizeof (struct icb_ext) * ip->i_con_count;
984 		ip->i_con = 0;
985 		ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
986 		rw_exit(&ip->i_contents);
987 		vn_invalid(vp);
988 
989 		mutex_enter(&ud_nino_lock);
990 		ud_cur_inodes--;
991 		mutex_exit(&ud_nino_lock);
992 
993 		cv_destroy(&ip->i_wrcv);  /* throttling */
994 		rw_destroy(&ip->i_rwlock);
996 		rw_destroy(&ip->i_contents);
997 		kmem_free(addr, size);
998 		kmem_free(addr1, size1);
999 		vn_free(vp);
1000 		kmem_free(ip, sizeof (struct ud_inode));
1001 		return;
1002 	}
1003 
1004 	if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
1005 		if (ip->i_nlink <= 0) {
1006 			ip->i_marker3 = (uint32_t)0xDDDD0000;
1007 			ip->i_nlink = 1;	/* prevent free-ing twice */
1008 			(void) ud_itrunc(ip, 0, 0, cr);
1009 			type = ip->i_type;
1010 			ip->i_perm = 0;
1011 			ip->i_uid = 0;
1012 			ip->i_gid = 0;
1013 			ip->i_rdev = 0;	/* Zero in core version of rdev */
1014 			mutex_enter(&ip->i_tlock);
1015 			ip->i_flag |= IUPD|ICHG;
1016 			mutex_exit(&ip->i_tlock);
1017 			ud_ifree(ip, type);
1018 			ip->i_icb_prn = 0xFFFF;
1019 		} else if (!IS_SWAPVP(vp)) {
1020 			/*
1021 			 * Write the inode out if dirty. Pages are
1022 			 * written back and put on the freelist.
1023 			 */
1024 			(void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
1025 			/*
1026 			 * Do nothing if inode is now busy -- inode may
1027 			 * have gone busy because ud_syncip
1028 			 * releases/reacquires the i_contents lock
1029 			 */
1030 			mutex_enter(&vp->v_lock);
1031 			if (vp->v_count > 1) {
1032 				vp->v_count--;
1033 				mutex_exit(&vp->v_lock);
1034 				rw_exit(&ip->i_contents);
1035 				return;
1036 			}
1037 			mutex_exit(&vp->v_lock);
1038 		} else {
1039 			ud_iupdat(ip, 0);
1040 		}
1041 	}
1042 
1043 
1044 	/*
1045 	 * Put the inode on the end of the free list.
1046 	 * Possibly in some cases it would be better to
1047 	 * put the inode at the head of the free list,
1048 	 * (e.g.: where i_perm == 0 || i_number == 0)
1049 	 * but I will think about that later.
1050 	 * (i_number is rarely 0 - only after an i/o error in ud_iget,
1051 	 * where i_perm == 0, the inode will probably be wanted
1052 	 * again soon for an ialloc, so possibly we should keep it)
1053 	 */
1054 	/*
1055 	 * If inode is invalid or there is no page associated with
1056 	 * this inode, put the inode in the front of the free list.
1057 	 * Since we have a VN_HOLD on the vnode, and checked that it
1058 	 * wasn't already on the freelist when we entered, we can safely
1059 	 * put it on the freelist even if another thread puts a VN_HOLD
1060 	 * on it (pageout/ud_iget).
1061 	 */
1062 tryagain:
1063 	mutex_enter(&ud_nino_lock);
1064 	if (vn_has_cached_data(vp)) {
1065 		mutex_exit(&ud_nino_lock);
1066 		mutex_enter(&vp->v_lock);
1067 		vp->v_count--;
1068 		mutex_exit(&vp->v_lock);
1069 		mutex_enter(&ip->i_tlock);
1070 		mutex_enter(&udf_ifree_lock);
1071 		ud_add_to_free_list(ip, UD_END);
1072 		mutex_exit(&udf_ifree_lock);
1073 		ip->i_flag &= IMODTIME;
1074 		mutex_exit(&ip->i_tlock);
1075 		rw_exit(&ip->i_contents);
1076 	} else if (busy || ud_cur_inodes < ud_max_inodes) {
1077 		mutex_exit(&ud_nino_lock);
1078 		/*
1079 		 * We're not over our high water mark, or it's
1080 		 * not safe to kmem_free the inode, so put it
1081 		 * on the freelist.
1082 		 */
1083 		mutex_enter(&vp->v_lock);
1084 		if (vn_has_cached_data(vp)) {
1085 			cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1086 		}
1087 		vp->v_count--;
1088 		mutex_exit(&vp->v_lock);
1089 
1090 		mutex_enter(&ip->i_tlock);
1091 		mutex_enter(&udf_ifree_lock);
1092 		ud_add_to_free_list(ip, UD_BEGIN);
1093 		mutex_exit(&udf_ifree_lock);
1094 		ip->i_flag &= IMODTIME;
1095 		mutex_exit(&ip->i_tlock);
1096 		rw_exit(&ip->i_contents);
1097 	} else {
1098 		mutex_exit(&ud_nino_lock);
1099 		if (vn_has_cached_data(vp)) {
1100 			cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1101 		}
1102 		/*
1103 		 * Try to free the inode. We must make sure
1104 		 * it's o.k. to destroy this inode. We can't destroy
1105 		 * if a thread is waiting for this inode. If we can't get the
1106 		 * cache now, put it back on the freelist.
1107 		 */
1108 		if (!mutex_tryenter(&ud_icache_lock)) {
1109 			busy = 1;
1110 			goto tryagain;
1111 		}
1112 		mutex_enter(&vp->v_lock);
1113 		if (vp->v_count > 1) {
1114 			/* inode is wanted in ud_iget */
1115 			busy = 1;
1116 			mutex_exit(&vp->v_lock);
1117 			mutex_exit(&ud_icache_lock);
1118 			goto tryagain;
1119 		}
1120 		mutex_exit(&vp->v_lock);
1121 		remque(ip);
1122 		ip->i_forw = ip;
1123 		ip->i_back = ip;
1124 		mutex_enter(&ud_nino_lock);
1125 		ud_cur_inodes--;
1126 		mutex_exit(&ud_nino_lock);
1127 		mutex_exit(&ud_icache_lock);
1128 		if (ip->i_icb_prn != 0xFFFF) {
1129 			ud_iupdat(ip, 0);
1130 		}
1131 		addr = (caddr_t)ip->i_ext;
1132 		size = sizeof (struct icb_ext) * ip->i_ext_count;
1133 		ip->i_ext = 0;
1134 		ip->i_ext_count = ip->i_ext_used = 0;
1135 		addr1 = (caddr_t)ip->i_con;
1136 		size1 = sizeof (struct icb_ext) * ip->i_con_count;
1137 		ip->i_con = 0;
1138 		ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
1139 		cv_destroy(&ip->i_wrcv);  /* throttling */
1140 		rw_destroy(&ip->i_rwlock);
1141 		rw_exit(&ip->i_contents);
1142 		rw_destroy(&ip->i_contents);
1143 		kmem_free(addr, size);
1144 		kmem_free(addr1, size1);
1145 		ip->i_marker3 = (uint32_t)0xDDDDDDDD;
1146 		vn_free(vp);
1147 		kmem_free(ip, sizeof (struct ud_inode));
1148 	}
1149 }
1150 
1151 
1152 void
1153 ud_iupdat(struct ud_inode *ip, int32_t waitfor)
1154 {
1155 	uint16_t flag, tag_flags;
1156 	int32_t error, crc_len = 0;
1157 	struct buf *bp;
1158 	struct udf_vfs *udf_vfsp;
1159 	struct file_entry *fe;
1160 
1161 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
1162 
1163 	ud_printf("ud_iupdat\n");
1164 	/*
1165 	 * Return if file system has been forcibly umounted.
1166 	 */
1167 	if (ip->i_udf == NULL) {
1168 		return;
1169 	}
1170 
1171 	udf_vfsp = ip->i_udf;
1172 	flag = ip->i_flag;	/* Atomic read */
1173 	if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
1174 		if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
1175 			ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
1176 			return;
1177 		}
1178 
1179 		bp = ud_bread(ip->i_dev,
1180 		    ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
1181 		    ip->i_udf->udf_lbsize);
1182 		if (bp->b_flags & B_ERROR) {
1183 			brelse(bp);
1184 			return;
1185 		}
1186 		fe = (struct file_entry *)bp->b_un.b_addr;
1187 		if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1188 		    ip->i_icb_block,
1189 		    1, ip->i_udf->udf_lbsize) != 0) {
1190 			brelse(bp);
1191 			return;
1192 		}
1193 
1194 		mutex_enter(&ip->i_tlock);
1195 		if (ip->i_flag & (IUPD|IACC|ICHG)) {
1196 			IMARK(ip);
1197 		}
1198 		ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
1199 		mutex_exit(&ip->i_tlock);
1200 
1201 		fe->fe_uid = SWAP_32(ip->i_uid);
1202 		fe->fe_gid = SWAP_32(ip->i_gid);
1203 
1204 		fe->fe_perms = SWAP_32(ip->i_perm);
1205 
1206 		fe->fe_lcount = SWAP_16(ip->i_nlink);
1207 		fe->fe_info_len = SWAP_64(ip->i_size);
1208 		fe->fe_lbr = SWAP_64(ip->i_lbr);
1209 
1210 		ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
1211 		ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
1212 		ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);
1213 
1214 		if (ip->i_char & ISUID) {
1215 			tag_flags = ICB_FLAG_SETUID;
1216 		} else {
1217 			tag_flags = 0;
1218 		}
1219 		if (ip->i_char & ISGID) {
1220 			tag_flags |= ICB_FLAG_SETGID;
1221 		}
1222 		if (ip->i_char & ISVTX) {
1223 			tag_flags |= ICB_FLAG_STICKY;
1224 		}
1225 		tag_flags |= ip->i_desc_type;
1226 
1227 		/*
1228 		 * Removed the following since the file is no longer contiguous:
1229 		 * if (ip->i_astrat  == STRAT_TYPE4) {
1230 		 *	tag_flags |= ICB_FLAG_CONTIG;
1231 		 * }
1232 		 */
1233 
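		/*
		 * Clear the allocation descriptor type and the
		 * setuid/setgid/sticky/contiguous bits before
		 * rewriting them from the in-core state.
		 */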
1234 		fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
1235 		fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
1236 		fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);
1237 
1238 		ud_update_regid(&fe->fe_impl_id);
1239 
1240 		crc_len = ((uint32_t)&((struct file_entry *)0)->fe_spec) +
1241 		    SWAP_32(fe->fe_len_ear);
1242 		if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
1243 			crc_len += ip->i_size;
1244 			fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
1245 		} else if ((ip->i_size != 0) && (ip->i_ext != NULL) &&
1246 		    (ip->i_ext_used != 0)) {
1247 
1248 			if ((error = ud_read_icb_till_off(ip,
1249 			    ip->i_size)) == 0) {
1250 				if (ip->i_astrat == STRAT_TYPE4) {
1251 					error = ud_updat_ext4(ip, fe);
1252 				} else if (ip->i_astrat == STRAT_TYPE4096) {
1253 					error = ud_updat_ext4096(ip, fe);
1254 				}
1255 				if (error) {
1256 					udf_vfsp->udf_mark_bad = 1;
1257 				}
1258 			}
1259 			crc_len += SWAP_32(fe->fe_len_adesc);
1260 		} else {
1261 			fe->fe_len_adesc = 0;
1262 		}
1263 
1264 		/*
1265 		 * Zero out the rest of the block
1266 		 */
1267 		bzero(bp->b_un.b_addr + crc_len,
1268 		    ip->i_udf->udf_lbsize - crc_len);
1269 
1270 		ud_make_tag(ip->i_udf, &fe->fe_tag,
1271 		    UD_FILE_ENTRY, ip->i_icb_block, crc_len);
1272 
1273 
1274 		if (waitfor) {
1275 			BWRITE(bp);
1276 
1277 			/*
1278 			 * Synchronous write has guaranteed that inode
1279 			 * has been written on disk so clear the flag
1280 			 */
1281 			ip->i_flag &= ~(IBDWRITE);
1282 		} else {
1283 			bdwrite(bp);
1284 
1285 			/*
1286 			 * This write hasn't guaranteed that inode has been
1287 			 * written on the disk.
1288 			 * Since all update flags on the inode are cleared, we must
1289 			 * remember the condition in case the inode is to be updated
1290 			 * synchronously later (e.g. fsync()/fdatasync())
1291 			 * and inode has not been modified yet.
1292 			 */
1293 			ip->i_flag |= (IBDWRITE);
1294 		}
1295 	} else {
1296 		/*
1297 		 * In case previous inode update was done asynchronously
1298 		 * (IBDWRITE) and this inode update request wants guaranteed
1299 		 * (synchronous) disk update, flush the inode.
1300 		 */
1301 		if (waitfor && (flag & IBDWRITE)) {
1302 			blkflush(ip->i_dev,
1303 			    (daddr_t)fsbtodb(udf_vfsp, ip->i_icb_lbano));
1304 			ip->i_flag &= ~(IBDWRITE);
1305 		}
1306 	}
1307 }
1308 
1309 int32_t
1310 ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
1311 {
1312 	uint32_t dummy;
1313 	int32_t elen, ndent, index, count, con_index;
1314 	daddr_t bno;
1315 	struct buf *bp;
1316 	struct short_ad *sad;
1317 	struct long_ad *lad;
1318 	struct icb_ext *iext, *icon;
1319 
1320 
1321 	ASSERT(ip);
1322 	ASSERT(fe);
1323 	ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1324 	    (ip->i_desc_type == ICB_FLAG_LONG_AD));
1325 
1326 	if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1327 		elen = sizeof (struct short_ad);
1328 		sad = (struct short_ad *)
1329 		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1330 	} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1331 		elen = sizeof (struct long_ad);
1332 		lad = (struct long_ad *)
1333 		    (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1334 	} else {
1335 		/* This cannot happen; return an error */
1336 		return (EINVAL);
1337 	}
1338 
1339 	ndent = ip->i_max_emb / elen;
1340 
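	/*
	 * The first ndent descriptors fit inside the file entry itself;
	 * the rest live in continuation blocks tracked by ip->i_con[],
	 * each beginning with an alloc_ext_desc header and chained to
	 * the next one by a final IB_CON descriptor.
	 */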
1341 	if (ip->i_ext_used < ndent) {
1342 
1343 		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1344 			ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
1345 		} else {
1346 			ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
1347 		}
1348 		fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
1349 		con_index = 0;
1350 	} else {
1351 
1352 		con_index = index = 0;
1353 
1354 		while (index < ip->i_ext_used) {
1355 			if (index == 0) {
1356 				/*
1357 				 * bp is already read
1358 				 * First few extents will go
1359 				 * into the file_entry
1360 				 */
1361 				count = ndent - 1;
1362 				fe->fe_len_adesc = SWAP_32(ndent * elen);
1363 				bp = NULL;
1364 
1365 				/*
1366 				 * The last entry is to be a continuation extent.
1367 				 */
1368 				icon = &ip->i_con[con_index];
1369 			} else {
1370 				/*
1371 				 * Read the buffer
1372 				 */
1373 				icon = &ip->i_con[con_index];
1374 
1375 				bno = ud_xlate_to_daddr(ip->i_udf,
1376 				    icon->ib_prn, icon->ib_block,
1377 				    icon->ib_count >> ip->i_udf->udf_l2d_shift,
1378 				    &dummy);
1379 				bp = ud_bread(ip->i_dev,
1380 				    bno << ip->i_udf->udf_l2d_shift,
1381 				    ip->i_udf->udf_lbsize);
1382 				if (bp->b_flags & B_ERROR) {
1383 					brelse(bp);
1384 					return (EIO);
1385 				}
1386 
1387 				/*
1388 				 * Figure out how many extents
1389 				 * fit in this block.
1390 				 */
1391 				count = (bp->b_bcount -
1392 				    sizeof (struct alloc_ext_desc)) / elen;
1393 				if (count > (ip->i_ext_used - index)) {
1394 					count = ip->i_ext_used - index;
1395 				} else {
1396 					count --;
1397 				}
1398 				con_index++;
1399 				if (con_index >= ip->i_con_used) {
1400 					icon = NULL;
1401 				} else {
1402 					icon = &ip->i_con[con_index];
1403 				}
1404 			}
1405 
1406 
1407 
1408 			/*
1409 			 * convert to on disk form and
1410 			 * update
1411 			 */
1412 			iext = &ip->i_ext[index];
1413 			if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1414 				if (index != 0) {
1415 					sad = (struct short_ad *)
1416 					    (bp->b_un.b_addr +
1417 					    sizeof (struct alloc_ext_desc));
1418 				}
1419 				ud_make_sad(iext, sad, count);
1420 				sad += count;
1421 				if (icon != NULL) {
1422 					ud_make_sad(icon, sad, 1);
1423 				}
1424 			} else {
1425 				if (index != 0) {
1426 					lad = (struct long_ad *)
1427 					    (bp->b_un.b_addr +
1428 					    sizeof (struct alloc_ext_desc));
1429 				}
1430 				ud_make_lad(iext, lad, count);
1431 				lad += count;
1432 				if (icon != NULL) {
1433 					ud_make_lad(icon, lad, 1);
1434 				}
1435 			}
1436 
1437 			if (con_index != 0) {
1438 				struct alloc_ext_desc *aed;
1439 				int32_t sz;
1440 				struct icb_ext *oicon;
1441 
1442 				oicon = &ip->i_con[con_index - 1];
1443 				sz = count * elen;
1444 				if (icon != NULL) {
1445 					sz += elen;
1446 				}
1447 				aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
1448 				aed->aed_len_aed = SWAP_32(sz);
1449 				if (con_index == 1) {
1450 					aed->aed_rev_ael =
1451 					    SWAP_32(ip->i_icb_block);
1452 				} else {
1453 					aed->aed_rev_ael =
1454 					    SWAP_32(oicon->ib_block);
1455 				}
1456 				sz += sizeof (struct alloc_ext_desc);
1457 				ud_make_tag(ip->i_udf, &aed->aed_tag,
1458 				    UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
1459 			}
1460 
1461 			/*
1462 			 * Write back to disk
1463 			 */
1464 			if (bp != NULL) {
1465 				BWRITE(bp);
1466 			}
1467 			index += count;
1468 		}
1469 
1470 	}
1471 
1472 	if (con_index != ip->i_con_used) {
1473 		int32_t lbmask, l2b, temp;
1474 
1475 		temp = con_index;
1476 		lbmask = ip->i_udf->udf_lbmask;
1477 		l2b = ip->i_udf->udf_l2b_shift;
1478 		/*
1479 		 * Free unused continuation extents
1480 		 */
1481 		for (; con_index < ip->i_con_used; con_index++) {
1482 			icon = &ip->i_con[con_index];
1483 			count = (icon->ib_count + lbmask) >> l2b;
1484 			ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
1485 			    icon->ib_block, count);
1486 			count = (count << l2b) - sizeof (struct alloc_ext_desc);
1487 			ip->i_cur_max_ext -= (count / elen) - 1;
1488 		}
1489 		ip->i_con_used = temp;
1490 	}
1491 	return (0);
1492 }
1493 
1494 /* ARGSUSED */
1495 int32_t
1496 ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
1497 {
1498 	return (ENXIO);
1499 }
1500 
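/*
 * Convert an array of in-core icb_ext entries back into on-disk short
 * allocation descriptors, re-packing the extent flags into the top two
 * bits of the length word (the inverse of the decode in ud_iget).
 * ud_make_lad() below is the long_ad analogue and also records the
 * partition reference number.
 */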
1501 void
1502 ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
1503 {
1504 	int32_t index = 0, scount;
1505 
1506 	ASSERT(iext);
1507 	ASSERT(sad);
1508 
1509 	if (count != 0) {
1510 		ASSERT(count > 0);
1511 		while (index < count) {
1512 			scount = (iext->ib_count & 0x3FFFFFFF) |
1513 			    (iext->ib_flags << 30);
1514 			sad->sad_ext_len = SWAP_32(scount);
1515 			sad->sad_ext_loc = SWAP_32(iext->ib_block);
1516 			sad++;
1517 			iext++;
1518 			index++;
1519 		}
1520 	}
1521 }
1522 
1523 void
1524 ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
1525 {
1526 	int32_t index = 0, scount;
1527 
1528 	ASSERT(iext);
1529 	ASSERT(lad);
1530 
1531 	if (count != 0) {
1532 		ASSERT(count > 0);
1533 
1534 		while (index < count) {
1535 			lad->lad_ext_prn = SWAP_16(iext->ib_prn);
1536 			scount = (iext->ib_count & 0x3FFFFFFF) |
1537 			    (iext->ib_flags << 30);
1538 			lad->lad_ext_len = SWAP_32(scount);
1539 			lad->lad_ext_loc = SWAP_32(iext->ib_block);
1540 			lad++;
1541 			iext++;
1542 			index++;
1543 		}
1544 	}
1545 }
1546 
1547 /*
1548  * Truncate the inode ip to at most length size.
1549  * Free affected disk blocks -- the blocks of the
1550  * file are removed in reverse order.
1551  */
1552 /* ARGSUSED */
1553 int
1554 ud_itrunc(struct ud_inode *oip, u_offset_t length,
1555     int32_t flags, struct cred *cr)
1556 {
1557 	int32_t error, boff;
1558 	off_t bsize;
1559 	mode_t mode;
1560 	struct udf_vfs *udf_vfsp;
1561 
1562 	ud_printf("ud_itrunc\n");
1563 
1564 	ASSERT(RW_WRITE_HELD(&oip->i_contents));
1565 	udf_vfsp = oip->i_udf;
1566 	bsize = udf_vfsp->udf_lbsize;
1567 
1568 	/*
1569 	 * We only allow truncation of regular files and directories
1570 	 * to arbitrary lengths here.  In addition, we allow symbolic
1571 	 * links to be truncated only to zero length.  Other inode
1572 	 * types cannot have their length set here.
1573 	 */
1574 	mode = oip->i_type;
1575 	if (mode == VFIFO) {
1576 		return (0);
1577 	}
1578 	if ((mode != VREG) && (mode != VDIR) &&
1579 	    (!(mode == VLNK && length == 0))) {
1580 		return (EINVAL);
1581 	}
1582 	if (length == oip->i_size) {
1583 		/* update ctime and mtime to please POSIX tests */
1584 		mutex_enter(&oip->i_tlock);
1585 		oip->i_flag |= ICHG |IUPD;
1586 		mutex_exit(&oip->i_tlock);
1587 		return (0);
1588 	}
1589 
1590 	boff = blkoff(udf_vfsp, length);
1591 
1592 	if (length > oip->i_size) {
1593 		/*
1594 		 * Truncate-up case. ud_bmap_write will ensure that the right blocks
1595 		 * are allocated.  This includes doing any work needed for
1596 		 * allocating the last block.
1597 		 */
1598 		if (boff == 0) {
1599 			error = ud_bmap_write(oip, length - 1,
1600 			    (int)bsize, 0, cr);
1601 		} else {
1602 			error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1603 		}
1604 		if (error == 0) {
1605 			u_offset_t osize = oip->i_size;
1606 			oip->i_size  = length;
1607 
1608 			/*
1609 			 * Make sure we zero out the remaining bytes of
1610 			 * the page in case a mmap scribbled on it. We
1611 			 * can't prevent a mmap from writing beyond EOF
1612 			 * on the last page of a file.
1613 			 */
1614 			if ((boff = blkoff(udf_vfsp, osize)) != 0) {
1615 				pvn_vpzero(ITOV(oip), osize,
1616 				    (uint32_t)(bsize - boff));
1617 			}
1618 			mutex_enter(&oip->i_tlock);
1619 			oip->i_flag |= ICHG;
1620 			ITIMES_NOLOCK(oip);
1621 			mutex_exit(&oip->i_tlock);
1622 		}
1623 		return (error);
1624 	}
1625 
1626 	/*
1627 	 * Update the pages of the file.  If the file is not being
1628 	 * truncated to a block boundary, the contents of the
1629 	 * pages following the end of the file must be zero'ed
1630 	 * in case they ever become accessible again because
1631 	 * of subsequent file growth.
1632 	 */
1633 	if (boff == 0) {
1634 		(void) pvn_vplist_dirty(ITOV(oip), length,
1635 		    ud_putapage, B_INVAL | B_TRUNC, CRED());
1636 	} else {
1637 		/*
1638 		 * Make sure that the last block is properly allocated.
1639 		 * We only really have to do this if the last block is
1640 		 * actually allocated.  Just to be sure, we do it now
1641 		 * independent of current allocation.
1642 		 */
1643 		error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1644 		if (error) {
1645 			return (error);
1646 		}
1647 
1648 		pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));
1649 
1650 		(void) pvn_vplist_dirty(ITOV(oip), length,
1651 		    ud_putapage, B_INVAL | B_TRUNC, CRED());
1652 	}
1653 
1654 
1655 	/* Free the blocks */
1656 	if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
1657 		if (length > oip->i_max_emb) {
1658 			return (EFBIG);
1659 		}
1660 		oip->i_size = length;
1661 		mutex_enter(&oip->i_tlock);
1662 		oip->i_flag |= ICHG|IUPD;
1663 		mutex_exit(&oip->i_tlock);
1664 		ud_iupdat(oip, 1);
1665 	} else {
1666 		if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
1667 			return (error);
1668 		}
1669 
1670 		if (oip->i_astrat == STRAT_TYPE4) {
1671 			ud_trunc_ext4(oip, length);
1672 		} else if (oip->i_astrat == STRAT_TYPE4096) {
1673 			ud_trunc_ext4096(oip, length);
1674 		}
1675 	}
1676 
1678 	return (0);
1679 }
1680 
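/*
 * Truncate a strategy-type-4 file to "length": clip the extent that
 * contains the new EOF, free the blocks of all later extents, and
 * release any continuation blocks no longer needed to hold the
 * surviving allocation descriptors.
 */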
1681 void
1682 ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
1683 {
1684 	int32_t index, l2b, count, ecount;
1685 	int32_t elen, ndent, nient;
1686 	u_offset_t ext_beg, ext_end;
1687 	struct icb_ext *iext, *icon;
1688 	int32_t lbmask, ext_used;
1689 	uint32_t loc;
1690 	struct icb_ext text;
1691 	uint32_t con_freed;
1692 
1693 	ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1694 	    (ip->i_desc_type == ICB_FLAG_LONG_AD));
1695 
1696 	if (ip->i_ext_used == 0) {
1697 		return;
1698 	}
1699 
1700 	ext_used = ip->i_ext_used;
1701 
1702 	lbmask = ip->i_udf->udf_lbmask;
1703 	l2b = ip->i_udf->udf_l2b_shift;
1704 
1705 	ASSERT(ip->i_ext);
1706 
1707 	ip->i_lbr = 0;
1708 	for (index = 0; index < ext_used; index++) {
1709 		iext = &ip->i_ext[index];
1710 
1711 		/*
1712 		 * Find the beginning and end
1713 		 * of the current extent.
1714 		 */
1715 		ext_beg = iext->ib_offset;
1716 		ext_end = iext->ib_offset +
1717 		    ((iext->ib_count + lbmask) & ~lbmask);
1718 
1719 		/*
1720 		 * This is the extent that has offset "length"
1721 		 * make a copy of this extent and
1722 		 * remember the index. We can use
1723 		 * it to free blocks
1724 		 */
1725 		if ((length <= ext_end) && (length >= ext_beg)) {
1726 			text = *iext;
1727 
1728 			iext->ib_count = length - ext_beg;
1729 			ip->i_ext_used = index + 1;
1730 			break;
1731 		}
1732 		if (iext->ib_flags != IB_UN_RE_AL) {
1733 			ip->i_lbr += iext->ib_count >> l2b;
1734 		}
1735 	}
1736 	if (ip->i_ext_used != index) {
1737 		if (iext->ib_flags != IB_UN_RE_AL) {
1738 			ip->i_lbr +=
1739 			    ((iext->ib_count + lbmask) & ~lbmask) >> l2b;
1740 		}
1741 	}
1742 
1743 	ip->i_size = length;
1744 	mutex_enter(&ip->i_tlock);
1745 	ip->i_flag |= ICHG|IUPD;
1746 	mutex_exit(&ip->i_tlock);
1747 	ud_iupdat(ip, 1);
1748 
1749 	/*
1750 	 * Free the unused space
1751 	 */
1752 	if (text.ib_flags != IB_UN_RE_AL) {
1753 		count = (ext_end - length) >> l2b;
1754 		if (count) {
1755 			loc = text.ib_block +
1756 			    (((length - text.ib_offset) + lbmask) >> l2b);
1757 			ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
1758 			    loc, count);
1759 		}
1760 	}
1761 	for (index = ip->i_ext_used; index < ext_used; index++) {
1762 		iext = &ip->i_ext[index];
1763 		if (iext->ib_flags != IB_UN_RE_AL) {
1764 			count = (iext->ib_count + lbmask) >> l2b;
1765 			ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
1766 			    iext->ib_block, count);
1767 		}
1768 		bzero(iext, sizeof (struct icb_ext));
1770 	}
1771 
1772 	/*
1773 	 * release any continuation blocks
1774 	 */
1775 	if (ip->i_con) {
1776 
1777 		ASSERT(ip->i_con_count >= ip->i_con_used);
1778 
1779 		/*
1780 		 * Find out how many indirect blocks
1781 		 * are required and release the rest
1782 		 */
1783 		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1784 			elen = sizeof (struct short_ad);
1785 		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1786 			elen = sizeof (struct long_ad);
1787 		}
1788 		ndent = ip->i_max_emb / elen;
1789 		if (ip->i_ext_used > ndent) {
1790 			ecount = ip->i_ext_used - ndent;
1791 		} else {
1792 			ecount = 0;
1793 		}
1794 		con_freed = 0;
1795 		for (index = 0; index < ip->i_con_used; index++) {
1796 			icon = &ip->i_con[index];
1797 			nient = icon->ib_count -
1798 			    (sizeof (struct alloc_ext_desc) + elen);
1799 			/* Header + 1 indirect extent */
1800 			nient /= elen;
1801 			if (ecount) {
1802 				if (ecount > nient) {
1803 					ecount -= nient;
1804 				} else {
1805 					ecount = 0;
1806 				}
1807 			} else {
1808 				count = ((icon->ib_count + lbmask) &
1809 				    ~lbmask) >> l2b;
1810 				ud_free_space(ip->i_udf->udf_vfs,
1811 				    icon->ib_prn, icon->ib_block, count);
1812 				con_freed++;
1813 				ip->i_cur_max_ext -= nient;
1814 			}
1815 		}
1816 		/*
1817 		 * Set the continuation extents used (i_con_used) to the correct
1818 		 * value. It is possible for i_con_used to be zero,
1819 		 * if we free up all continuation extents. This happens
1820 		 * when ecount is 0 before entering the for loop above.
1821 		 */
1822 		ip->i_con_used -= con_freed;
1823 		if (ip->i_con_read > ip->i_con_used) {
1824 			ip->i_con_read = ip->i_con_used;
1825 		}
1826 	}
1827 }
1828 
1829 void
1830 ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
1831 {
1832 	/*
1833 	 * The truncate code is the same for
1834 	 * both strategy type 4 and type 4096 files.
1835 	 */
1836 	ud_trunc_ext4(ip, length);
1837 }
1838 
1839 /*
1840  * Remove any inodes in the inode cache belonging to dev
1841  *
1842  * There should not be any active ones, return error if any are found but
1843  * still invalidate others (N.B.: this is a user error, not a system error).
1844  *
1845  * Also, count the references to dev by block devices - this really
1846  * has nothing to do with the object of the procedure, but as we have
1847  * to scan the inode table here anyway, we might as well get the
1848  * extra benefit.
1849  */
1850 int32_t
1851 ud_iflush(struct vfs *vfsp)
1852 {
1853 	int32_t index, busy = 0;
1854 	union ihead *ih;
1855 	struct udf_vfs *udf_vfsp;
1856 	dev_t dev;
1857 	struct vnode *rvp, *vp;
1858 	struct ud_inode *ip, *next;
1859 
1860 	ud_printf("ud_iflush\n");
1861 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1862 	rvp = udf_vfsp->udf_root;
1863 	dev = vfsp->vfs_dev;
1864 
1865 	mutex_enter(&ud_icache_lock);
1866 	for (index = 0; index < UD_HASH_SZ; index++) {
1867 		ih = &ud_ihead[index];
1868 
1869 		next = ih->ih_chain[0];
1870 		while (next != (struct ud_inode *)ih) {
1871 			ip = next;
1872 			next = ip->i_forw;
1873 			if (ip->i_dev != dev) {
1874 				continue;
1875 			}
1876 			vp = ITOV(ip);
1877 			/*
1878 			 * root inode is processed by the caller
1879 			 */
1880 			if (vp == rvp) {
1881 				if (vp->v_count > 1) {
1882 					busy = -1;
1883 				}
1884 				continue;
1885 			}
1886 			if (ip->i_flag & IREF) {
1887 				/*
1888 				 * Set error indicator for return value,
1889 				 * but continue invalidating other
1890 				 * inodes.
1891 				 */
1892 				busy = -1;
1893 				continue;
1894 			}
1895 
1896 			rw_enter(&ip->i_contents, RW_WRITER);
1897 			remque(ip);
1898 			ip->i_forw = ip;
1899 			ip->i_back = ip;
1900 			/*
1901 			 * Hold the vnode since it's not done
1902 			 * in VOP_PUTPAGE anymore.
1903 			 */
1904 			VN_HOLD(vp);
1905 			/*
1906 			 * XXX Synchronous write holding
1907 			 * cache lock
1908 			 */
1909 			(void) ud_syncip(ip, B_INVAL, I_SYNC);
1910 			rw_exit(&ip->i_contents);
1911 			VN_RELE(vp);
1912 		}
1913 	}
1914 	mutex_exit(&ud_icache_lock);
1915 
1916 	return (busy);
1917 }
1918 
1919 
1920 /*
1921  * Check mode permission on inode.  Mode is READ, WRITE or EXEC.
1922  * In the case of WRITE, the read-only status of the file system
1923  * is checked.  The applicable mode bits are compared with the
1924  * requested form of access.  If bits are missing, the secpolicy
1925  * function will check for privileges.
1926  */
1927 int
1928 ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr, int dolock)
1929 {
1930 	int shift = 0;
1931 	int ret = 0;
1932 
1933 	if (dolock)
1934 		rw_enter(&ip->i_contents, RW_READER);
1935 	ASSERT(RW_LOCK_HELD(&ip->i_contents));
1936 
1937 	ud_printf("ud_iaccess\n");
1938 	if (mode & IWRITE) {
1939 		/*
1940 		 * Disallow write attempts on read-only
1941 		 * file systems, unless the file is a block
1942 		 * or character device or a FIFO.
1943 		 */
1944 		if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
1945 			if ((ip->i_type != VCHR) &&
1946 			    (ip->i_type != VBLK) &&
1947 			    (ip->i_type != VFIFO)) {
1948 				ret = EROFS;
1949 				goto out;
1950 			}
1951 		}
1952 	}
1953 
1954 	/*
1955 	 * Access check is based on only
1956 	 * one of owner, group, public.
1957 	 * If not owner, then check group.
1958 	 * If not a member of the group, then
1959 	 * check public access.
1960 	 */
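	/*
	 * Note that UDF keeps five permission bits per class (including
	 * delete and change-attribute) rather than the three UNIX rwx
	 * bits, which is why the shift below steps by 5 and the result
	 * is translated with UD2VA_PERM for secpolicy_vnode_access().
	 */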
1961 	if (crgetuid(cr) != ip->i_uid) {
1962 		shift += 5;
1963 		if (!groupmember((uid_t)ip->i_gid, cr))
1964 			shift += 5;
1965 	}
1966 	mode &= ~(ip->i_perm << shift);
1967 
1968 	if (mode == 0)
1969 		goto out;
1970 
1971 	ret = secpolicy_vnode_access(cr, ITOV(ip), ip->i_uid,
1972 	    UD2VA_PERM(mode));
1973 
1974 out:
1975 	if (dolock)
1976 		rw_exit(&ip->i_contents);
1977 	return (ret);
1978 }
1979 
1980 void
1981 ud_imark(struct ud_inode *ip)
1982 {
1983 	timestruc_t	now;
1984 
1985 	gethrestime(&now);
1986 	ud_printf("ud_imark\n");
1987 	if (ip->i_flag & IACC) {
1988 		ip->i_atime.tv_sec = now.tv_sec;
1989 		ip->i_atime.tv_nsec = now.tv_nsec;
1990 	}
1991 	if (ip->i_flag & IUPD) {
1992 		ip->i_mtime.tv_sec = now.tv_sec;
1993 		ip->i_mtime.tv_nsec = now.tv_nsec;
1994 		ip->i_flag |= IMODTIME;
1995 	}
1996 	if (ip->i_flag & ICHG) {
1997 		ip->i_diroff = 0;
1998 		ip->i_ctime.tv_sec = now.tv_sec;
1999 		ip->i_ctime.tv_nsec = now.tv_nsec;
2000 	}
2001 }
2002 
2003 
2004 void
2005 ud_itimes_nolock(struct ud_inode *ip)
2006 {
2007 	ud_printf("ud_itimes_nolock\n");
2008 
2009 	if (ip->i_flag & (IUPD|IACC|ICHG)) {
2010 		if (ip->i_flag & ICHG) {
2011 			ip->i_flag |= IMOD;
2012 		} else {
2013 			ip->i_flag |= IMODACC;
2014 		}
2015 		ud_imark(ip);
2016 		ip->i_flag &= ~(IACC|IUPD|ICHG);
2017 	}
2018 }
2019 
2020 void
2021 ud_delcache(struct ud_inode *ip)
2022 {
2023 	ud_printf("ud_delcache\n");
2024 
2025 	mutex_enter(&ud_icache_lock);
2026 	remque(ip);
2027 	ip->i_forw = ip;
2028 	ip->i_back = ip;
2029 	mutex_exit(&ud_icache_lock);
2030 }
2031 
2032 void
2033 ud_idrop(struct ud_inode *ip)
2034 {
2035 	struct vnode *vp = ITOV(ip);
2036 
2037 	ASSERT(RW_WRITE_HELD(&ip->i_contents));
2038 
2039 	ud_printf("ud_idrop\n");
2040 
2041 	mutex_enter(&vp->v_lock);
2042 	if (vp->v_count > 1) {
2043 		vp->v_count--;
2044 		mutex_exit(&vp->v_lock);
2045 		return;
2046 	}
2047 	vp->v_count = 0;
2048 	mutex_exit(&vp->v_lock);
2049 
2050 
2051 	/*
2052 	 *  if inode is invalid or there is no page associated with
2053 	 *  this inode, put the inode in the front of the free list
2054 	 */
2055 	mutex_enter(&ip->i_tlock);
2056 	mutex_enter(&udf_ifree_lock);
2057 	if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
2058 		ud_add_to_free_list(ip, UD_BEGIN);
2059 	} else {
2060 		/*
2061 		 * Otherwise, put the inode back on the end of the free list.
2062 		 */
2063 		ud_add_to_free_list(ip, UD_END);
2064 	}
2065 	mutex_exit(&udf_ifree_lock);
2066 	ip->i_flag &= IMODTIME;
2067 	mutex_exit(&ip->i_tlock);
2068 }
2069 
2070 void
2071 ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
2072 {
2073 	ASSERT(ip);
2074 	ASSERT(mutex_owned(&udf_ifree_lock));
2075 
2076 #ifdef	DEBUG
2077 	/* Search if the element is already in the list */
2078 	if (udf_ifreeh != NULL) {
2079 		struct ud_inode *iq;
2080 
2081 		iq = udf_ifreeh;
2082 		while (iq) {
2083 			if (iq == ip) {
2084 				cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
2085 			}
2086 			iq = iq->i_freef;
2087 		}
2088 	}
2089 #endif
2090 
2091 	ip->i_freef = NULL;
2092 	ip->i_freeb = NULL;
2093 	if (udf_ifreeh == NULL) {
2094 		/*
2095 		 * Nothing on the list; just add it.
2096 		 */
2097 		udf_ifreeh = ip;
2098 		udf_ifreet = ip;
2099 	} else {
2100 		if (at == UD_BEGIN) {
2101 			/*
2102 			 * Add at the beginning of the list
2103 			 */
2104 			ip->i_freef = udf_ifreeh;
2105 			udf_ifreeh->i_freeb = ip;
2106 			udf_ifreeh = ip;
2107 		} else {
2108 			/*
2109 			 * Add at the end of the list
2110 			 */
2111 			ip->i_freeb = udf_ifreet;
2112 			udf_ifreet->i_freef = ip;
2113 			udf_ifreet = ip;
2114 		}
2115 	}
2116 }
2117 
2118 void
2119 ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
2120 {
2121 	ASSERT(ip);
2122 	ASSERT(mutex_owned(&udf_ifree_lock));
2123 
2124 #ifdef	DEBUG
2125 	{
2126 		struct ud_inode *iq;
2127 		uint32_t found = 0;
2128 
2129 		iq = udf_ifreeh;
2130 		while (iq) {
2131 			if (iq == ip) {
2132 				found++;
2133 			}
2134 			iq = iq->i_freef;
2135 		}
2136 		if (found != 1) {
2137 			cmn_err(CE_WARN, "ip %p is found %x times\n",
2138 			    (void *)ip,  found);
2139 		}
2140 	}
2141 #endif
2142 
2143 	if ((ip->i_freef == NULL) && (ip->i_freeb == NULL)) {
2144 		if (ip != udf_ifreeh) {
2145 			return;
2146 		}
2147 	}
2148 
2149 	if ((at == UD_BEGIN) || (ip == udf_ifreeh)) {
2150 		udf_ifreeh = ip->i_freef;
2151 		if (ip->i_freef == NULL) {
2152 			udf_ifreet = NULL;
2153 		} else {
2154 			udf_ifreeh->i_freeb = NULL;
2155 		}
2156 	} else {
2157 		ip->i_freeb->i_freef = ip->i_freef;
2158 		if (ip->i_freef) {
2159 			ip->i_freef->i_freeb = ip->i_freeb;
2160 		} else {
2161 			udf_ifreet = ip->i_freeb;
2162 		}
2163 	}
2164 	ip->i_freef = NULL;
2165 	ip->i_freeb = NULL;
2166 }
2167 
2168 void
2169 ud_init_inodes(void)
2170 {
2171 	union ihead *ih = ud_ihead;
2172 	int index;
2173 
2174 #ifndef	__lint
2175 	_NOTE(NO_COMPETING_THREADS_NOW);
2176 #endif
2177 	for (index = 0; index < UD_HASH_SZ; index++, ih++) {
2178 		ih->ih_head[0] = ih;
2179 		ih->ih_head[1] = ih;
2180 	}
2181 	mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
2182 	mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);
2183 
2184 	udf_ifreeh = NULL;
2185 	udf_ifreet = NULL;
2186 	mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);
2187 
2188 	mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
2189 	udf_vfs_instances = NULL;
2190 	mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);
2191 
2192 #ifndef	__lint
2193 	_NOTE(COMPETING_THREADS_NOW);
2194 #endif
2195 }
2196