1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 #include <sys/types.h>
26 #include <sys/t_lock.h>
27 #include <sys/param.h>
28 #include <sys/time.h>
29 #include <sys/systm.h>
30 #include <sys/sysmacros.h>
31 #include <sys/resource.h>
32 #include <sys/signal.h>
33 #include <sys/cred.h>
34 #include <sys/user.h>
35 #include <sys/buf.h>
36 #include <sys/vfs.h>
37 #include <sys/stat.h>
38 #include <sys/vnode.h>
39 #include <sys/mode.h>
40 #include <sys/proc.h>
41 #include <sys/disp.h>
42 #include <sys/file.h>
43 #include <sys/fcntl.h>
44 #include <sys/flock.h>
45 #include <sys/kmem.h>
46 #include <sys/uio.h>
47 #include <sys/dnlc.h>
48 #include <sys/conf.h>
49 #include <sys/errno.h>
50 #include <sys/mman.h>
51 #include <sys/fbuf.h>
52 #include <sys/pathname.h>
53 #include <sys/debug.h>
54 #include <sys/vmsystm.h>
55 #include <sys/cmn_err.h>
56 #include <sys/dirent.h>
57 #include <sys/errno.h>
58 #include <sys/modctl.h>
59 #include <sys/statvfs.h>
60 #include <sys/mount.h>
61 #include <sys/sunddi.h>
62 #include <sys/bootconf.h>
63 #include <sys/policy.h>
64
65 #include <vm/hat.h>
66 #include <vm/page.h>
67 #include <vm/pvn.h>
68 #include <vm/as.h>
69 #include <vm/seg.h>
70 #include <vm/seg_map.h>
71 #include <vm/seg_kmem.h>
72 #include <vm/seg_vn.h>
73 #include <vm/rm.h>
74 #include <vm/page.h>
75 #include <sys/swap.h>
76
77
78 #include <fs/fs_subr.h>
79
80
81 #include <sys/fs/udf_volume.h>
82 #include <sys/fs/udf_inode.h>
83
84 extern struct vnodeops *udf_vnodeops;
85
86 kmutex_t ud_sync_busy;
87 /*
88 * udf_vfs list manipulation routines
89 */
90 kmutex_t udf_vfs_mutex;
91 struct udf_vfs *udf_vfs_instances;
92 #ifndef __lint
93 _NOTE(MUTEX_PROTECTS_DATA(udf_vfs_mutex, udf_vfs_instances))
94 #endif
95
96 union ihead ud_ihead[UD_HASH_SZ];
97 kmutex_t ud_icache_lock;
98
99 #define UD_BEGIN 0x0
100 #define UD_END 0x1
101 #define UD_UNKN 0x2
102 struct ud_inode *udf_ifreeh, *udf_ifreet;
103 kmutex_t udf_ifree_lock;
104 #ifndef __lint
105 _NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreeh))
106 _NOTE(MUTEX_PROTECTS_DATA(udf_ifree_lock, udf_ifreet))
107 #endif
108
109 kmutex_t ud_nino_lock;
110 int32_t ud_max_inodes = 512;
111 int32_t ud_cur_inodes = 0;
112 #ifndef __lint
113 _NOTE(MUTEX_PROTECTS_DATA(ud_nino_lock, ud_cur_inodes))
114 #endif
115
116 uid_t ud_default_uid = 0;
117 gid_t ud_default_gid = 3;
118
119 int32_t ud_updat_ext4(struct ud_inode *, struct file_entry *);
120 int32_t ud_updat_ext4096(struct ud_inode *, struct file_entry *);
121 void ud_make_sad(struct icb_ext *, struct short_ad *, int32_t);
122 void ud_make_lad(struct icb_ext *, struct long_ad *, int32_t);
123 void ud_trunc_ext4(struct ud_inode *, u_offset_t);
124 void ud_trunc_ext4096(struct ud_inode *, u_offset_t);
125 void ud_add_to_free_list(struct ud_inode *, uint32_t);
126 void ud_remove_from_free_list(struct ud_inode *, uint32_t);
127
128
129 #ifdef DEBUG
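/*
 * Debug-only helper: search the in-core inode cache for an inode that
 * matches the given partition reference number and ICB block on the
 * file system vfsp.  Returns the inode if found, NULL otherwise; no
 * hold is taken on the vnode.
 */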
130 struct ud_inode *
131 ud_search_icache(struct vfs *vfsp, uint16_t prn, uint32_t ploc)
132 {
133 int32_t hno;
134 union ihead *ih;
135 struct ud_inode *ip;
136 struct udf_vfs *udf_vfsp;
137 uint32_t loc, dummy;
138
139 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
140 loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
141
142 mutex_enter(&ud_icache_lock);
143 hno = UD_INOHASH(vfsp->vfs_dev, loc);
144 ih = &ud_ihead[hno];
145 for (ip = ih->ih_chain[0];
146 ip != (struct ud_inode *)ih;
147 ip = ip->i_forw) {
148 if ((prn == ip->i_icb_prn) && (ploc == ip->i_icb_block) &&
149 (vfsp->vfs_dev == ip->i_dev)) {
150 mutex_exit(&ud_icache_lock);
151 return (ip);
152 }
153 }
154 mutex_exit(&ud_icache_lock);
155 return (0);
156 }
157 #endif
158
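/*
 * Get the inode identified by (prn, ploc) on the file system vfsp.
 * If it is already in the cache, take a hold on its vnode and return
 * it.  Otherwise reuse an inode from the freelist (or allocate a new
 * one), read the on-disk file entry and build the in-core inode.  On
 * success *ipp points to the held inode; pbp, if supplied, must
 * already contain the file entry and is released here.
 *
 * A rough sketch of a typical call (illustrative only):
 *
 *	struct ud_inode *ip;
 *	if (ud_iget(vfsp, prn, blkno, &ip, NULL, cr) == 0) {
 *		...
 *		VN_RELE(ITOV(ip));
 *	}
 */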
159 /* ARGSUSED */
160 int
161 ud_iget(struct vfs *vfsp, uint16_t prn, uint32_t ploc,
162 struct ud_inode **ipp, struct buf *pbp, struct cred *cred)
163 {
164 int32_t hno, nomem = 0, icb_tag_flags;
165 union ihead *ih;
166 struct ud_inode *ip;
167 struct vnode *vp;
168 struct buf *bp = NULL;
169 struct file_entry *fe;
170 struct udf_vfs *udf_vfsp;
171 struct ext_attr_hdr *eah;
172 struct attr_hdr *ah;
173 int32_t ea_len, ea_off;
174 daddr_t loc;
175 uint64_t offset = 0;
176 struct icb_ext *iext, *con;
177 uint32_t length, dummy;
178 int32_t ndesc, ftype;
179 uint16_t old_prn;
180 uint32_t old_block, old_lbano;
181
182 ud_printf("ud_iget\n");
183 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
184 old_prn = 0;
185 old_block = old_lbano = 0;
186 ftype = 0;
187 loc = ud_xlate_to_daddr(udf_vfsp, prn, ploc, 1, &dummy);
188 loop:
189 mutex_enter(&ud_icache_lock);
190 hno = UD_INOHASH(vfsp->vfs_dev, loc);
191
192 ih = &ud_ihead[hno];
193 for (ip = ih->ih_chain[0];
194 ip != (struct ud_inode *)ih;
195 ip = ip->i_forw) {
196
197 if ((prn == ip->i_icb_prn) &&
198 (ploc == ip->i_icb_block) &&
199 (vfsp->vfs_dev == ip->i_dev)) {
200
201 vp = ITOV(ip);
202 VN_HOLD(vp);
203 mutex_exit(&ud_icache_lock);
204
205 rw_enter(&ip->i_contents, RW_READER);
206 mutex_enter(&ip->i_tlock);
207 if ((ip->i_flag & IREF) == 0) {
208 mutex_enter(&udf_ifree_lock);
209 ud_remove_from_free_list(ip, UD_UNKN);
210 mutex_exit(&udf_ifree_lock);
211 }
212 ip->i_flag |= IREF;
213 mutex_exit(&ip->i_tlock);
214 rw_exit(&ip->i_contents);
215
216 *ipp = ip;
217
218 if (pbp != NULL) {
219 brelse(pbp);
220 }
221
222 return (0);
223 }
224 }
225
226 /*
227 * We don't have it in the cache
228 * Allocate a new entry
229 */
230 tryagain:
231 mutex_enter(&udf_ifree_lock);
232 mutex_enter(&ud_nino_lock);
233 if (ud_cur_inodes > ud_max_inodes) {
234 int32_t purged;
235
236 mutex_exit(&ud_nino_lock);
237 while (udf_ifreeh == NULL ||
238 vn_has_cached_data(ITOV(udf_ifreeh))) {
239 /*
240 * Try to put an inode on the freelist that's
241 * sitting in the dnlc.
242 */
243 mutex_exit(&udf_ifree_lock);
244 purged = dnlc_fs_purge1(udf_vnodeops);
245 mutex_enter(&udf_ifree_lock);
246 if (!purged) {
247 break;
248 }
249 }
250 mutex_enter(&ud_nino_lock);
251 }
252
253 /*
254 * If there's a free one available and it has no pages attached
255 * take it. If we're over the high water mark, take it even if
256 * it has attached pages. Otherwise, make a new one.
257 */
258 if (udf_ifreeh &&
259 (nomem || !vn_has_cached_data(ITOV(udf_ifreeh)) ||
260 ud_cur_inodes >= ud_max_inodes)) {
261
262 mutex_exit(&ud_nino_lock);
263 ip = udf_ifreeh;
264 vp = ITOV(ip);
265
266 ud_remove_from_free_list(ip, UD_BEGIN);
267
268 mutex_exit(&udf_ifree_lock);
269 if (ip->i_flag & IREF) {
270 cmn_err(CE_WARN, "ud_iget: bad i_flag\n");
271 mutex_exit(&ud_icache_lock);
272 if (pbp != NULL) {
273 brelse(pbp);
274 }
275 return (EINVAL);
276 }
277 rw_enter(&ip->i_contents, RW_WRITER);
278
279 /*
280 * We call udf_syncip() to synchronously destroy all pages
281 * associated with the vnode before re-using it. The pageout
282 * thread may have beat us to this page so our v_count can
283 * be > 0 at this point even though we are on the freelist.
284 */
285 mutex_enter(&ip->i_tlock);
286 ip->i_flag = (ip->i_flag & IMODTIME) | IREF;
287 mutex_exit(&ip->i_tlock);
288
289 VN_HOLD(vp);
290 if (ud_syncip(ip, B_INVAL, I_SYNC) != 0) {
291 ud_idrop(ip);
292 rw_exit(&ip->i_contents);
293 mutex_exit(&ud_icache_lock);
294 goto loop;
295 }
296
297 mutex_enter(&ip->i_tlock);
298 ip->i_flag &= ~IMODTIME;
299 mutex_exit(&ip->i_tlock);
300
301 if (ip->i_ext) {
302 kmem_free(ip->i_ext,
303 sizeof (struct icb_ext) * ip->i_ext_count);
304 ip->i_ext = 0;
305 ip->i_ext_count = ip->i_ext_used = 0;
306 }
307
308 if (ip->i_con) {
309 kmem_free(ip->i_con,
310 sizeof (struct icb_ext) * ip->i_con_count);
311 ip->i_con = 0;
312 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
313 }
314
315 /*
316 * The pageout thread may not have had a chance to release
317 * its hold on the vnode (if it was active with this vp),
318 * but the pages should all be invalidated.
319 */
320 } else {
321 mutex_exit(&ud_nino_lock);
322 mutex_exit(&udf_ifree_lock);
323 /*
324 * Try to get memory for this inode without blocking.
325 * If we can't and there is something on the freelist,
326 * go ahead and use it, otherwise block waiting for
327 * memory holding the hash_lock. We expose a potential
328 * deadlock if all users of memory have to do a ud_iget()
329 * before releasing memory.
330 */
331 ip = (struct ud_inode *)kmem_zalloc(sizeof (struct ud_inode),
332 KM_NOSLEEP);
333 vp = vn_alloc(KM_NOSLEEP);
334 if ((ip == NULL) || (vp == NULL)) {
335 mutex_enter(&udf_ifree_lock);
336 if (udf_ifreeh) {
337 mutex_exit(&udf_ifree_lock);
338 if (ip != NULL)
339 kmem_free(ip, sizeof (struct ud_inode));
340 if (vp != NULL)
341 vn_free(vp);
342 nomem = 1;
343 goto tryagain;
344 } else {
345 mutex_exit(&udf_ifree_lock);
346 if (ip == NULL)
347 ip = (struct ud_inode *)
348 kmem_zalloc(
349 sizeof (struct ud_inode),
350 KM_SLEEP);
351 if (vp == NULL)
352 vp = vn_alloc(KM_SLEEP);
353 }
354 }
355 ip->i_vnode = vp;
356
357 ip->i_marker1 = (uint32_t)0xAAAAAAAA;
358 ip->i_marker2 = (uint32_t)0xBBBBBBBB;
359 ip->i_marker3 = (uint32_t)0xCCCCCCCC;
360
361 rw_init(&ip->i_rwlock, NULL, RW_DEFAULT, NULL);
362 rw_init(&ip->i_contents, NULL, RW_DEFAULT, NULL);
363 mutex_init(&ip->i_tlock, NULL, MUTEX_DEFAULT, NULL);
364
365 ip->i_forw = ip;
366 ip->i_back = ip;
367 vp->v_data = (caddr_t)ip;
368 vn_setops(vp, udf_vnodeops);
369 ip->i_flag = IREF;
370 cv_init(&ip->i_wrcv, NULL, CV_DRIVER, NULL);
371 mutex_enter(&ud_nino_lock);
372 ud_cur_inodes++;
373 mutex_exit(&ud_nino_lock);
374
375 rw_enter(&ip->i_contents, RW_WRITER);
376 }
377
378 if (vp->v_count < 1) {
379 cmn_err(CE_WARN, "ud_iget: v_count < 1\n");
380 mutex_exit(&ud_icache_lock);
381 rw_exit(&ip->i_contents);
382 if (pbp != NULL) {
383 brelse(pbp);
384 }
385 return (EINVAL);
386 }
387 if (vn_has_cached_data(vp)) {
388 cmn_err(CE_WARN, "ud_iget: v_pages not NULL\n");
389 mutex_exit(&ud_icache_lock);
390 rw_exit(&ip->i_contents);
391 if (pbp != NULL) {
392 brelse(pbp);
393 }
394 return (EINVAL);
395 }
396
397 /*
398 * Move the inode on the chain for its new (ino, dev) pair
399 */
400 remque(ip);
401 ip->i_forw = ip;
402 ip->i_back = ip;
403 insque(ip, ih);
404
405 ip->i_dev = vfsp->vfs_dev;
406 ip->i_udf = udf_vfsp;
407 ip->i_diroff = 0;
408 ip->i_devvp = ip->i_udf->udf_devvp;
409 ip->i_icb_prn = prn;
410 ip->i_icb_block = ploc;
411 ip->i_icb_lbano = loc;
412 ip->i_nextr = 0;
413 ip->i_seq = 0;
414 mutex_exit(&ud_icache_lock);
415
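/*
 * Read the on-disk file entry for this inode.  For strategy type
 * 4096 files we may come back here after following an indirect entry
 * to a newer copy of the file entry.
 */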
416 read_de:
417 if (pbp != NULL) {
418 /*
419 * The assumption is that we will not
420 * create a 4096 file
421 */
422 bp = pbp;
423 } else {
424 bp = ud_bread(ip->i_dev,
425 ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
426 udf_vfsp->udf_lbsize);
427 }
428
429 /*
430 * Check I/O errors
431 */
432 fe = (struct file_entry *)bp->b_un.b_addr;
433 if ((bp->b_flags & B_ERROR) ||
434 (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
435 ip->i_icb_block, 1, udf_vfsp->udf_lbsize) != 0)) {
436
437 if (((bp->b_flags & B_ERROR) == 0) &&
438 (ftype == STRAT_TYPE4096)) {
439 if (ud_check_te_unrec(udf_vfsp,
440 bp->b_un.b_addr, ip->i_icb_block) == 0) {
441
442 brelse(bp);
443
444 /*
445 * restore old file entry location
446 */
447 ip->i_icb_prn = old_prn;
448 ip->i_icb_block = old_block;
449 ip->i_icb_lbano = old_lbano;
450
451 /*
452 * reread old file entry
453 */
454 bp = ud_bread(ip->i_dev,
455 old_lbano << udf_vfsp->udf_l2d_shift,
456 udf_vfsp->udf_lbsize);
457 if ((bp->b_flags & B_ERROR) == 0) {
458 fe = (struct file_entry *)
459 bp->b_un.b_addr;
460 if (ud_verify_tag_and_desc(&fe->fe_tag,
461 UD_FILE_ENTRY, ip->i_icb_block, 1,
462 udf_vfsp->udf_lbsize) == 0) {
463 goto end_4096;
464 }
465 }
466 }
467 }
468 error_ret:
469 brelse(bp);
470 /*
471 * The inode may not contain anything useful. Mark it as
472 * having an error and let anyone else who was waiting for
473 * this know there was an error. Callers waiting for
474 * access to this inode in ud_iget will find
475 * the i_icb_lbano == 0, so there won't be a match.
476 * It remains in the cache. Put it back on the freelist.
477 */
478 mutex_enter(&vp->v_lock);
479 vp->v_count--;
480 mutex_exit(&vp->v_lock);
481 ip->i_icb_lbano = 0;
482
483 /*
484 * The following two lines make
485 * it impossible for anyone to do
486 * a VN_HOLD and then a VN_RELE,
487 * thereby avoiding a ud_iinactive
488 */
489 ip->i_icb_prn = 0xffff;
490 ip->i_icb_block = 0;
491
492 /*
493 * remove the bad inode from hash chains
494 * so that during unmount we will not
495 * go through this inode
496 */
497 mutex_enter(&ud_icache_lock);
498 remque(ip);
499 ip->i_forw = ip;
500 ip->i_back = ip;
501 mutex_exit(&ud_icache_lock);
502
503 /* Put the inode at the front of the freelist */
504 mutex_enter(&ip->i_tlock);
505 mutex_enter(&udf_ifree_lock);
506 ud_add_to_free_list(ip, UD_BEGIN);
507 mutex_exit(&udf_ifree_lock);
508 ip->i_flag = 0;
509 mutex_exit(&ip->i_tlock);
510 rw_exit(&ip->i_contents);
511 return (EIO);
512 }
513
514 if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
515 struct buf *ibp = NULL;
516 struct indirect_entry *ie;
517
518 /*
519 * save old file_entry location
520 */
521 old_prn = ip->i_icb_prn;
522 old_block = ip->i_icb_block;
523 old_lbano = ip->i_icb_lbano;
524
525 ftype = STRAT_TYPE4096;
526
527 /*
528 * If astrat is 4096, different versions
529 * of the file exist on the media.
530 * We are supposed to get to the latest
531 * version of the file.
532 */
533
534 /*
535 * The IE is supposed to be in the block
536 * following the DE.
537 */
538 ibp = ud_bread(ip->i_dev,
539 (ip->i_icb_lbano + 1) << udf_vfsp->udf_l2d_shift,
540 udf_vfsp->udf_lbsize);
541 if (ibp->b_flags & B_ERROR) {
542 /*
543 * Get rid of current ibp and
544 * then goto error on DE's bp
545 */
546 ie_error:
547 brelse(ibp);
548 goto error_ret;
549 }
550
551 ie = (struct indirect_entry *)ibp->b_un.b_addr;
552 if (ud_verify_tag_and_desc(&ie->ie_tag,
553 UD_INDIRECT_ENT, ip->i_icb_block + 1,
554 1, udf_vfsp->udf_lbsize) == 0) {
555 struct long_ad *lad;
556
557 lad = &ie->ie_indirecticb;
558 ip->i_icb_prn = SWAP_16(lad->lad_ext_prn);
559 ip->i_icb_block = SWAP_32(lad->lad_ext_loc);
560 ip->i_icb_lbano = ud_xlate_to_daddr(udf_vfsp,
561 ip->i_icb_prn, ip->i_icb_block,
562 1, &dummy);
563 brelse(ibp);
564 brelse(bp);
565 goto read_de;
566 }
567
568 /*
569 * If this block is TE or unrecorded we
570 * are at the last entry
571 */
572 if (ud_check_te_unrec(udf_vfsp, ibp->b_un.b_addr,
573 ip->i_icb_block + 1) != 0) {
574 /*
575 * This is not an unrecorded block
576 * Check if it a valid IE and
577 * get the address of DE that
578 * this IE points to
579 */
580 goto ie_error;
581 }
582 /*
583 * If ud_check_te_unrec returns "0"
584 * this is the last in the chain
585 * Latest file_entry
586 */
587 brelse(ibp);
588 }
589
590 end_4096:
591
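/*
 * Copy the interesting fields of the on-disk file entry into the
 * in-core inode, converting from the on-disk byte order as we go.
 */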
592 ip->i_uid = SWAP_32(fe->fe_uid);
593 if (ip->i_uid == -1) {
594 ip->i_uid = ud_default_uid;
595 }
596 ip->i_gid = SWAP_32(fe->fe_gid);
597 if (ip->i_gid == -1) {
598 ip->i_gid = ud_default_gid;
599 }
600 ip->i_perm = SWAP_32(fe->fe_perms) & 0xFFFF;
601 if (fe->fe_icb_tag.itag_strategy == SWAP_16(STRAT_TYPE4096)) {
602 ip->i_perm &= ~(IWRITE | (IWRITE >> 5) | (IWRITE >> 10));
603 }
604
605 ip->i_nlink = SWAP_16(fe->fe_lcount);
606 ip->i_size = SWAP_64(fe->fe_info_len);
607 ip->i_lbr = SWAP_64(fe->fe_lbr);
608
609 ud_dtime2utime(&ip->i_atime, &fe->fe_acc_time);
610 ud_dtime2utime(&ip->i_mtime, &fe->fe_mod_time);
611 ud_dtime2utime(&ip->i_ctime, &fe->fe_attr_time);
612
613
614 ip->i_uniqid = SWAP_64(fe->fe_uniq_id);
615 icb_tag_flags = SWAP_16(fe->fe_icb_tag.itag_flags);
616
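/*
 * For device special files, walk the extended attribute space looking
 * for the Device Specification EA to pick up the major/minor numbers.
 * Implementation Use EAs are recognized but otherwise skipped.
 */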
617 if ((fe->fe_icb_tag.itag_ftype == FTYPE_CHAR_DEV) ||
618 (fe->fe_icb_tag.itag_ftype == FTYPE_BLOCK_DEV)) {
619
620 eah = (struct ext_attr_hdr *)fe->fe_spec;
621 ea_off = GET_32(&eah->eah_ial);
622 ea_len = GET_32(&fe->fe_len_ear);
623 if (ea_len && (ud_verify_tag_and_desc(&eah->eah_tag,
624 UD_EXT_ATTR_HDR, ip->i_icb_block, 1,
625 sizeof (struct file_entry) -
626 offsetof(struct file_entry, fe_spec)) == 0)) {
627
628 while (ea_off < ea_len) {
629 /*
630 * We now check the validity of ea_off.
631 * (ea_len - ea_off) should be large enough to
632 * hold at least the attribute header.
633 */
634 if ((ea_len - ea_off) <
635 sizeof (struct attr_hdr)) {
636 cmn_err(CE_NOTE,
637 "ea_len(0x%x) - ea_off(0x%x) is "
638 "too small to hold attr. info. "
639 "blockno 0x%x\n",
640 ea_len, ea_off, ip->i_icb_block);
641 goto error_ret;
642 }
643 ah = (struct attr_hdr *)&fe->fe_spec[ea_off];
644
645 /*
646 * Device Specification EA
647 */
648 if ((GET_32(&ah->ahdr_atype) == 12) &&
649 (ah->ahdr_astype == 1)) {
650 struct dev_spec_ear *ds;
651
652 if ((ea_len - ea_off) <
653 sizeof (struct dev_spec_ear)) {
654 cmn_err(CE_NOTE,
655 "ea_len(0x%x) - "
656 "ea_off(0x%x) is too small "
657 "to hold dev_spec_ear."
658 " blockno 0x%x\n",
659 ea_len, ea_off,
660 ip->i_icb_block);
661 goto error_ret;
662 }
663 ds = (struct dev_spec_ear *)ah;
664 ip->i_major = GET_32(&ds->ds_major_id);
665 ip->i_minor = GET_32(&ds->ds_minor_id);
666 }
667
668 /*
669 * Impl Use EA
670 */
671 if ((GET_32(&ah->ahdr_atype) == 2048) &&
672 (ah->ahdr_astype == 1)) {
673 struct iu_ea *iuea;
674 struct copy_mgt_info *cmi;
675
676 if ((ea_len - ea_off) <
677 sizeof (struct iu_ea)) {
678 cmn_err(CE_NOTE,
679 "ea_len(0x%x) - ea_off(0x%x) is too small to hold iu_ea. blockno 0x%x\n",
680 ea_len, ea_off,
681 ip->i_icb_block);
682 goto error_ret;
683 }
684 iuea = (struct iu_ea *)ah;
685 if (strncmp(iuea->iuea_ii.reg_id,
686 UDF_FREEEASPACE,
687 sizeof (iuea->iuea_ii.reg_id))
688 == 0) {
689 /* skip it */
690 iuea = iuea;
691 } else if (strncmp(iuea->iuea_ii.reg_id,
692 UDF_CGMS_INFO,
693 sizeof (iuea->iuea_ii.reg_id))
694 == 0) {
695 cmi = (struct copy_mgt_info *)
696 iuea->iuea_iu;
697 cmi = cmi;
698 }
699 }
700 /* ??? PARANOIA */
701 if (GET_32(&ah->ahdr_length) == 0) {
702 break;
703 }
704 ea_off += GET_32(&ah->ahdr_length);
705 }
706 }
707 }
708
709 ip->i_nextr = 0;
710
711 ip->i_maxent = SWAP_16(fe->fe_icb_tag.itag_max_ent);
712 ip->i_astrat = SWAP_16(fe->fe_icb_tag.itag_strategy);
713
714 ip->i_desc_type = icb_tag_flags & 0x7;
715
716 /* Strictly Paranoia */
717 ip->i_ext = NULL;
718 ip->i_ext_count = ip->i_ext_used = 0;
719 ip->i_con = 0;
720 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
721
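/*
 * Build the in-core extent list from the allocation descriptors that
 * follow the extended attributes in the file entry.  Short and long
 * descriptors are handled below; embedded (ICB_FLAG_ONE_AD) data
 * needs no extent list.  Continuation (IB_CON) descriptors are
 * recorded separately in i_con.
 */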
722 ip->i_data_off = 0xB0 + SWAP_32(fe->fe_len_ear);
723 ip->i_max_emb = udf_vfsp->udf_lbsize - ip->i_data_off;
724 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
725 /* Short allocation desc */
726 struct short_ad *sad;
727
728 ip->i_ext_used = 0;
729 ip->i_ext_count = ndesc =
730 SWAP_32(fe->fe_len_adesc) / sizeof (struct short_ad);
731 ip->i_ext_count =
732 ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
733 ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
734 sizeof (struct icb_ext), KM_SLEEP);
735 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
736 ip->i_cur_max_ext --;
737
738 if ((ip->i_astrat != STRAT_TYPE4) &&
739 (ip->i_astrat != STRAT_TYPE4096)) {
740 goto error_ret;
741 }
742
743 sad = (struct short_ad *)
744 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
745 iext = ip->i_ext;
746 while (ndesc --) {
747 length = SWAP_32(sad->sad_ext_len);
748 if ((length & 0x3FFFFFFF) == 0) {
749 break;
750 }
751 if (((length >> 30) & IB_MASK) == IB_CON) {
752 if (ip->i_con == NULL) {
753 ip->i_con_count = EXT_PER_MALLOC;
754 ip->i_con_used = 0;
755 ip->i_con_read = 0;
756 ip->i_con = kmem_zalloc(
757 ip->i_con_count *
758 sizeof (struct icb_ext),
759 KM_SLEEP);
760 }
761 con = &ip->i_con[ip->i_con_used];
762 con->ib_prn = 0;
763 con->ib_block = SWAP_32(sad->sad_ext_loc);
764 con->ib_count = length & 0x3FFFFFFF;
765 con->ib_flags = (length >> 30) & IB_MASK;
766 ip->i_con_used++;
767 sad ++;
768 break;
769 }
770 iext->ib_prn = 0;
771 iext->ib_block = SWAP_32(sad->sad_ext_loc);
772 length = SWAP_32(sad->sad_ext_len);
773 iext->ib_count = length & 0x3FFFFFFF;
774 iext->ib_offset = offset;
775 iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
776 iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
777 offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
778 (~udf_vfsp->udf_lbmask);
779
780 iext->ib_flags = (length >> 30) & IB_MASK;
781
782 ip->i_ext_used++;
783 iext++;
784 sad ++;
785 }
786 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
787 /* Long allocation desc */
788 struct long_ad *lad;
789
790 ip->i_ext_used = 0;
791 ip->i_ext_count = ndesc =
792 SWAP_32(fe->fe_len_adesc) / sizeof (struct long_ad);
793 ip->i_ext_count =
794 ((ip->i_ext_count / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
795 ip->i_ext = (struct icb_ext *)kmem_zalloc(ip->i_ext_count *
796 sizeof (struct icb_ext), KM_SLEEP);
797
798 ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct long_ad);
799 ip->i_cur_max_ext --;
800
801 if ((ip->i_astrat != STRAT_TYPE4) &&
802 (ip->i_astrat != STRAT_TYPE4096)) {
803 goto error_ret;
804 }
805
806 lad = (struct long_ad *)
807 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
808 iext = ip->i_ext;
809 while (ndesc --) {
810 length = SWAP_32(lad->lad_ext_len);
811 if ((length & 0x3FFFFFFF) == 0) {
812 break;
813 }
814 if (((length >> 30) & IB_MASK) == IB_CON) {
815 if (ip->i_con == NULL) {
816 ip->i_con_count = EXT_PER_MALLOC;
817 ip->i_con_used = 0;
818 ip->i_con_read = 0;
819 ip->i_con = kmem_zalloc(
820 ip->i_con_count *
821 sizeof (struct icb_ext),
822 KM_SLEEP);
823 }
824 con = &ip->i_con[ip->i_con_used];
825 con->ib_prn = SWAP_16(lad->lad_ext_prn);
826 con->ib_block = SWAP_32(lad->lad_ext_loc);
827 con->ib_count = length & 0x3FFFFFFF;
828 con->ib_flags = (length >> 30) & IB_MASK;
829 ip->i_con_used++;
830 lad ++;
831 break;
832 }
833 iext->ib_prn = SWAP_16(lad->lad_ext_prn);
834 iext->ib_block = SWAP_32(lad->lad_ext_loc);
835 iext->ib_count = length & 0x3FFFFFFF;
836 iext->ib_offset = offset;
837 iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
838 iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
839 offset += (iext->ib_count + udf_vfsp->udf_lbmask) &
840 (~udf_vfsp->udf_lbmask);
841
842 iext->ib_flags = (length >> 30) & IB_MASK;
843
844 ip->i_ext_used++;
845 iext++;
846 lad ++;
847 }
848 } else if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
849 ASSERT(SWAP_32(fe->fe_len_ear) < udf_vfsp->udf_lbsize);
850
851 if (SWAP_32(fe->fe_len_ear) > udf_vfsp->udf_lbsize) {
852 goto error_ret;
853 }
854 } else {
855 /* Not to be used in UDF 1.50 */
856 cmn_err(CE_NOTE, "Invalid Allocation Descriptor type %x\n",
857 ip->i_desc_type);
858 goto error_ret;
859 }
860
861
862 if (icb_tag_flags & ICB_FLAG_SETUID) {
863 ip->i_char = ISUID;
864 } else {
865 ip->i_char = 0;
866 }
867 if (icb_tag_flags & ICB_FLAG_SETGID) {
868 ip->i_char |= ISGID;
869 }
870 if (icb_tag_flags & ICB_FLAG_STICKY) {
871 ip->i_char |= ISVTX;
872 }
873 switch (fe->fe_icb_tag.itag_ftype) {
874 case FTYPE_DIRECTORY :
875 ip->i_type = VDIR;
876 break;
877 case FTYPE_FILE :
878 ip->i_type = VREG;
879 break;
880 case FTYPE_BLOCK_DEV :
881 ip->i_type = VBLK;
882 break;
883 case FTYPE_CHAR_DEV :
884 ip->i_type = VCHR;
885 break;
886 case FTYPE_FIFO :
887 ip->i_type = VFIFO;
888 break;
889 case FTYPE_C_ISSOCK :
890 ip->i_type = VSOCK;
891 break;
892 case FTYPE_SYMLINK :
893 ip->i_type = VLNK;
894 break;
895 default :
896 ip->i_type = VNON;
897 break;
898 }
899
900 if (ip->i_type == VBLK || ip->i_type == VCHR) {
901 ip->i_rdev = makedevice(ip->i_major, ip->i_minor);
902 }
903
904 /*
905 * Fill in the rest. Don't bother with the vnode lock because nobody
906 * should be looking at this vnode. We have already invalidated the
907 * pages if it had any so pageout shouldn't be referencing this vnode
908 * and we are holding the write contents lock so a look up can't use
909 * the vnode.
910 */
911 vp->v_vfsp = vfsp;
912 vp->v_type = ip->i_type;
913 vp->v_rdev = ip->i_rdev;
914 if (ip->i_udf->udf_root_blkno == loc) {
915 vp->v_flag = VROOT;
916 } else {
917 vp->v_flag = 0;
918 }
919
920 brelse(bp);
921 *ipp = ip;
922 rw_exit(&ip->i_contents);
923 vn_exists(vp);
924 return (0);
925 }
926
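/*
 * ud_iinactive is called when the last reference to a vnode is being
 * released.  If the link count has dropped to zero the file's blocks
 * are truncated and freed; otherwise dirty pages and the inode are
 * pushed out.  The inode is then either placed on the freelist or,
 * when we are over the inode limit, destroyed outright.
 */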
927 void
928 ud_iinactive(struct ud_inode *ip, struct cred *cr)
929 {
930 int32_t busy = 0;
931 struct vnode *vp;
932 vtype_t type;
933 caddr_t addr, addr1;
934 size_t size, size1;
935
936
937 ud_printf("ud_iinactive\n");
938
939 /*
940 * Get exclusive access to inode data.
941 */
942 rw_enter(&ip->i_contents, RW_WRITER);
943
944 /*
945 * Make sure no one reclaimed the inode before we put
946 * it on the freelist or destroy it. We keep our 'hold'
947 * on the vnode from vn_rele until we are ready to
948 * do something with the inode (freelist/destroy).
949 *
950 * Pageout may put a VN_HOLD/VN_RELE at anytime during this
951 * operation via an async putpage, so we must make sure
952 * we don't free/destroy the inode more than once. ud_iget
953 * may also put a VN_HOLD on the inode before it grabs
954 * the i_contents lock. This is done so we don't kmem_free
955 * an inode that a thread is waiting on.
956 */
957 vp = ITOV(ip);
958
959 mutex_enter(&vp->v_lock);
960 if (vp->v_count < 1) {
961 cmn_err(CE_WARN, "ud_iinactive: v_count < 1\n");
962 return;
963 }
964 if ((vp->v_count > 1) || ((ip->i_flag & IREF) == 0)) {
965 vp->v_count--; /* release our hold from vn_rele */
966 mutex_exit(&vp->v_lock);
967 rw_exit(&ip->i_contents);
968 return;
969 }
970 mutex_exit(&vp->v_lock);
971
972 /*
973 * For forced umount case: if i_udf is NULL, the contents of
974 * the inode and all the pages have already been pushed back
975 * to disk. It can be safely destroyed.
976 */
977 if (ip->i_udf == NULL) {
978 addr = (caddr_t)ip->i_ext;
979 size = sizeof (struct icb_ext) * ip->i_ext_count;
980 ip->i_ext = 0;
981 ip->i_ext_count = ip->i_ext_used = 0;
982 addr1 = (caddr_t)ip->i_con;
983 size1 = sizeof (struct icb_ext) * ip->i_con_count;
984 ip->i_con = 0;
985 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
986 rw_exit(&ip->i_contents);
987 vn_invalid(vp);
988
989 mutex_enter(&ud_nino_lock);
990 ud_cur_inodes--;
991 mutex_exit(&ud_nino_lock);
992
993 cv_destroy(&ip->i_wrcv); /* throttling */
994 rw_destroy(&ip->i_rwlock);
995 rw_exit(&ip->i_contents);
996 rw_destroy(&ip->i_contents);
997 kmem_free(addr, size);
998 kmem_free(addr1, size1);
999 vn_free(vp);
1000 kmem_free(ip, sizeof (struct ud_inode));
1001 return;
1002 }
1003
1004 if ((ip->i_udf->udf_flags & UDF_FL_RDONLY) == 0) {
1005 if (ip->i_nlink <= 0) {
1006 ip->i_marker3 = (uint32_t)0xDDDD0000;
1007 ip->i_nlink = 1; /* prevent free-ing twice */
1008 (void) ud_itrunc(ip, 0, 0, cr);
1009 type = ip->i_type;
1010 ip->i_perm = 0;
1011 ip->i_uid = 0;
1012 ip->i_gid = 0;
1013 ip->i_rdev = 0; /* Zero in core version of rdev */
1014 mutex_enter(&ip->i_tlock);
1015 ip->i_flag |= IUPD|ICHG;
1016 mutex_exit(&ip->i_tlock);
1017 ud_ifree(ip, type);
1018 ip->i_icb_prn = 0xFFFF;
1019 } else if (!IS_SWAPVP(vp)) {
1020 /*
1021 * Write the inode out if dirty. Pages are
1022 * written back and put on the freelist.
1023 */
1024 (void) ud_syncip(ip, B_FREE | B_ASYNC, 0);
1025 /*
1026 * Do nothing if inode is now busy -- inode may
1027 * have gone busy because ud_syncip
1028 * releases/reacquires the i_contents lock
1029 */
1030 mutex_enter(&vp->v_lock);
1031 if (vp->v_count > 1) {
1032 vp->v_count--;
1033 mutex_exit(&vp->v_lock);
1034 rw_exit(&ip->i_contents);
1035 return;
1036 }
1037 mutex_exit(&vp->v_lock);
1038 } else {
1039 ud_iupdat(ip, 0);
1040 }
1041 }
1042
1043
1044 /*
1045 * Put the inode on the end of the free list.
1046 * Possibly in some cases it would be better to
1047 * put the inode at the head of the free list,
1048 * (e.g.: where i_perm == 0 || i_number == 0)
1049 * but I will think about that later.
1050 * (i_number is rarely 0 - only after an i/o error in ud_iget,
1051 * where i_perm == 0, the inode will probably be wanted
1052 * again soon for an ialloc, so possibly we should keep it)
1053 */
1054 /*
1055 * If inode is invalid or there is no page associated with
1056 * this inode, put the inode in the front of the free list.
1057 * Since we have a VN_HOLD on the vnode, and checked that it
1058 * wasn't already on the freelist when we entered, we can safely
1059 * put it on the freelist even if another thread puts a VN_HOLD
1060 * on it (pageout/ud_iget).
1061 */
1062 tryagain:
1063 mutex_enter(&ud_nino_lock);
1064 if (vn_has_cached_data(vp)) {
1065 mutex_exit(&ud_nino_lock);
1066 mutex_enter(&vp->v_lock);
1067 vp->v_count--;
1068 mutex_exit(&vp->v_lock);
1069 mutex_enter(&ip->i_tlock);
1070 mutex_enter(&udf_ifree_lock);
1071 ud_add_to_free_list(ip, UD_END);
1072 mutex_exit(&udf_ifree_lock);
1073 ip->i_flag &= IMODTIME;
1074 mutex_exit(&ip->i_tlock);
1075 rw_exit(&ip->i_contents);
1076 } else if (busy || ud_cur_inodes < ud_max_inodes) {
1077 mutex_exit(&ud_nino_lock);
1078 /*
1079 * We're not over our high water mark, or it's
1080 * not safe to kmem_free the inode, so put it
1081 * on the freelist.
1082 */
1083 mutex_enter(&vp->v_lock);
1084 if (vn_has_cached_data(vp)) {
1085 cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1086 }
1087 vp->v_count--;
1088 mutex_exit(&vp->v_lock);
1089
1090 mutex_enter(&ip->i_tlock);
1091 mutex_enter(&udf_ifree_lock);
1092 ud_add_to_free_list(ip, UD_BEGIN);
1093 mutex_exit(&udf_ifree_lock);
1094 ip->i_flag &= IMODTIME;
1095 mutex_exit(&ip->i_tlock);
1096 rw_exit(&ip->i_contents);
1097 } else {
1098 mutex_exit(&ud_nino_lock);
1099 if (vn_has_cached_data(vp)) {
1100 cmn_err(CE_WARN, "ud_iinactive: v_pages not NULL\n");
1101 }
1102 /*
1103 * Try to free the inode. We must make sure
1104 * it's o.k. to destroy this inode. We can't destroy
1105 * if a thread is waiting for this inode. If we can't get the
1106 * cache now, put it back on the freelist.
1107 */
1108 if (!mutex_tryenter(&ud_icache_lock)) {
1109 busy = 1;
1110 goto tryagain;
1111 }
1112 mutex_enter(&vp->v_lock);
1113 if (vp->v_count > 1) {
1114 /* inode is wanted in ud_iget */
1115 busy = 1;
1116 mutex_exit(&vp->v_lock);
1117 mutex_exit(&ud_icache_lock);
1118 goto tryagain;
1119 }
1120 mutex_exit(&vp->v_lock);
1121 remque(ip);
1122 ip->i_forw = ip;
1123 ip->i_back = ip;
1124 mutex_enter(&ud_nino_lock);
1125 ud_cur_inodes--;
1126 mutex_exit(&ud_nino_lock);
1127 mutex_exit(&ud_icache_lock);
1128 if (ip->i_icb_prn != 0xFFFF) {
1129 ud_iupdat(ip, 0);
1130 }
1131 addr = (caddr_t)ip->i_ext;
1132 size = sizeof (struct icb_ext) * ip->i_ext_count;
1133 ip->i_ext = 0;
1134 ip->i_ext_count = ip->i_ext_used = 0;
1135 addr1 = (caddr_t)ip->i_con;
1136 size1 = sizeof (struct icb_ext) * ip->i_con_count;
1137 ip->i_con = 0;
1138 ip->i_con_count = ip->i_con_used = ip->i_con_read = 0;
1139 cv_destroy(&ip->i_wrcv); /* throttling */
1140 rw_destroy(&ip->i_rwlock);
1141 rw_exit(&ip->i_contents);
1142 rw_destroy(&ip->i_contents);
1143 kmem_free(addr, size);
1144 kmem_free(addr1, size1);
1145 ip->i_marker3 = (uint32_t)0xDDDDDDDD;
1146 vn_free(vp);
1147 kmem_free(ip, sizeof (struct ud_inode));
1148 }
1149 }
1150
1151
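/*
 * Write the in-core inode back to its on-disk file entry if any of
 * the update flags are set.  "waitfor" requests a synchronous write;
 * otherwise the buffer is delayed-written and IBDWRITE is remembered
 * so that a later synchronous update can flush it.
 */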
1152 void
1153 ud_iupdat(struct ud_inode *ip, int32_t waitfor)
1154 {
1155 uint16_t flag, tag_flags;
1156 int32_t error;
1157 struct buf *bp;
1158 struct udf_vfs *udf_vfsp;
1159 struct file_entry *fe;
1160 uint16_t crc_len = 0;
1161
1162 ASSERT(RW_WRITE_HELD(&ip->i_contents));
1163
1164 ud_printf("ud_iupdat\n");
1165 /*
1166 * Return if file system has been forcibly umounted.
1167 */
1168 if (ip->i_udf == NULL) {
1169 return;
1170 }
1171
1172 udf_vfsp = ip->i_udf;
1173 flag = ip->i_flag; /* Atomic read */
1174 if ((flag & (IUPD|IACC|ICHG|IMOD|IMODACC)) != 0) {
1175 if (udf_vfsp->udf_flags & UDF_FL_RDONLY) {
1176 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC|IATTCHG);
1177 return;
1178 }
1179
1180 bp = ud_bread(ip->i_dev,
1181 ip->i_icb_lbano << udf_vfsp->udf_l2d_shift,
1182 ip->i_udf->udf_lbsize);
1183 if (bp->b_flags & B_ERROR) {
1184 brelse(bp);
1185 return;
1186 }
1187 fe = (struct file_entry *)bp->b_un.b_addr;
1188 if (ud_verify_tag_and_desc(&fe->fe_tag, UD_FILE_ENTRY,
1189 ip->i_icb_block,
1190 1, ip->i_udf->udf_lbsize) != 0) {
1191 brelse(bp);
1192 return;
1193 }
1194
1195 mutex_enter(&ip->i_tlock);
1196 if (ip->i_flag & (IUPD|IACC|ICHG)) {
1197 IMARK(ip);
1198 }
1199 ip->i_flag &= ~(IUPD|IACC|ICHG|IMOD|IMODACC);
1200 mutex_exit(&ip->i_tlock);
1201
1202 fe->fe_uid = SWAP_32(ip->i_uid);
1203 fe->fe_gid = SWAP_32(ip->i_gid);
1204
1205 fe->fe_perms = SWAP_32(ip->i_perm);
1206
1207 fe->fe_lcount = SWAP_16(ip->i_nlink);
1208 fe->fe_info_len = SWAP_64(ip->i_size);
1209 fe->fe_lbr = SWAP_64(ip->i_lbr);
1210
1211 ud_utime2dtime(&ip->i_atime, &fe->fe_acc_time);
1212 ud_utime2dtime(&ip->i_mtime, &fe->fe_mod_time);
1213 ud_utime2dtime(&ip->i_ctime, &fe->fe_attr_time);
1214
1215 if (ip->i_char & ISUID) {
1216 tag_flags = ICB_FLAG_SETUID;
1217 } else {
1218 tag_flags = 0;
1219 }
1220 if (ip->i_char & ISGID) {
1221 tag_flags |= ICB_FLAG_SETGID;
1222 }
1223 if (ip->i_char & ISVTX) {
1224 tag_flags |= ICB_FLAG_STICKY;
1225 }
1226 tag_flags |= ip->i_desc_type;
1227
1228 /*
1229 * Removed the following since it is no longer contig:
1230 * if (ip->i_astrat == STRAT_TYPE4) {
1231 * tag_flags |= ICB_FLAG_CONTIG;
1232 * }
1233 */
1234
1235 fe->fe_icb_tag.itag_flags &= ~SWAP_16((uint16_t)0x3C3);
1236 fe->fe_icb_tag.itag_strategy = SWAP_16(ip->i_astrat);
1237 fe->fe_icb_tag.itag_flags |= SWAP_16(tag_flags);
1238
1239 ud_update_regid(&fe->fe_impl_id);
1240
1241 crc_len = offsetof(struct file_entry, fe_spec) +
1242 SWAP_32(fe->fe_len_ear);
1243 if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
1244 crc_len += ip->i_size;
1245 fe->fe_len_adesc = SWAP_32(((uint32_t)ip->i_size));
1246 } else if ((ip->i_size != 0) && (ip->i_ext != NULL) &&
1247 (ip->i_ext_used != 0)) {
1248
1249 if ((error = ud_read_icb_till_off(ip,
1250 ip->i_size)) == 0) {
1251 if (ip->i_astrat == STRAT_TYPE4) {
1252 error = ud_updat_ext4(ip, fe);
1253 } else if (ip->i_astrat == STRAT_TYPE4096) {
1254 error = ud_updat_ext4096(ip, fe);
1255 }
1256 if (error) {
1257 udf_vfsp->udf_mark_bad = 1;
1258 }
1259 }
1260 crc_len += SWAP_32(fe->fe_len_adesc);
1261 } else {
1262 fe->fe_len_adesc = 0;
1263 }
1264
1265 /*
1266 * Zero out the rest of the block
1267 */
1268 bzero(bp->b_un.b_addr + crc_len,
1269 ip->i_udf->udf_lbsize - crc_len);
1270
1271 ud_make_tag(ip->i_udf, &fe->fe_tag,
1272 UD_FILE_ENTRY, ip->i_icb_block, crc_len);
1273
1274
1275 if (waitfor) {
1276 BWRITE(bp);
1277
1278 /*
1279 * Synchronous write has guaranteed that inode
1280 * has been written on disk so clear the flag
1281 */
1282 ip->i_flag &= ~(IBDWRITE);
1283 } else {
1284 bdwrite(bp);
1285
1286 /*
1287 * This write hasn't guaranteed that inode has been
1288 * written on the disk.
1289 * Since all update flags on the inode are cleared, we must
1290 * remember the condition in case the inode is to be updated
1291 * synchronously later (e.g. fsync()/fdatasync())
1292 * and inode has not been modified yet.
1293 */
1294 ip->i_flag |= (IBDWRITE);
1295 }
1296 } else {
1297 /*
1298 * In case previous inode update was done asynchronously
1299 * (IBDWRITE) and this inode update request wants guaranteed
1300 * (synchronous) disk update, flush the inode.
1301 */
1302 if (waitfor && (flag & IBDWRITE)) {
1303 blkflush(ip->i_dev,
1304 (daddr_t)fsbtodb(udf_vfsp, ip->i_icb_lbano));
1305 ip->i_flag &= ~(IBDWRITE);
1306 }
1307 }
1308 }
1309
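/*
 * Write the allocation descriptors of a strategy type 4 file back to
 * disk.  The first few extents live in the file entry itself; the
 * rest are written into allocation extent (continuation) blocks.
 * Continuation blocks that are no longer needed are freed.
 */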
1310 int32_t
1311 ud_updat_ext4(struct ud_inode *ip, struct file_entry *fe)
1312 {
1313 uint32_t dummy;
1314 int32_t elen, ndent, index, count, con_index;
1315 daddr_t bno;
1316 struct buf *bp;
1317 struct short_ad *sad;
1318 struct long_ad *lad;
1319 struct icb_ext *iext, *icon;
1320
1321
1322 ASSERT(ip);
1323 ASSERT(fe);
1324 ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1325 (ip->i_desc_type == ICB_FLAG_LONG_AD));
1326
1327 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1328 elen = sizeof (struct short_ad);
1329 sad = (struct short_ad *)
1330 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1331 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1332 elen = sizeof (struct long_ad);
1333 lad = (struct long_ad *)
1334 (fe->fe_spec + SWAP_32(fe->fe_len_ear));
1335 } else {
1336 /* This cannot happen; just return an error */
1337 return (EINVAL);
1338 }
1339
1340 ndent = ip->i_max_emb / elen;
1341
1342 if (ip->i_ext_used < ndent) {
1343
1344 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1345 ud_make_sad(ip->i_ext, sad, ip->i_ext_used);
1346 } else {
1347 ud_make_lad(ip->i_ext, lad, ip->i_ext_used);
1348 }
1349 fe->fe_len_adesc = SWAP_32(ip->i_ext_used * elen);
1350 con_index = 0;
1351 } else {
1352
1353 con_index = index = 0;
1354
1355 while (index < ip->i_ext_used) {
1356 if (index == 0) {
1357 /*
1358 * bp is already read
1359 * First few extents will go
1360 * into the file_entry
1361 */
1362 count = ndent - 1;
1363 fe->fe_len_adesc = SWAP_32(ndent * elen);
1364 bp = NULL;
1365
1366 /*
1367 * The last slot is reserved for a continuation extent
1368 */
1369 icon = &ip->i_con[con_index];
1370 } else {
1371 /*
1372 * Read the buffer
1373 */
1374 icon = &ip->i_con[con_index];
1375
1376 bno = ud_xlate_to_daddr(ip->i_udf,
1377 icon->ib_prn, icon->ib_block,
1378 icon->ib_count >> ip->i_udf->udf_l2d_shift,
1379 &dummy);
1380 bp = ud_bread(ip->i_dev,
1381 bno << ip->i_udf->udf_l2d_shift,
1382 ip->i_udf->udf_lbsize);
1383 if (bp->b_flags & B_ERROR) {
1384 brelse(bp);
1385 return (EIO);
1386 }
1387
1388 /*
1389 * Figure out how many extents to
1390 * write this time
1391 */
1392 count = (bp->b_bcount -
1393 sizeof (struct alloc_ext_desc)) / elen;
1394 if (count > (ip->i_ext_used - index)) {
1395 count = ip->i_ext_used - index;
1396 } else {
1397 count --;
1398 }
1399 con_index++;
1400 if (con_index >= ip->i_con_used) {
1401 icon = NULL;
1402 } else {
1403 icon = &ip->i_con[con_index];
1404 }
1405 }
1406
1407
1408
1409 /*
1410 * convert to on disk form and
1411 * update
1412 */
1413 iext = &ip->i_ext[index];
1414 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1415 if (index != 0) {
1416 sad = (struct short_ad *)
1417 (bp->b_un.b_addr +
1418 sizeof (struct alloc_ext_desc));
1419 }
1420 ud_make_sad(iext, sad, count);
1421 sad += count;
1422 if (icon != NULL) {
1423 ud_make_sad(icon, sad, 1);
1424 }
1425 } else {
1426 if (index != 0) {
1427 lad = (struct long_ad *)
1428 (bp->b_un.b_addr +
1429 sizeof (struct alloc_ext_desc));
1430 }
1431 ud_make_lad(iext, lad, count);
1432 lad += count;
1433 if (icon != NULL) {
1434 ud_make_lad(icon, lad, 1);
1435 }
1436 }
1437
1438 if (con_index != 0) {
1439 struct alloc_ext_desc *aed;
1440 int32_t sz;
1441 struct icb_ext *oicon;
1442
1443 oicon = &ip->i_con[con_index - 1];
1444 sz = count * elen;
1445 if (icon != NULL) {
1446 sz += elen;
1447 }
1448 aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
1449 aed->aed_len_aed = SWAP_32(sz);
1450 if (con_index == 1) {
1451 aed->aed_rev_ael =
1452 SWAP_32(ip->i_icb_block);
1453 } else {
1454 aed->aed_rev_ael =
1455 SWAP_32(oicon->ib_block);
1456 }
1457 sz += sizeof (struct alloc_ext_desc);
1458 ud_make_tag(ip->i_udf, &aed->aed_tag,
1459 UD_ALLOC_EXT_DESC, oicon->ib_block, sz);
1460 }
1461
1462 /*
1463 * Write back to disk
1464 */
1465 if (bp != NULL) {
1466 BWRITE(bp);
1467 }
1468 index += count;
1469 }
1470
1471 }
1472
1473 if (con_index != ip->i_con_used) {
1474 int32_t lbmask, l2b, temp;
1475
1476 temp = con_index;
1477 lbmask = ip->i_udf->udf_lbmask;
1478 l2b = ip->i_udf->udf_l2b_shift;
1479 /*
1480 * Free unused continuation extents
1481 */
1482 for (; con_index < ip->i_con_used; con_index++) {
1483 icon = &ip->i_con[con_index];
1484 count = (icon->ib_count + lbmask) >> l2b;
1485 ud_free_space(ip->i_udf->udf_vfs, icon->ib_prn,
1486 icon->ib_block, count);
1487 count = (count << l2b) - sizeof (struct alloc_ext_desc);
1488 ip->i_cur_max_ext -= (count / elen) - 1;
1489 }
1490 ip->i_con_used = temp;
1491 }
1492 return (0);
1493 }
1494
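/*
 * Strategy type 4096 files are treated as read-only here (ud_iget
 * clears their write permission bits), so rewriting their allocation
 * descriptors is not supported.
 */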
1495 /* ARGSUSED */
1496 int32_t
1497 ud_updat_ext4096(struct ud_inode *ip, struct file_entry *fe)
1498 {
1499 return (ENXIO);
1500 }
1501
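/*
 * ud_make_sad/ud_make_lad convert "count" in-core icb_ext entries
 * into their on-disk short_ad/long_ad form, packing the extent flags
 * into the top two bits of the extent length field.
 */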
1502 void
1503 ud_make_sad(struct icb_ext *iext, struct short_ad *sad, int32_t count)
1504 {
1505 int32_t index = 0, scount;
1506
1507 ASSERT(iext);
1508 ASSERT(sad);
1509
1510 if (count != 0) {
1511 ASSERT(count > 0);
1512 while (index < count) {
1513 scount = (iext->ib_count & 0x3FFFFFFF) |
1514 (iext->ib_flags << 30);
1515 sad->sad_ext_len = SWAP_32(scount);
1516 sad->sad_ext_loc = SWAP_32(iext->ib_block);
1517 sad++;
1518 iext++;
1519 index++;
1520 }
1521 }
1522 }
1523
1524 void
1525 ud_make_lad(struct icb_ext *iext, struct long_ad *lad, int32_t count)
1526 {
1527 int32_t index = 0, scount;
1528
1529 ASSERT(iext);
1530 ASSERT(lad);
1531
1532 if (count != 0) {
1533 ASSERT(count > 0);
1534
1535 while (index < count) {
1536 lad->lad_ext_prn = SWAP_16(iext->ib_prn);
1537 scount = (iext->ib_count & 0x3FFFFFFF) |
1538 (iext->ib_flags << 30);
1539 lad->lad_ext_len = SWAP_32(scount);
1540 lad->lad_ext_loc = SWAP_32(iext->ib_block);
1541 lad++;
1542 iext++;
1543 index++;
1544 }
1545 }
1546 }
1547
1548 /*
1549 * Truncate the inode ip to at most length size.
1550 * Free affected disk blocks -- the blocks of the
1551 * file are removed in reverse order.
1552 */
1553 /* ARGSUSED */
1554 int
1555 ud_itrunc(struct ud_inode *oip, u_offset_t length,
1556 int32_t flags, struct cred *cr)
1557 {
1558 int32_t error, boff;
1559 off_t bsize;
1560 mode_t mode;
1561 struct udf_vfs *udf_vfsp;
1562
1563 ud_printf("ud_itrunc\n");
1564
1565 ASSERT(RW_WRITE_HELD(&oip->i_contents));
1566 udf_vfsp = oip->i_udf;
1567 bsize = udf_vfsp->udf_lbsize;
1568
1569 /*
1570 * We only allow truncation of regular files and directories
1571 * to arbitrary lengths here. In addition, we allow symbolic
1572 * links to be truncated only to zero length. Other inode
1573 * types cannot have their length set here.
1574 */
1575 mode = oip->i_type;
1576 if (mode == VFIFO) {
1577 return (0);
1578 }
1579 if ((mode != VREG) && (mode != VDIR) &&
1580 (!(mode == VLNK && length == 0))) {
1581 return (EINVAL);
1582 }
1583 if (length == oip->i_size) {
1584 /* update ctime and mtime to please POSIX tests */
1585 mutex_enter(&oip->i_tlock);
1586 oip->i_flag |= ICHG |IUPD;
1587 mutex_exit(&oip->i_tlock);
1588 return (0);
1589 }
1590
1591 boff = blkoff(udf_vfsp, length);
1592
1593 if (length > oip->i_size) {
1594 /*
1595 * Trunc up case. ud_bmap_write will ensure that the right blocks
1596 * are allocated. This includes doing any work needed for
1597 * allocating the last block.
1598 */
1599 if (boff == 0) {
1600 error = ud_bmap_write(oip, length - 1,
1601 (int)bsize, 0, cr);
1602 } else {
1603 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1604 }
1605 if (error == 0) {
1606 u_offset_t osize = oip->i_size;
1607 oip->i_size = length;
1608
1609 /*
1610 * Make sure we zero out the remaining bytes of
1611 * the page in case a mmap scribbled on it. We
1612 * can't prevent a mmap from writing beyond EOF
1613 * on the last page of a file.
1614 */
1615 if ((boff = blkoff(udf_vfsp, osize)) != 0) {
1616 pvn_vpzero(ITOV(oip), osize,
1617 (uint32_t)(bsize - boff));
1618 }
1619 mutex_enter(&oip->i_tlock);
1620 oip->i_flag |= ICHG;
1621 ITIMES_NOLOCK(oip);
1622 mutex_exit(&oip->i_tlock);
1623 }
1624 return (error);
1625 }
1626
1627 /*
1628 * Update the pages of the file. If the file is not being
1629 * truncated to a block boundary, the contents of the
1630 * pages following the end of the file must be zero'ed
1631 * in case it ever become accessable again because
1632 * of subsequent file growth.
1633 */
1634 if (boff == 0) {
1635 (void) pvn_vplist_dirty(ITOV(oip), length,
1636 ud_putapage, B_INVAL | B_TRUNC, CRED());
1637 } else {
1638 /*
1639 * Make sure that the last block is properly allocated.
1640 * We only really have to do this if the last block is
1641 * actually allocated. Just to be sure, we do it now
1642 * independent of current allocation.
1643 */
1644 error = ud_bmap_write(oip, length - 1, boff, 0, cr);
1645 if (error) {
1646 return (error);
1647 }
1648
1649 pvn_vpzero(ITOV(oip), length, (uint32_t)(bsize - boff));
1650
1651 (void) pvn_vplist_dirty(ITOV(oip), length,
1652 ud_putapage, B_INVAL | B_TRUNC, CRED());
1653 }
1654
1655
1656 /* Free the blocks */
1657 if (oip->i_desc_type == ICB_FLAG_ONE_AD) {
1658 if (length > oip->i_max_emb) {
1659 return (EFBIG);
1660 }
1661 oip->i_size = length;
1662 mutex_enter(&oip->i_tlock);
1663 oip->i_flag |= ICHG|IUPD;
1664 mutex_exit(&oip->i_tlock);
1665 ud_iupdat(oip, 1);
1666 } else {
1667 if ((error = ud_read_icb_till_off(oip, oip->i_size)) != 0) {
1668 return (error);
1669 }
1670
1671 if (oip->i_astrat == STRAT_TYPE4) {
1672 ud_trunc_ext4(oip, length);
1673 } else if (oip->i_astrat == STRAT_TYPE4096) {
1674 ud_trunc_ext4096(oip, length);
1675 }
1676 }
1677
1678 done:
1679 return (0);
1680 }
1681
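/*
 * Truncate the extent list of a strategy type 4 file to "length"
 * bytes.  The extent containing the new end of file is trimmed, all
 * later extents are freed, and continuation blocks that are no
 * longer required are returned to the free space map.
 */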
1682 void
1683 ud_trunc_ext4(struct ud_inode *ip, u_offset_t length)
1684 {
1685 int32_t index, l2b, count, ecount;
1686 int32_t elen, ndent, nient;
1687 u_offset_t ext_beg, ext_end;
1688 struct icb_ext *iext, *icon;
1689 int32_t lbmask, ext_used;
1690 uint32_t loc;
1691 struct icb_ext text;
1692 uint32_t con_freed;
1693
1694 ASSERT((ip->i_desc_type == ICB_FLAG_SHORT_AD) ||
1695 (ip->i_desc_type == ICB_FLAG_LONG_AD));
1696
1697 if (ip->i_ext_used == 0) {
1698 return;
1699 }
1700
1701 ext_used = ip->i_ext_used;
1702
1703 lbmask = ip->i_udf->udf_lbmask;
1704 l2b = ip->i_udf->udf_l2b_shift;
1705
1706 ASSERT(ip->i_ext);
1707
1708 ip->i_lbr = 0;
1709 for (index = 0; index < ext_used; index++) {
1710 iext = &ip->i_ext[index];
1711
1712 /*
1713 * Find the beginning and end
1714 * of the current extent
1715 */
1716 ext_beg = iext->ib_offset;
1717 ext_end = iext->ib_offset +
1718 ((iext->ib_count + lbmask) & ~lbmask);
1719
1720 /*
1721 * This is the extent that has offset "length"
1722 * make a copy of this extent and
1723 * remember the index. We can use
1724 * it to free blocks
1725 */
1726 if ((length <= ext_end) && (length >= ext_beg)) {
1727 text = *iext;
1728
1729 iext->ib_count = length - ext_beg;
1730 ip->i_ext_used = index + 1;
1731 break;
1732 }
1733 if (iext->ib_flags != IB_UN_RE_AL) {
1734 ip->i_lbr += iext->ib_count >> l2b;
1735 }
1736 }
1737 if (ip->i_ext_used != index) {
1738 if (iext->ib_flags != IB_UN_RE_AL) {
1739 ip->i_lbr +=
1740 ((iext->ib_count + lbmask) & ~lbmask) >> l2b;
1741 }
1742 }
1743
1744 ip->i_size = length;
1745 mutex_enter(&ip->i_tlock);
1746 ip->i_flag |= ICHG|IUPD;
1747 mutex_exit(&ip->i_tlock);
1748 ud_iupdat(ip, 1);
1749
1750 /*
1751 * Free the unused space
1752 */
1753 if (text.ib_flags != IB_UN_RE_AL) {
1754 count = (ext_end - length) >> l2b;
1755 if (count) {
1756 loc = text.ib_block +
1757 (((length - text.ib_offset) + lbmask) >> l2b);
1758 ud_free_space(ip->i_udf->udf_vfs, text.ib_prn,
1759 loc, count);
1760 }
1761 }
1762 for (index = ip->i_ext_used; index < ext_used; index++) {
1763 iext = &ip->i_ext[index];
1764 if (iext->ib_flags != IB_UN_RE_AL) {
1765 count = (iext->ib_count + lbmask) >> l2b;
1766 ud_free_space(ip->i_udf->udf_vfs, iext->ib_prn,
1767 iext->ib_block, count);
1768 }
1769 bzero(iext, sizeof (struct icb_ext));
1770 continue;
1771 }
1772
1773 /*
1774 * release any continuation blocks
1775 */
1776 if (ip->i_con) {
1777
1778 ASSERT(ip->i_con_count >= ip->i_con_used);
1779
1780 /*
1781 * Find out how many indirect blocks
1782 * are required and release the rest
1783 */
1784 if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
1785 elen = sizeof (struct short_ad);
1786 } else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
1787 elen = sizeof (struct long_ad);
1788 }
1789 ndent = ip->i_max_emb / elen;
1790 if (ip->i_ext_used > ndent) {
1791 ecount = ip->i_ext_used - ndent;
1792 } else {
1793 ecount = 0;
1794 }
1795 con_freed = 0;
1796 for (index = 0; index < ip->i_con_used; index++) {
1797 icon = &ip->i_con[index];
1798 nient = icon->ib_count -
1799 (sizeof (struct alloc_ext_desc) + elen);
1800 /* Header + 1 indirect extent */
1801 nient /= elen;
1802 if (ecount) {
1803 if (ecount > nient) {
1804 ecount -= nient;
1805 } else {
1806 ecount = 0;
1807 }
1808 } else {
1809 count = ((icon->ib_count + lbmask) &
1810 ~lbmask) >> l2b;
1811 ud_free_space(ip->i_udf->udf_vfs,
1812 icon->ib_prn, icon->ib_block, count);
1813 con_freed++;
1814 ip->i_cur_max_ext -= nient;
1815 }
1816 }
1817 /*
1818 * Set the continuation extents used (i_con_used) to the correct
1819 * value. It is possible for i_con_used to be zero,
1820 * if we free up all continuation extents. This happens
1821 * when ecount is 0 before entering the for loop above.
1822 */
1823 ip->i_con_used -= con_freed;
1824 if (ip->i_con_read > ip->i_con_used) {
1825 ip->i_con_read = ip->i_con_used;
1826 }
1827 }
1828 }
1829
1830 void
1831 ud_trunc_ext4096(struct ud_inode *ip, u_offset_t length)
1832 {
1833 /*
1834 * The truncate code is the same for
1835 * both strategy type 4 and type 4096 files
1836 */
1837 ud_trunc_ext4(ip, length);
1838 }
1839
1840 /*
1841 * Remove any inodes in the inode cache belonging to dev
1842 *
1843 * There should not be any active ones, return error if any are found but
1844 * still invalidate others (N.B.: this is a user error, not a system error).
1845 *
1846 * Also, count the references to dev by block devices - this really
1847 * has nothing to do with the object of the procedure, but as we have
1848 * to scan the inode table here anyway, we might as well get the
1849 * extra benefit.
1850 */
1851 int32_t
1852 ud_iflush(struct vfs *vfsp)
1853 {
1854 int32_t index, busy = 0;
1855 union ihead *ih;
1856 struct udf_vfs *udf_vfsp;
1857 dev_t dev;
1858 struct vnode *rvp, *vp;
1859 struct ud_inode *ip, *next;
1860
1861 ud_printf("ud_iflush\n");
1862 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
1863 rvp = udf_vfsp->udf_root;
1864 dev = vfsp->vfs_dev;
1865
1866 mutex_enter(&ud_icache_lock);
1867 for (index = 0; index < UD_HASH_SZ; index++) {
1868 ih = &ud_ihead[index];
1869
1870 next = ih->ih_chain[0];
1871 while (next != (struct ud_inode *)ih) {
1872 ip = next;
1873 next = ip->i_forw;
1874 if (ip->i_dev != dev) {
1875 continue;
1876 }
1877 vp = ITOV(ip);
1878 /*
1879 * root inode is processed by the caller
1880 */
1881 if (vp == rvp) {
1882 if (vp->v_count > 1) {
1883 busy = -1;
1884 }
1885 continue;
1886 }
1887 if (ip->i_flag & IREF) {
1888 /*
1889 * Set error indicator for return value,
1890 * but continue invalidating other
1891 * inodes.
1892 */
1893 busy = -1;
1894 continue;
1895 }
1896
1897 rw_enter(&ip->i_contents, RW_WRITER);
1898 remque(ip);
1899 ip->i_forw = ip;
1900 ip->i_back = ip;
1901 /*
1902 * Hold the vnode since it's not done
1903 * in VOP_PUTPAGE anymore.
1904 */
1905 VN_HOLD(vp);
1906 /*
1907 * XXX Synchronous write holding
1908 * cache lock
1909 */
1910 (void) ud_syncip(ip, B_INVAL, I_SYNC);
1911 rw_exit(&ip->i_contents);
1912 VN_RELE(vp);
1913 }
1914 }
1915 mutex_exit(&ud_icache_lock);
1916
1917 return (busy);
1918 }
1919
1920
1921 /*
1922 * Check mode permission on inode. Mode is READ, WRITE or EXEC.
1923 * In the case of WRITE, the read-only status of the file system
1924 * is checked. The applicable mode bits are compared with the
1925 * requested form of access. If bits are missing, the secpolicy
1926 * function will check for privileges.
1927 */
1928 int
1929 ud_iaccess(struct ud_inode *ip, int32_t mode, struct cred *cr, int dolock)
1930 {
1931 int shift = 0;
1932 int ret = 0;
1933
1934 if (dolock)
1935 rw_enter(&ip->i_contents, RW_READER);
1936 ASSERT(RW_LOCK_HELD(&ip->i_contents));
1937
1938 ud_printf("ud_iaccess\n");
1939 if (mode & IWRITE) {
1940 /*
1941 * Disallow write attempts on read-only
1942 * file systems, unless the file is a block
1943 * or character device or a FIFO.
1944 */
1945 if (ip->i_udf->udf_flags & UDF_FL_RDONLY) {
1946 if ((ip->i_type != VCHR) &&
1947 (ip->i_type != VBLK) &&
1948 (ip->i_type != VFIFO)) {
1949 ret = EROFS;
1950 goto out;
1951 }
1952 }
1953 }
1954
1955 /*
1956 * Access check is based on only
1957 * one of owner, group, public.
1958 * If not owner, then check group.
1959 * If not a member of the group, then
1960 * check public access.
1961 */
1962 if (crgetuid(cr) != ip->i_uid) {
1963 shift += 5;
1964 if (!groupmember((uid_t)ip->i_gid, cr))
1965 shift += 5;
1966 }
1967
1968 ret = secpolicy_vnode_access2(cr, ITOV(ip), ip->i_uid,
1969 UD2VA_PERM(ip->i_perm << shift), UD2VA_PERM(mode));
1970
1971 out:
1972 if (dolock)
1973 rw_exit(&ip->i_contents);
1974 return (ret);
1975 }
1976
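/*
 * Stamp the in-core access, modification and change times with the
 * current time, according to the IACC, IUPD and ICHG bits in i_flag.
 */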
1977 void
1978 ud_imark(struct ud_inode *ip)
1979 {
1980 timestruc_t now;
1981
1982 gethrestime(&now);
1983 ud_printf("ud_imark\n");
1984 if (ip->i_flag & IACC) {
1985 ip->i_atime.tv_sec = now.tv_sec;
1986 ip->i_atime.tv_nsec = now.tv_nsec;
1987 }
1988 if (ip->i_flag & IUPD) {
1989 ip->i_mtime.tv_sec = now.tv_sec;
1990 ip->i_mtime.tv_nsec = now.tv_nsec;
1991 ip->i_flag |= IMODTIME;
1992 }
1993 if (ip->i_flag & ICHG) {
1994 ip->i_diroff = 0;
1995 ip->i_ctime.tv_sec = now.tv_sec;
1996 ip->i_ctime.tv_nsec = now.tv_nsec;
1997 }
1998 }
1999
2000
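/*
 * Fold any pending time updates into the inode and convert the
 * transient IACC/IUPD/ICHG bits into IMOD or IMODACC so that
 * ud_iupdat knows the inode still needs to be written out.
 */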
2001 void
2002 ud_itimes_nolock(struct ud_inode *ip)
2003 {
2004 ud_printf("ud_itimes_nolock\n");
2005
2006 if (ip->i_flag & (IUPD|IACC|ICHG)) {
2007 if (ip->i_flag & ICHG) {
2008 ip->i_flag |= IMOD;
2009 } else {
2010 ip->i_flag |= IMODACC;
2011 }
2012 ud_imark(ip);
2013 ip->i_flag &= ~(IACC|IUPD|ICHG);
2014 }
2015 }
2016
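/*
 * Remove the inode from the inode hash chain so that it can no
 * longer be found by the cache lookup in ud_iget.
 */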
2017 void
2018 ud_delcache(struct ud_inode *ip)
2019 {
2020 ud_printf("ud_delcache\n");
2021
2022 mutex_enter(&ud_icache_lock);
2023 remque(ip);
2024 ip->i_forw = ip;
2025 ip->i_back = ip;
2026 mutex_exit(&ud_icache_lock);
2027 }
2028
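/*
 * Drop the last hold on an inode and queue it on the freelist: at the
 * front if it has no cached pages or is invalid, at the end otherwise.
 * The caller must hold i_contents as a writer.
 */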
2029 void
2030 ud_idrop(struct ud_inode *ip)
2031 {
2032 struct vnode *vp = ITOV(ip);
2033
2034 ASSERT(RW_WRITE_HELD(&ip->i_contents));
2035
2036 ud_printf("ud_idrop\n");
2037
2038 mutex_enter(&vp->v_lock);
2039 if (vp->v_count > 1) {
2040 vp->v_count--;
2041 mutex_exit(&vp->v_lock);
2042 return;
2043 }
2044 vp->v_count = 0;
2045 mutex_exit(&vp->v_lock);
2046
2047
2048 /*
2049 * if inode is invalid or there is no page associated with
2050 * this inode, put the inode in the front of the free list
2051 */
2052 mutex_enter(&ip->i_tlock);
2053 mutex_enter(&udf_ifree_lock);
2054 if (!vn_has_cached_data(vp) || ip->i_perm == 0) {
2055 ud_add_to_free_list(ip, UD_BEGIN);
2056 } else {
2057 /*
2058 * Otherwise, put the inode back on the end of the free list.
2059 */
2060 ud_add_to_free_list(ip, UD_END);
2061 }
2062 mutex_exit(&udf_ifree_lock);
2063 ip->i_flag &= IMODTIME;
2064 mutex_exit(&ip->i_tlock);
2065 }
2066
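/*
 * Add an inode to the global inode freelist, either at the head
 * (UD_BEGIN) or at the tail (UD_END).  The caller must hold
 * udf_ifree_lock.
 */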
2067 void
2068 ud_add_to_free_list(struct ud_inode *ip, uint32_t at)
2069 {
2070 ASSERT(ip);
2071 ASSERT(mutex_owned(&udf_ifree_lock));
2072
2073 #ifdef DEBUG
2074 /* Search if the element is already in the list */
2075 if (udf_ifreeh != NULL) {
2076 struct ud_inode *iq;
2077
2078 iq = udf_ifreeh;
2079 while (iq) {
2080 if (iq == ip) {
2081 cmn_err(CE_WARN, "Duplicate %p\n", (void *)ip);
2082 }
2083 iq = iq->i_freef;
2084 }
2085 }
2086 #endif
2087
2088 ip->i_freef = NULL;
2089 ip->i_freeb = NULL;
2090 if (udf_ifreeh == NULL) {
2091 /*
2092 * Nothing on the list; just add it
2093 */
2094 udf_ifreeh = ip;
2095 udf_ifreet = ip;
2096 } else {
2097 if (at == UD_BEGIN) {
2098 /*
2099 * Add at the beginning of the list
2100 */
2101 ip->i_freef = udf_ifreeh;
2102 udf_ifreeh->i_freeb = ip;
2103 udf_ifreeh = ip;
2104 } else {
2105 /*
2106 * Add at the end of the list
2107 */
2108 ip->i_freeb = udf_ifreet;
2109 udf_ifreet->i_freef = ip;
2110 udf_ifreet = ip;
2111 }
2112 }
2113 }
2114
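/*
 * Take an inode off the global inode freelist.  The caller must hold
 * udf_ifree_lock.
 */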
2115 void
2116 ud_remove_from_free_list(struct ud_inode *ip, uint32_t at)
2117 {
2118 ASSERT(ip);
2119 ASSERT(mutex_owned(&udf_ifree_lock));
2120
2121 #ifdef DEBUG
2122 {
2123 struct ud_inode *iq;
2124 uint32_t found = 0;
2125
2126 iq = udf_ifreeh;
2127 while (iq) {
2128 if (iq == ip) {
2129 found++;
2130 }
2131 iq = iq->i_freef;
2132 }
2133 if (found != 1) {
2134 cmn_err(CE_WARN, "ip %p is found %x times\n",
2135 (void *)ip, found);
2136 }
2137 }
2138 #endif
2139
2140 if ((ip->i_freef == NULL) && (ip->i_freeb == NULL)) {
2141 if (ip != udf_ifreeh) {
2142 return;
2143 }
2144 }
2145
2146 if ((at == UD_BEGIN) || (ip == udf_ifreeh)) {
2147 udf_ifreeh = ip->i_freef;
2148 if (ip->i_freef == NULL) {
2149 udf_ifreet = NULL;
2150 } else {
2151 udf_ifreeh->i_freeb = NULL;
2152 }
2153 } else {
2154 ip->i_freeb->i_freef = ip->i_freef;
2155 if (ip->i_freef) {
2156 ip->i_freef->i_freeb = ip->i_freeb;
2157 } else {
2158 udf_ifreet = ip->i_freeb;
2159 }
2160 }
2161 ip->i_freef = NULL;
2162 ip->i_freeb = NULL;
2163 }
2164
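/*
 * One-time initialization of the inode hash chains, the inode
 * freelist and the global locks, done before any competing threads
 * can access these structures.
 */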
2165 void
2166 ud_init_inodes(void)
2167 {
2168 union ihead *ih = ud_ihead;
2169 int index;
2170
2171 #ifndef __lint
2172 _NOTE(NO_COMPETING_THREADS_NOW);
2173 #endif
2174 for (index = 0; index < UD_HASH_SZ; index++, ih++) {
2175 ih->ih_head[0] = ih;
2176 ih->ih_head[1] = ih;
2177 }
2178 mutex_init(&ud_icache_lock, NULL, MUTEX_DEFAULT, NULL);
2179 mutex_init(&ud_nino_lock, NULL, MUTEX_DEFAULT, NULL);
2180
2181 udf_ifreeh = NULL;
2182 udf_ifreet = NULL;
2183 mutex_init(&udf_ifree_lock, NULL, MUTEX_DEFAULT, NULL);
2184
2185 mutex_init(&ud_sync_busy, NULL, MUTEX_DEFAULT, NULL);
2186 udf_vfs_instances = NULL;
2187 mutex_init(&udf_vfs_mutex, NULL, MUTEX_DEFAULT, NULL);
2188
2189 #ifndef __lint
2190 _NOTE(COMPETING_THREADS_NOW);
2191 #endif
2192 }
2193