xref: /titanic_50/usr/src/uts/common/fs/udfs/udf_alloc.c (revision 7eea693d6b672899726e75993fddc4e95b52647f)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/types.h>
30 #include <sys/t_lock.h>
31 #include <sys/param.h>
32 #include <sys/time.h>
33 #include <sys/systm.h>
34 #include <sys/sysmacros.h>
35 #include <sys/resource.h>
36 #include <sys/signal.h>
37 #include <sys/cred.h>
38 #include <sys/user.h>
39 #include <sys/buf.h>
40 #include <sys/vfs.h>
41 #include <sys/stat.h>
42 #include <sys/vnode.h>
43 #include <sys/mode.h>
44 #include <sys/proc.h>
45 #include <sys/disp.h>
46 #include <sys/file.h>
47 #include <sys/fcntl.h>
48 #include <sys/flock.h>
49 #include <sys/kmem.h>
50 #include <sys/uio.h>
51 #include <sys/dnlc.h>
52 #include <sys/conf.h>
53 #include <sys/errno.h>
54 #include <sys/mman.h>
55 #include <sys/fbuf.h>
56 #include <sys/pathname.h>
57 #include <sys/debug.h>
58 #include <sys/vmsystm.h>
59 #include <sys/cmn_err.h>
60 #include <sys/dirent.h>
61 #include <sys/errno.h>
62 #include <sys/modctl.h>
63 #include <sys/statvfs.h>
64 #include <sys/mount.h>
65 #include <sys/sunddi.h>
66 #include <sys/bootconf.h>
67 #include <sys/policy.h>
68 
69 #include <vm/hat.h>
70 #include <vm/page.h>
71 #include <vm/pvn.h>
72 #include <vm/as.h>
73 #include <vm/seg.h>
74 #include <vm/seg_map.h>
75 #include <vm/seg_kmem.h>
76 #include <vm/seg_vn.h>
77 #include <vm/rm.h>
78 #include <vm/page.h>
79 #include <sys/swap.h>
80 
81 #include <fs/fs_subr.h>
82 
83 #include <sys/fs/udf_volume.h>
84 #include <sys/fs/udf_inode.h>
85 
86 #ifdef	DEBUG
87 extern struct ud_inode *ud_search_icache(struct vfs *, uint16_t, uint32_t);
88 #endif
89 
90 int32_t ud_alloc_space_bmap(struct vfs *, struct ud_part *,
91 	uint32_t, uint32_t, uint32_t *, uint32_t *, int32_t);
92 int32_t ud_check_free_and_mark_used(struct vfs *,
93 	struct ud_part *, uint32_t, uint32_t *);
94 int32_t ud_check_free(uint8_t *, uint8_t *, uint32_t, uint32_t);
95 void ud_mark_used(uint8_t *, uint32_t, uint32_t);
96 void ud_mark_free(uint8_t *, uint32_t, uint32_t);
97 int32_t ud_alloc_space_stbl(struct vfs *, struct ud_part *,
98 	uint32_t, uint32_t, uint32_t *, uint32_t *, int32_t);
99 int32_t ud_free_space_bmap(struct vfs *,
100 	struct ud_part *, uint32_t, uint32_t);
101 int32_t ud_free_space_stbl(struct vfs *,
102 	struct ud_part *, uint32_t, uint32_t);
103 
104 
/*
 * WORKAROUND for buffer cache behavior:
 * If the requested block already exists in the buffer cache, the
 * cache does not care about the requested count — it just returns
 * the old buffer (it does not even set the resid value).  The same
 * problem exists when the requested block is not the first block of
 * a cached buffer: a different buffer is returned.  We work around
 * this by always issuing fixed-size requests to the buffer cache.
 * The size is currently udf_lbsize (it is restricted to udf_lbsize
 * because iget always issues udf_lbsize requests).
 */
119 
120 
/*
 * Allocate blkcount contiguous blocks near the "proximity" block in
 * the partition identified by prn.  proximity != 0 implies
 * less_is_ok == 0.  The starting block number and the count of
 * blocks actually allocated are returned in *start_blkno and *size.
 * If less_is_ok == 0, allocate only if the entire requirement can
 * be met.
 */
int32_t
ud_alloc_space(struct vfs *vfsp, uint16_t prn,
	uint32_t proximity, uint32_t blkcount,
	uint32_t *start_blkno, uint32_t *size,
	int32_t less_is_ok, int32_t metadata)
{
	int32_t i, error = 0;
	struct udf_vfs *udf_vfsp;
	struct ud_part *ud_part;

	ud_printf("ud_alloc_space\n");


/*
 * prom_printf("ud_alloc_space %x %x %x %x\n",
 * proximity, blkcount, less_is_ok, metadata);
 */

	/* A zero-length request trivially succeeds. */
	if (blkcount == 0) {
		*start_blkno = 0;
		*size = 0;
		return (0);
	}

	/* Locate the partition whose number matches prn. */
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	ud_part = udf_vfsp->udf_parts;
	for (i = 0; i < udf_vfsp->udf_npart; i++) {
		if (prn == ud_part->udp_number) {
			break;
		}
		ud_part ++;
	}

	if (i == udf_vfsp->udf_npart) {
		/* No such partition; non-zero return signals failure. */
		return (1);
	}
	*start_blkno = 0;
	*size = 0;
	if (metadata) {
		/*
		 * Metadata allocations are single blocks; try the
		 * per-partition cache first (presumably a cache of
		 * recently freed blocks — see ud_alloc_from_cache).
		 */
		error = ud_alloc_from_cache(udf_vfsp, ud_part, start_blkno);
		if (error == 0) {
			*size = 1;
			return (0);
		}
	}
	if (ud_part->udp_nfree != 0) {
		/* Dispatch on the partition's free-space representation. */
		if (ud_part->udp_flags == UDP_BITMAPS) {
			error = ud_alloc_space_bmap(vfsp, ud_part, proximity,
				blkcount, start_blkno, size, less_is_ok);
		} else {
			error = ud_alloc_space_stbl(vfsp, ud_part, proximity,
				blkcount, start_blkno, size, less_is_ok);
		}
		if (error == 0) {
			/*
			 * Charge the allocated blocks against both the
			 * partition and the file-system free counts,
			 * under the fs-wide lock.
			 */
			mutex_enter(&udf_vfsp->udf_lock);
			ASSERT(ud_part->udp_nfree >= *size);
			ASSERT(udf_vfsp->udf_freeblks >= *size);
			ud_part->udp_nfree -= *size;
			udf_vfsp->udf_freeblks -= *size;
			mutex_exit(&udf_vfsp->udf_lock);
		}
	} else {
		error = ENOSPC;
	}
/*
 * prom_printf("end %x %x %x\n", error, *start_blkno, *size);
 */

	return (error);
}
200 
#ifdef	SKIP_USED_BLOCKS
/*
 * This table is manually constructed.
 *
 * skip[b] is the bit position of the least-significant 1 (free) bit
 * in the byte value b, or 8 when b == 0 (no free bit at all).  The
 * bitmap scanner in ud_alloc_space_bmap() uses it to step over runs
 * of allocated blocks a byte at a time instead of bit by bit.
 */
int8_t skip[256] = {
8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
#endif
224 
225 #define	HDR_BLKS	(24 * 8)
226 
/*
 * Allocate up to blkcount contiguous blocks from the partition's
 * unallocated-space bitmap (bit set == free, see ud_mark_free/
 * ud_mark_used).  A non-zero proximity is tried first as an exact
 * starting block.  Otherwise scan from the cluster-aligned
 * last-allocation point to the end of the partition, then wrap to
 * the beginning; with less_is_ok the largest free run seen is used
 * as a fallback.  Returns 0 (with *start_blkno/*size set), ENOSPC,
 * or EIO.
 */
int32_t
ud_alloc_space_bmap(struct vfs *vfsp,
	struct ud_part *ud_part, uint32_t proximity,
	uint32_t blkcount, uint32_t *start_blkno,
	uint32_t *size, int32_t less_is_ok)
{
	struct buf *bp = NULL;
	struct udf_vfs *udf_vfsp;
	uint32_t old_loc, old_size, new_size;
	uint8_t *addr, *eaddr;
	uint32_t loop_count, loop_begin, loop_end;
	uint32_t bno, begin, dummy, temp, lbsz, bb_count;
	uint32_t bblk = 0, eblk = 0;
	int32_t fragmented;

	ud_printf("ud_alloc_space_bmap\n");

	ASSERT(ud_part);
	ASSERT(ud_part->udp_flags == UDP_BITMAPS);

	if (ud_part->udp_unall_len == 0) {
		return (ENOSPC);
	}
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	lbsz = udf_vfsp->udf_lbsize;
	/* Number of bitmap bits (= blocks) held by one logical block. */
	bb_count = udf_vfsp->udf_lbsize << 3;

	if (proximity != 0) {
		/*
		 * directly try allocating
		 * at proximity
		 */
		temp = blkcount;
		if (ud_check_free_and_mark_used(vfsp,
				ud_part, proximity, &temp) == 0) {
			if (temp != 0) {
				/* Got (a prefix of) the request at proximity. */
				*start_blkno = proximity;
				*size = temp;
				return (0);
			}
		}
		*start_blkno = 0;
		*size = 0;
	}

	mutex_enter(&udf_vfsp->udf_lock);
	fragmented = udf_vfsp->udf_fragmented;
	mutex_exit(&udf_vfsp->udf_lock);
retry:
	old_loc = old_size = 0;

	mutex_enter(&udf_vfsp->udf_lock);
	/* Start at the next cluster boundary past the last allocation. */
	loop_begin = (ud_part->udp_last_alloc + CLSTR_MASK) & ~CLSTR_MASK;
	mutex_exit(&udf_vfsp->udf_lock);

	loop_end = ud_part->udp_nblocks + HDR_BLKS;
	/*
	 * Two passes when starting mid-partition: first [begin, end),
	 * then wrap around to cover [0, begin).
	 */
	loop_count = (loop_begin) ? 2 : 1;
	while (loop_count--) {
		for (bno = loop_begin + HDR_BLKS; bno + blkcount < loop_end; ) {


			/*
			 * Each bread is restricted to lbsize
			 * due to the way bread is implemented
			 */
			if ((bp == NULL) ||
				((eblk - bno) < blkcount)) {
				if (bp != NULL) {
					brelse(bp);
				}
				begin = ud_part->udp_unall_loc +
						bno / bb_count;
				bp = ud_bread(vfsp->vfs_dev,
					ud_xlate_to_daddr(udf_vfsp,
						ud_part->udp_number,
						begin, 1, &dummy)
					<< udf_vfsp->udf_l2d_shift, lbsz);
				if (bp->b_flags & B_ERROR) {
					brelse(bp);
					return (EIO);
				}
				/* Bit range [bblk, eblk) covered by this buffer. */
				bblk = begin * bb_count;
				eblk = bblk + bb_count;
				addr = (uint8_t *)bp->b_un.b_addr;
				eaddr = addr + bp->b_bcount;
			}

			/* Clip the probe to what this buffer covers. */
			if (blkcount > (eblk - bno)) {
				temp = eblk - bno;
			} else {
				temp = blkcount;
			}
			if ((new_size = ud_check_free(addr, eaddr,
					bno - bblk, temp)) == temp) {
				/* Whole run is free: claim it and record it. */
				ud_mark_used(addr, bno - bblk, temp);
				bdwrite(bp);
				*start_blkno = bno - HDR_BLKS;
				*size = temp;
				mutex_enter(&udf_vfsp->udf_lock);
				ud_part->udp_last_alloc =
					bno + temp - HDR_BLKS;
				mutex_exit(&udf_vfsp->udf_lock);
				return (0);
			}
			/* Track the largest partial run for the fallback. */
			if (less_is_ok) {
				if (old_size < new_size) {
					old_loc = bno - HDR_BLKS;
					old_size = new_size;
				}
			}
			if (new_size != 0) {
				bno += new_size;
			} else {
#ifdef	SKIP_USED_BLOCKS
				/*
				 * Skipping 0's
				 * implement a allocated block skip
				 * using a while loop with an
				 * preinitialised array of 256 elements
				 * for number of blocks skipped
				 */
				bno &= ~3;
				while (skip[addr[(bno - bblk) >> 3]] == 8)
					bno += 8;
				bno += skip[addr[(bno - bblk) >> 3]];
#else
				bno++;
#endif
			}
			if (!fragmented) {
				/* Probe only on cluster boundaries. */
				bno = (bno + CLSTR_MASK) & ~CLSTR_MASK;
			}
		}
		if (bp != NULL) {
			brelse(bp);
			bp = NULL;
		}
		if (loop_count) {
			/* Second pass: the front part we skipped initially. */
			loop_end = loop_begin + HDR_BLKS;
			loop_begin = 0;
		}
	}
	if ((old_size == 0) && (!fragmented)) {
		/*
		 * Nothing found on cluster boundaries; mark the fs
		 * fragmented and rescan block by block.
		 */
		mutex_enter(&udf_vfsp->udf_lock);
		fragmented = udf_vfsp->udf_fragmented = 1;
		mutex_exit(&udf_vfsp->udf_lock);
		goto retry;
	}
	if (less_is_ok && (old_size != 0)) {

		/*
		 * Check once again
		 * somebody else might have
		 * already allocated behind us
		 */
		if (ud_check_free_and_mark_used(vfsp,
				ud_part, old_loc, &old_size) == 0) {
			if (old_size != 0) {
				*start_blkno = old_loc;
				*size = old_size;
				mutex_enter(&udf_vfsp->udf_lock);
				ud_part->udp_last_alloc = old_loc + old_size;
				mutex_exit(&udf_vfsp->udf_lock);
				return (0);
			}
		}

		/*
		 * Failed what ever the reason
		 */
		goto retry;
	}
	return (ENOSPC);
}
401 
/*
 * start is the block offset from the beginning
 * of the partition ud_part.
 *
 * Verify that *count blocks starting at start are free in the
 * unallocated-space bitmap and, if so, mark them used and schedule
 * the bitmap block for write-back.  *count may be trimmed so the
 * range stays within the single bitmap block read here; callers
 * must inspect the returned *count.  Returns 0 on success, 1 when
 * the range is not entirely free, EIO on a read error.
 */
int32_t
ud_check_free_and_mark_used(struct vfs *vfsp,
	struct ud_part *ud_part, uint32_t start, uint32_t *count)
{
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	uint32_t begin, dummy, bb_count;

	/*
	 * Adjust start for the header
	 */
	start += HDR_BLKS;
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	/* Number of bitmap bits (= blocks) held by one logical block. */
	bb_count = udf_vfsp->udf_lbsize << 3;

	/*
	 * Read just one block worth of bitmap
	 */
	begin = ud_part->udp_unall_loc + (start / bb_count);
	bp = ud_bread(vfsp->vfs_dev,
		ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number,
			begin, 1, &dummy) << udf_vfsp->udf_l2d_shift,
			udf_vfsp->udf_lbsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}

	/*
	 * Adjust the count if necessary
	 *
	 * NOTE(review): "start -= begin * bb_count" folds udp_unall_loc
	 * into the subtraction; this is only exact when the bitmap
	 * starts at partition block 0 — confirm for nonzero
	 * udp_unall_loc.
	 */
	start -= begin * bb_count;
	if ((start + *count) > bb_count) {
		*count = bb_count - start;
		ASSERT(*count > 0);
	}
	if (ud_check_free((uint8_t *)bp->b_un.b_addr,
			(uint8_t *)bp->b_un.b_addr + bp->b_bcount,
			start, *count) != *count) {
		brelse(bp);
		return (1);
	}
	ud_mark_used((uint8_t *)bp->b_un.b_addr, start, *count);
	bdwrite(bp);

	return (0);
}
453 
/*
 * Count how many of the "count" blocks starting at bit offset
 * "start" are free (bit set) in the bitmap at addr.  Stops at the
 * first in-use bit or when the scan would read at or past eaddr.
 * Returns the number of consecutive free blocks found.
 */
int32_t
ud_check_free(uint8_t *addr, uint8_t *eaddr, uint32_t start, uint32_t count)
{
	int32_t nfree = 0;

	while (nfree < count) {
		uint8_t *byte = &addr[start >> 3];

		/* Stop at the buffer end or at the first used (0) bit. */
		if (byte >= eaddr) {
			break;
		}
		if ((*byte & (1 << (start & 0x7))) == 0) {
			break;
		}
		start++;
		nfree++;
	}
	return (nfree);
}
470 
/*
 * Mark "count" blocks starting at bit offset "start" as in use by
 * clearing their bits in the bitmap at addr (bit set == free).
 * The caller guarantees the range lies within the buffer.
 */
void
ud_mark_used(uint8_t *addr, uint32_t start, uint32_t count)
{
	uint32_t done;

	for (done = 0; done < count; done++, start++) {
		addr[start >> 3] &= ~(1 << (start & 0x7));
	}
}
481 
/*
 * Mark "count" blocks starting at bit offset "start" as free by
 * setting their bits in the bitmap at addr (bit set == free).
 * The caller guarantees the range lies within the buffer.
 */
void
ud_mark_free(uint8_t *addr, uint32_t start, uint32_t count)
{
	uint32_t done;

	for (done = 0; done < count; done++, start++) {
		addr[start >> 3] |= (1 << (start & 0x7));
	}
}
492 
493 /* ARGSUSED */
494 int32_t
495 ud_alloc_space_stbl(struct vfs *vfsp,
496 	struct ud_part *ud_part, uint32_t proximity,
497 	uint32_t blkcount, uint32_t *start_blkno,
498 	uint32_t *size, int32_t less_is_ok)
499 {
500 	uint16_t adesc;
501 	uint32_t temp, sz;
502 	int32_t error, index, count, larg_index, larg_sz;
503 	struct buf *bp;
504 	struct udf_vfs *udf_vfsp;
505 	struct unall_space_ent *use;
506 
507 	ASSERT(ud_part);
508 	ASSERT(ud_part->udp_flags == UDP_SPACETBLS);
509 
510 	ud_printf("ud_alloc_space_stbl\n");
511 
512 	if (ud_part->udp_unall_len == 0) {
513 		return (ENOSPC);
514 	}
515 
516 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
517 	ASSERT((ud_part->udp_unall_len + 40) <= udf_vfsp->udf_lbsize);
518 
519 	bp = ud_bread(vfsp->vfs_dev,
520 			ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number,
521 				ud_part->udp_unall_loc, 1, &temp),
522 			udf_vfsp->udf_lbsize);
523 
524 	use = (struct unall_space_ent *)bp->b_un.b_addr;
525 	sz = SWAP_32(use->use_len_ad);
526 	adesc = SWAP_16(use->use_icb_tag.itag_flags) & 0x7;
527 	if (adesc == ICB_FLAG_SHORT_AD) {
528 		struct short_ad *sad;
529 
530 		sad = (struct short_ad *)use->use_ad;
531 		count = sz / sizeof (struct short_ad);
532 
533 		/*
534 		 * Search the entire list for
535 		 * a extent which can give the entire data
536 		 * Do only first fit
537 		 */
538 		larg_index = larg_sz = 0;
539 		for (index = 0; index < count; index++, sad++) {
540 			temp = SWAP_32(sad->sad_ext_len) >>
541 					udf_vfsp->udf_l2b_shift;
542 			if (temp == blkcount) {
543 				/*
544 				 * We found the right fit
545 				 * return the values and
546 				 * compress the table
547 				 */
548 				less_is_ok = 1;
549 				larg_index = index;
550 				larg_sz = temp;
551 				goto compress_sad;
552 			} else if (temp > blkcount) {
553 				/*
554 				 * We found an entry larger than the
555 				 * requirement. Change the start block
556 				 * number and the count to reflect the
557 				 * allocation
558 				 */
559 				*start_blkno = SWAP_32(sad->sad_ext_loc);
560 				*size = blkcount;
561 				temp = (temp - blkcount) <<
562 					udf_vfsp->udf_l2b_shift;
563 				sad->sad_ext_len = SWAP_32(temp);
564 				temp = SWAP_32(sad->sad_ext_loc) + blkcount;
565 				sad->sad_ext_loc = SWAP_32(temp);
566 				goto end;
567 			}
568 			/*
569 			 * Let us keep track of the largest
570 			 * extent available if less_is_ok.
571 			 */
572 			if (less_is_ok) {
573 				if (temp > larg_sz) {
574 					larg_sz = temp;
575 					larg_index = index;
576 				}
577 			}
578 		}
579 compress_sad:
580 		if ((less_is_ok) &&
581 			(larg_sz != 0)) {
582 			/*
583 			 * If we came here we could
584 			 * not find a extent to cover the entire size
585 			 * return whatever could be allocated
586 			 * and compress the table
587 			 */
588 			sad = (struct short_ad *)use->use_ad;
589 			sad += larg_index;
590 			*start_blkno = SWAP_32(sad->sad_ext_loc);
591 			*size = larg_sz;
592 			for (index = larg_index; index < count;
593 					index++, sad++) {
594 				*sad = *(sad+1);
595 			}
596 			sz -= sizeof (struct short_ad);
597 			use->use_len_ad = SWAP_32(sz);
598 		} else {
599 			error = ENOSPC;
600 		}
601 		goto end;
602 	} else if (adesc == ICB_FLAG_LONG_AD) {
603 		struct long_ad *lad;
604 
605 		lad = (struct long_ad *)use->use_ad;
606 		count = sz / sizeof (struct long_ad);
607 
608 		/*
609 		 * Search the entire list for
610 		 * a extent which can give the entire data
611 		 * Do only first fit
612 		 */
613 		larg_index = larg_sz = 0;
614 		for (index = 0; index < count; index++, lad++) {
615 			temp = SWAP_32(lad->lad_ext_len) >>
616 					udf_vfsp->udf_l2b_shift;
617 			if (temp == blkcount) {
618 				/*
619 				 * We found the right fit
620 				 * return the values and
621 				 * compress the table
622 				 */
623 				less_is_ok = 1;
624 				larg_index = index;
625 				larg_sz = temp;
626 				goto compress_lad;
627 			} else if (temp > blkcount) {
628 				/*
629 				 * We found an entry larger than the
630 				 * requirement. Change the start block
631 				 * number and the count to reflect the
632 				 * allocation
633 				 */
634 				*start_blkno = SWAP_32(lad->lad_ext_loc);
635 				*size = blkcount;
636 				temp = (temp - blkcount) <<
637 					udf_vfsp->udf_l2b_shift;
638 				lad->lad_ext_len = SWAP_32(temp);
639 				temp = SWAP_32(lad->lad_ext_loc) + blkcount;
640 				lad->lad_ext_loc = SWAP_32(temp);
641 				goto end;
642 			}
643 			/*
644 			 * Let us keep track of the largest
645 			 * extent available if less_is_ok.
646 			 */
647 			if (less_is_ok) {
648 				if (temp > larg_sz) {
649 					larg_sz = temp;
650 					larg_index = index;
651 				}
652 			}
653 		}
654 compress_lad:
655 		if ((less_is_ok) &&
656 			(larg_sz != 0)) {
657 			/*
658 			 * If we came here we could
659 			 * not find a extent to cover the entire size
660 			 * return whatever could be allocated
661 			 * and compress the table
662 			 */
663 			lad = (struct long_ad *)use->use_ad;
664 			lad += larg_index;
665 			*start_blkno = SWAP_32(lad->lad_ext_loc);
666 			*size = larg_sz;
667 			for (index = larg_index; index < count;
668 					index++, lad++) {
669 				*lad = *(lad+1);
670 			}
671 			sz -= sizeof (struct long_ad);
672 			use->use_len_ad = SWAP_32(sz);
673 		} else {
674 			error = ENOSPC;
675 		}
676 		goto end;
677 	} else {
678 		error = ENOSPC;
679 	}
680 end:
681 	if (!error) {
682 		bdwrite(bp);
683 	} else {
684 		brelse(bp);
685 	}
686 	return (error);
687 }
688 
689 
/*
 * Release blkcount blocks starting at beginblk.
 * Calls the appropriate bitmap/space-table function.
 */
694 void
695 ud_free_space(struct vfs *vfsp, uint16_t prn,
696 	uint32_t beginblk, uint32_t blkcount)
697 {
698 	int32_t i, error;
699 	struct ud_part *ud_part;
700 	struct udf_vfs *udf_vfsp;
701 
702 	ud_printf("ud_free_space\n");
703 
704 	if (blkcount == 0) {
705 		return;
706 	}
707 
708 	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
709 	ud_part = udf_vfsp->udf_parts;
710 	for (i = 0; i < udf_vfsp->udf_npart; i++) {
711 		if (prn == ud_part->udp_number) {
712 			break;
713 		}
714 		ud_part ++;
715 	}
716 
717 	if (i == udf_vfsp->udf_npart) {
718 		return;
719 	}
720 
721 	if (ud_part->udp_flags == UDP_BITMAPS) {
722 		error = ud_free_space_bmap(vfsp, ud_part, beginblk, blkcount);
723 	} else {
724 		error = ud_free_space_stbl(vfsp, ud_part, beginblk, blkcount);
725 	}
726 
727 	if (error) {
728 		udf_vfsp->udf_mark_bad = 1;
729 	}
730 }
731 
/*
 * If there is a freed-space table then release the blocks to the
 * freed-space bitmap; otherwise release them to the
 * unallocated-space bitmap.  Find the offset into the bitmap and
 * mark the blocks as free blocks.  Returns 0, ENOSPC (bad range or
 * no bitmap), or EIO.
 */
int32_t
ud_free_space_bmap(struct vfs *vfsp,
	struct ud_part *ud_part,
	uint32_t beginblk, uint32_t blkcount)
{
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	uint32_t block, begin, end, blkno, count, map_end_blk, dummy;

	ud_printf("ud_free_space_bmap\n");

	ASSERT(ud_part);
	ASSERT(ud_part->udp_flags == UDP_BITMAPS);
/*
 * prom_printf("%x %x\n", udblock, udcount);
 */

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	if ((ud_part->udp_freed_len == 0) &&
		(ud_part->udp_unall_len == 0)) {
		return (ENOSPC);
	}
	/*
	 * decide unallocated/freed table to use
	 * (the freed-space bitmap is preferred when present)
	 */
	if (ud_part->udp_freed_len == 0) {
		begin = ud_part->udp_unall_loc;
		map_end_blk = ud_part->udp_unall_len << 3;
	} else {
		begin = ud_part->udp_freed_loc;
		map_end_blk = ud_part->udp_freed_len << 3;
	}

	/* Reject ranges that extend past the end of the bitmap. */
	if (beginblk + blkcount > map_end_blk) {
		return (ENOSPC);
	}

	/* adjust for the bitmap header */
	beginblk += HDR_BLKS;

	/* First and last bitmap blocks touched by the range. */
	end = begin + ((beginblk + blkcount) / (udf_vfsp->udf_lbsize << 3));
	begin += (beginblk / (udf_vfsp->udf_lbsize << 3));

	for (block = begin; block <= end; block++) {

		bp = ud_bread(vfsp->vfs_dev,
			ud_xlate_to_daddr(udf_vfsp,
				ud_part->udp_number, block, 1, &dummy)
				<< udf_vfsp->udf_l2d_shift,
			udf_vfsp->udf_lbsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return (EIO);
		}
		ASSERT(dummy == 1);

		mutex_enter(&udf_vfsp->udf_lock);

		/*
		 * add freed blocks to the bitmap
		 */

		/*
		 * NOTE(review): this bit-offset math folds the bitmap
		 * location into "block"; it is only exact when the
		 * bitmap starts at partition block 0 — confirm for
		 * nonzero udp_unall_loc/udp_freed_loc.
		 */
		blkno = beginblk - (block * (udf_vfsp->udf_lbsize << 3));
		if (blkno + blkcount > (udf_vfsp->udf_lbsize << 3)) {
			/* Only the part that fits in this bitmap block. */
			count = (udf_vfsp->udf_lbsize << 3) - blkno;
		} else {
			count = blkcount;
		}

/*
 * if (begin != end) {
 *	printf("%x %x %x %x %x %x\n",
 *		begin, end, block, blkno, count);
 *	printf("%x %x %x\n", bp->b_un.b_addr, blkno, count);
 * }
 */

		ud_mark_free((uint8_t *)bp->b_un.b_addr, blkno, count);

		beginblk += count;
		blkcount -= count;

		/*
		 * Blocks released into the freed-space bitmap are not
		 * immediately reusable, so the free counts are only
		 * credited when the unallocated bitmap was used.
		 */
		if (ud_part->udp_freed_len == 0) {
			ud_part->udp_nfree += count;
			udf_vfsp->udf_freeblks += count;
		}
		mutex_exit(&udf_vfsp->udf_lock);

		bdwrite(bp);
	}

	return (0);
}
832 
833 
/* ARGSUSED */
/*
 * Return blkcount blocks starting at beginblk to a space-table
 * partition.  Search the entire table for an entry adjacent to the
 * freed range and merge with it; otherwise create a new extent
 * entry at the end of the table (if the table still fits in its
 * block).  The freed-space table is preferred when present.
 * Returns 0 or ENOSPC.
 */
int32_t
ud_free_space_stbl(struct vfs *vfsp,
	struct ud_part *ud_part,
	uint32_t beginblk, uint32_t blkcount)
{
	uint16_t adesc;
	int32_t error = 0, index, count;
	uint32_t block, dummy, sz;
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	struct unall_space_ent *use;

	ud_printf("ud_free_space_stbl\n");

	ASSERT(ud_part);
	ASSERT(ud_part->udp_flags == UDP_SPACETBLS);

	if ((ud_part->udp_freed_len == 0) &&
		(ud_part->udp_unall_len == 0)) {
		return (ENOSPC);
	}

	/* Prefer the freed-space table when one exists. */
	if (ud_part->udp_freed_len != 0) {
		block = ud_part->udp_freed_loc;
	} else {
		block = ud_part->udp_unall_loc;
	}

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	/* 40 is apparently the table header size before use_ad — verify. */
	ASSERT((ud_part->udp_unall_len + 40) <= udf_vfsp->udf_lbsize);

	bp = ud_bread(vfsp->vfs_dev,
			ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number,
				block, 1, &dummy), udf_vfsp->udf_lbsize);

	use = (struct unall_space_ent *)bp->b_un.b_addr;
	sz = SWAP_32(use->use_len_ad);
	adesc = SWAP_16(use->use_icb_tag.itag_flags) & 0x7;
	if (adesc == ICB_FLAG_SHORT_AD) {
		struct short_ad *sad;

		sad = (struct short_ad *)use->use_ad;
		count = sz / sizeof (struct short_ad);
		/*
		 * Check if the blocks being freed
		 * are continuous with any of the
		 * existing extents
		 */
		for (index = 0; index < count; index++, sad++) {
			if (beginblk == (SWAP_32(sad->sad_ext_loc) +
					(SWAP_32(sad->sad_ext_len) /
					udf_vfsp->udf_lbsize))) {
				/* Freed range follows this extent: grow it. */
				dummy = SWAP_32(sad->sad_ext_len) +
					blkcount * udf_vfsp->udf_lbsize;
				sad->sad_ext_len = SWAP_32(dummy);
				goto end;
			} else if ((beginblk + blkcount) ==
					SWAP_32(sad->sad_ext_loc)) {
				/*
				 * Freed range precedes this extent:
				 * move the start back.
				 *
				 * NOTE(review): only sad_ext_loc is
				 * updated here; sad_ext_len does not
				 * appear to grow by blkcount — verify.
				 */
				sad->sad_ext_loc = SWAP_32(beginblk);
				goto end;
			}
		}

		/*
		 * We need to add a new entry.
		 * Check if we have space.
		 */
		if ((40 + sz + sizeof (struct short_ad)) >
				udf_vfsp->udf_lbsize) {
			error = ENOSPC;
			goto end;
		}

		/*
		 * We have enough space
		 * just add the entry at the end
		 */
		dummy = SWAP_32(use->use_len_ad);
		sad = (struct short_ad *)&use->use_ad[dummy];
		sz = blkcount * udf_vfsp->udf_lbsize;
		sad->sad_ext_len = SWAP_32(sz);
		sad->sad_ext_loc = SWAP_32(beginblk);
		dummy += sizeof (struct short_ad);
		use->use_len_ad = SWAP_32(dummy);
	} else if (adesc == ICB_FLAG_LONG_AD) {
		struct long_ad *lad;

		lad = (struct long_ad *)use->use_ad;
		count = sz / sizeof (struct long_ad);
		/*
		 * Check if the blocks being freed
		 * are continuous with any of the
		 * existing extents
		 */
		for (index = 0; index < count; index++, lad++) {
			if (beginblk == (SWAP_32(lad->lad_ext_loc) +
					(SWAP_32(lad->lad_ext_len) /
					udf_vfsp->udf_lbsize))) {
				/* Freed range follows this extent: grow it. */
				dummy = SWAP_32(lad->lad_ext_len) +
					blkcount * udf_vfsp->udf_lbsize;
				lad->lad_ext_len = SWAP_32(dummy);
				goto end;
			} else if ((beginblk + blkcount) ==
					SWAP_32(lad->lad_ext_loc)) {
				/*
				 * Freed range precedes this extent:
				 * move the start back (see NOTE above
				 * about the length not growing).
				 */
				lad->lad_ext_loc = SWAP_32(beginblk);
				goto end;
			}
		}

		/*
		 * We need to add a new entry.
		 * Check if we have space.
		 */
		if ((40 + sz + sizeof (struct long_ad)) >
				udf_vfsp->udf_lbsize) {
			error = ENOSPC;
			goto end;
		}

		/*
		 * We have enough space
		 * just add the entry at the end
		 */
		dummy = SWAP_32(use->use_len_ad);
		lad = (struct long_ad *)&use->use_ad[dummy];
		sz = blkcount * udf_vfsp->udf_lbsize;
		lad->lad_ext_len = SWAP_32(sz);
		lad->lad_ext_loc = SWAP_32(beginblk);
		lad->lad_ext_prn = SWAP_16(ud_part->udp_number);
		dummy += sizeof (struct long_ad);
		use->use_len_ad = SWAP_32(dummy);
	} else {
		/* Unknown allocation-descriptor format. */
		error = ENOSPC;
		goto end;
	}

end:
	/* Write back on success; drop the buffer on failure. */
	if (!error) {
		bdwrite(bp);
	} else {
		brelse(bp);
	}
	return (error);
}
985 
986 /* ARGSUSED */
987 int32_t
988 ud_ialloc(struct ud_inode *pip,
989 	struct ud_inode **ipp, struct vattr *vap, struct cred *cr)
990 {
991 	int32_t err;
992 	uint32_t blkno, size, loc;
993 	uint32_t imode, ichar, lbsize, ea_len, dummy;
994 	uint16_t prn, flags;
995 	struct buf *bp;
996 	struct file_entry *fe;
997 	struct timespec32 time;
998 	struct timespec32 settime;
999 	struct icb_tag *icb;
1000 	struct ext_attr_hdr *eah;
1001 	struct dev_spec_ear *ds;
1002 	struct udf_vfs *udf_vfsp;
1003 	timestruc_t now;
1004 	uid_t uid;
1005 	gid_t gid;
1006 
1007 
1008 	ASSERT(pip);
1009 	ASSERT(vap != NULL);
1010 
1011 	ud_printf("ud_ialloc\n");
1012 
1013 	if (((vap->va_mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
1014 	    ((vap->va_mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime)))
1015 		return (EOVERFLOW);
1016 
1017 	udf_vfsp = pip->i_udf;
1018 	lbsize = udf_vfsp->udf_lbsize;
1019 	prn = pip->i_icb_prn;
1020 
1021 	if ((err = ud_alloc_space(pip->i_vfs, prn,
1022 			0, 1, &blkno, &size, 0, 1)) != 0) {
1023 		return (err);
1024 	}
1025 	loc = ud_xlate_to_daddr(udf_vfsp, prn, blkno, 1, &dummy);
1026 	ASSERT(dummy == 1);
1027 
1028 	bp = ud_bread(pip->i_dev, loc << udf_vfsp->udf_l2d_shift, lbsize);
1029 	if (bp->b_flags & B_ERROR) {
1030 		ud_free_space(pip->i_vfs, prn, blkno, size);
1031 		return (EIO);
1032 	}
1033 	bzero(bp->b_un.b_addr, bp->b_bcount);
1034 	fe = (struct file_entry *)bp->b_un.b_addr;
1035 
1036 	uid = crgetuid(cr);
1037 	fe->fe_uid = SWAP_32(uid);
1038 
1039 	/*
1040 	 * To determine the group-id of the created file:
1041 	 * 1) If the gid is set in the attribute list (non-Sun & pre-4.0
1042 	 *	clients are not likely to set the gid), then use it if
1043 	 *	the process is privileged, belongs to the target group,
1044 	 *	or the group is the same as the parent directory.
1045 	 * 2) If the filesystem was not mounted with the Old-BSD-compatible
1046 	 *	GRPID option, and the directory's set-gid bit is clear,
1047 	 *	then use the process's gid.
1048 	 * 3) Otherwise, set the group-id to the gid of the parent directory.
1049 	 */
1050 	if ((vap->va_mask & AT_GID) &&
1051 		((vap->va_gid == pip->i_gid) || groupmember(vap->va_gid, cr) ||
1052 		secpolicy_vnode_create_gid(cr) == 0)) {
1053 		/*
1054 		 * XXX - is this only the case when a 4.0 NFS client, or a
1055 		 * client derived from that code, makes a call over the wire?
1056 		 */
1057 		fe->fe_gid = SWAP_32(vap->va_gid);
1058 	} else {
1059 		gid = crgetgid(cr);
1060 		fe->fe_gid = (pip->i_char & ISGID) ?
1061 				SWAP_32(pip->i_gid) : SWAP_32(gid);
1062 	}
1063 
1064 	imode = MAKEIMODE(vap->va_type, vap->va_mode);
1065 	ichar = imode & (VSUID | VSGID | VSVTX);
1066 	imode = UD_UPERM2DPERM(imode);
1067 
1068 	/*
1069 	 * Under solaris only the owner can
1070 	 * change the attributes of files so set
1071 	 * the change attribute bit only for user
1072 	 */
1073 	imode |= IATTR;
1074 
1075 	/*
1076 	 * File delete permissions on Solaris are
1077 	 * the permissions on the directory but not the file
1078 	 * when we create a file just inherit the directorys
1079 	 * write permission to be the file delete permissions
1080 	 * Atleast we will be consistent in the files we create
1081 	 */
1082 	imode |= (pip->i_perm & (IWRITE | IWRITE >> 5 | IWRITE >> 10)) << 3;
1083 
1084 	fe->fe_perms = SWAP_32(imode);
1085 
1086 	/*
1087 	 * udf does not have a "." entry in dir's
1088 	 * so even directories have only one link
1089 	 */
1090 	fe->fe_lcount = SWAP_16(1);
1091 
1092 	fe->fe_info_len = 0;
1093 	fe->fe_lbr = 0;
1094 
1095 	gethrestime(&now);
1096 	time.tv_sec = now.tv_sec;
1097 	time.tv_nsec = now.tv_nsec;
1098 	if (vap->va_mask & AT_ATIME) {
1099 		TIMESPEC_TO_TIMESPEC32(&settime, &vap->va_atime)
1100 		ud_utime2dtime(&settime, &fe->fe_acc_time);
1101 	} else
1102 		ud_utime2dtime(&time, &fe->fe_acc_time);
1103 	if (vap->va_mask & AT_MTIME) {
1104 		TIMESPEC_TO_TIMESPEC32(&settime, &vap->va_mtime)
1105 		ud_utime2dtime(&settime, &fe->fe_mod_time);
1106 	} else
1107 		ud_utime2dtime(&time, &fe->fe_mod_time);
1108 	ud_utime2dtime(&time, &fe->fe_attr_time);
1109 
1110 	ud_update_regid(&fe->fe_impl_id);
1111 
1112 	mutex_enter(&udf_vfsp->udf_lock);
1113 	fe->fe_uniq_id = SWAP_64(udf_vfsp->udf_maxuniq);
1114 	udf_vfsp->udf_maxuniq++;
1115 	mutex_exit(&udf_vfsp->udf_lock);
1116 
1117 	ea_len = 0;
1118 	if ((vap->va_type == VBLK) ||
1119 		(vap->va_type == VCHR)) {
1120 		eah = (struct ext_attr_hdr *)fe->fe_spec;
1121 		ea_len = (sizeof (struct ext_attr_hdr) + 3) & ~3;
1122 		eah->eah_ial = SWAP_32(ea_len);
1123 
1124 		ds = (struct dev_spec_ear *)&fe->fe_spec[ea_len];
1125 		ea_len += ud_make_dev_spec_ear(ds,
1126 			getmajor(vap->va_rdev), getminor(vap->va_rdev));
1127 		ea_len = (ea_len + 3) & ~3;
1128 		eah->eah_aal = SWAP_32(ea_len);
1129 		ud_make_tag(udf_vfsp, &eah->eah_tag,
1130 			UD_EXT_ATTR_HDR, blkno, ea_len);
1131 	}
1132 
1133 	fe->fe_len_ear = SWAP_32(ea_len);
1134 	fe->fe_len_adesc = 0;
1135 
1136 	icb = &fe->fe_icb_tag;
1137 	icb->itag_prnde = 0;
1138 	icb->itag_strategy = SWAP_16(STRAT_TYPE4);
1139 	icb->itag_param = 0;
1140 	icb->itag_max_ent = SWAP_16(1);
1141 	switch (vap->va_type) {
1142 		case VREG :
1143 			icb->itag_ftype = FTYPE_FILE;
1144 			break;
1145 		case VDIR :
1146 			icb->itag_ftype = FTYPE_DIRECTORY;
1147 			break;
1148 		case VBLK :
1149 			icb->itag_ftype = FTYPE_BLOCK_DEV;
1150 			break;
1151 		case VCHR :
1152 			icb->itag_ftype = FTYPE_CHAR_DEV;
1153 			break;
1154 		case VLNK :
1155 			icb->itag_ftype = FTYPE_SYMLINK;
1156 			break;
1157 		case VFIFO :
1158 			icb->itag_ftype = FTYPE_FIFO;
1159 			break;
1160 		case VSOCK :
1161 			icb->itag_ftype = FTYPE_C_ISSOCK;
1162 			break;
1163 		default :
1164 			brelse(bp);
1165 			goto error;
1166 	}
1167 	icb->itag_lb_loc = 0;
1168 	icb->itag_lb_prn = 0;
1169 	flags = ICB_FLAG_ONE_AD;
1170 	if ((pip->i_char & ISGID) && (vap->va_type == VDIR)) {
1171 		ichar |= ISGID;
1172 	} else {
1173 		if ((ichar & ISGID) &&
1174 		    secpolicy_vnode_setids_setgids(cr,
1175 			    (gid_t)SWAP_32(fe->fe_gid)) != 0) {
1176 			ichar &= ~ISGID;
1177 		}
1178 	}
1179 	if (ichar & ISUID) {
1180 		flags |= ICB_FLAG_SETUID;
1181 	}
1182 	if (ichar & ISGID) {
1183 		flags |= ICB_FLAG_SETGID;
1184 	}
1185 	if (ichar & ISVTX) {
1186 		flags |= ICB_FLAG_STICKY;
1187 	}
1188 	icb->itag_flags = SWAP_16(flags);
1189 	ud_make_tag(udf_vfsp, &fe->fe_tag, UD_FILE_ENTRY, blkno,
1190 		((uint32_t)&((struct file_entry *)0)->fe_spec) +
1191 			SWAP_32(fe->fe_len_ear) + SWAP_32(fe->fe_len_adesc));
1192 
1193 	BWRITE2(bp);
1194 
1195 	mutex_enter(&udf_vfsp->udf_lock);
1196 	if (vap->va_type == VDIR) {
1197 		udf_vfsp->udf_ndirs++;
1198 	} else {
1199 		udf_vfsp->udf_nfiles++;
1200 	}
1201 	mutex_exit(&udf_vfsp->udf_lock);
1202 
1203 #ifdef	DEBUG
1204 	{
1205 		struct ud_inode *ip;
1206 
1207 		if ((ip = ud_search_icache(pip->i_vfs, prn, blkno)) != NULL) {
1208 			cmn_err(CE_NOTE, "duplicate %p %x\n",
1209 				(void *)ip, (uint32_t)ip->i_icb_lbano);
1210 		}
1211 	}
1212 #endif
1213 
1214 	if ((err = ud_iget(pip->i_vfs, prn, blkno, ipp, bp, cr)) != 0) {
1215 error:
1216 		ud_free_space(pip->i_vfs, prn, blkno, size);
1217 		return (err);
1218 	}
1219 
1220 	return (0);
1221 
1222 noinodes:
1223 	cmn_err(CE_NOTE, "%s: out of inodes\n", pip->i_udf->udf_volid);
1224 	return (ENOSPC);
1225 }
1226 
1227 
1228 void
1229 ud_ifree(struct ud_inode *ip, vtype_t type)
1230 {
1231 	struct udf_vfs *udf_vfsp;
1232 	struct buf *bp;
1233 
1234 	ud_printf("ud_ifree\n");
1235 
1236 	if (ip->i_vfs == NULL) {
1237 		return;
1238 	}
1239 
1240 	udf_vfsp = (struct udf_vfs *)ip->i_vfs->vfs_data;
1241 	bp = ud_bread(ip->i_dev, ip->i_icb_lbano <<
1242 			udf_vfsp->udf_l2d_shift,
1243 			udf_vfsp->udf_lbsize);
1244 	if (bp->b_flags & B_ERROR) {
1245 		/*
1246 		 * Error get rid of bp
1247 		 */
1248 		brelse(bp);
1249 	} else {
1250 		/*
1251 		 * Just trash the inode
1252 		 */
1253 		bzero(bp->b_un.b_addr, 0x10);
1254 		BWRITE(bp);
1255 	}
1256 	ud_free_space(ip->i_vfs, ip->i_icb_prn,
1257 		ip->i_icb_block, 1);
1258 	mutex_enter(&udf_vfsp->udf_lock);
1259 	if (type == VDIR) {
1260 		if (udf_vfsp->udf_ndirs > 1) {
1261 			udf_vfsp->udf_ndirs--;
1262 		}
1263 	} else {
1264 		if (udf_vfsp->udf_nfiles > 0) {
1265 			udf_vfsp->udf_nfiles --;
1266 		}
1267 	}
1268 	mutex_exit(&udf_vfsp->udf_lock);
1269 }
1270 
1271 
1272 /*
1273  * Free storage space associated with the specified inode.  The portion
1274  * to be freed is specified by lp->l_start and lp->l_len (already
1275  * normalized to a "whence" of 0).
1276  *
1277  * This is an experimental facility whose continued existence is not
1278  * guaranteed.  Currently, we only support the special case
1279  * of l_len == 0, meaning free to end of file.
1280  *
1281  * Blocks are freed in reverse order.  This FILO algorithm will tend to
1282  * maintain a contiguous free list much longer than FIFO.
1283  * See also ufs_itrunc() in ufs_inode.c.
1284  *
1285  * Bug: unused bytes in the last retained block are not cleared.
1286  * This may result in a "hole" in the file that does not read as zeroes.
1287  */
int32_t
ud_freesp(struct vnode *vp,
	struct flock64 *lp,
	int32_t flag, struct cred *cr)
{
	int32_t i;
	struct ud_inode *ip = VTOI(vp);
	int32_t error;

	ASSERT(vp->v_type == VREG);
	ASSERT(lp->l_start >= (offset_t)0);	/* checked by convoff */

	ud_printf("udf_freesp\n");

	/* Only the l_len == 0 ("free to end of file") case is supported */
	if (lp->l_len != 0) {
		return (EINVAL);
	}

	rw_enter(&ip->i_contents, RW_READER);
	if (ip->i_size == (u_offset_t)lp->l_start) {
		/* File is already exactly the requested length */
		rw_exit(&ip->i_contents);
		return (0);
	}

	/*
	 * Check if there is any active mandatory lock on the
	 * range that will be truncated/expanded.
	 */
	if (MANDLOCK(vp, ip->i_char)) {
		offset_t save_start;

		/* l_start is mutated below for the probe; restore it after */
		save_start = lp->l_start;

		if (ip->i_size < lp->l_start) {
			/*
			 * "Truncate up" case: need to make sure there
			 * is no lock beyond current end-of-file. To
			 * do so, we need to set l_start to the size
			 * of the file temporarily.
			 */
			lp->l_start = ip->i_size;
		}
		/* Probe with a write lock owned by the calling process */
		lp->l_type = F_WRLCK;
		lp->l_sysid = 0;
		lp->l_pid = ttoproc(curthread)->p_pid;
		/* Honor non-blocking opens: don't sleep on a conflict */
		i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
		rw_exit(&ip->i_contents);
		/*
		 * reclock() leaves l_type == F_UNLCK when no conflicting
		 * lock exists; anything else means the range is locked
		 * by someone else and the truncate must not proceed.
		 */
		if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
		    lp->l_type != F_UNLCK) {
			return (i ? i : EAGAIN);
		}
		rw_enter(&ip->i_contents, RW_READER);

		lp->l_start = save_start;
	}
	/*
	 * Make sure a write isn't in progress (allocating blocks)
	 * by acquiring i_rwlock (we promised ufs_bmap we wouldn't
	 * truncate while it was allocating blocks).
	 * Grab the locks in the right order.
	 */
	rw_exit(&ip->i_contents);
	rw_enter(&ip->i_rwlock, RW_WRITER);
	rw_enter(&ip->i_contents, RW_WRITER);
	error = ud_itrunc(ip, lp->l_start, 0, cr);
	rw_exit(&ip->i_contents);
	rw_exit(&ip->i_rwlock);
	return (error);
}
1357 
1358 
1359 
1360 /*
1361  * Cache is implemented by
1362  * allocating a cluster of blocks
1363  */
1364 int32_t
1365 ud_alloc_from_cache(struct udf_vfs *udf_vfsp,
1366 	struct ud_part *part, uint32_t *blkno)
1367 {
1368 	uint32_t bno, sz;
1369 	int32_t error, index, free = 0;
1370 
1371 	ud_printf("ud_alloc_from_cache\n");
1372 
1373 	ASSERT(udf_vfsp);
1374 
1375 	mutex_enter(&udf_vfsp->udf_lock);
1376 	if (part->udp_cache_count == 0) {
1377 		mutex_exit(&udf_vfsp->udf_lock);
1378 		/* allocate new cluster */
1379 		if ((error = ud_alloc_space(udf_vfsp->udf_vfs,
1380 				part->udp_number, 0, CLSTR_SIZE,
1381 				&bno, &sz, 1, 0)) != 0) {
1382 			return (error);
1383 		}
1384 		if (sz == 0) {
1385 			return (ENOSPC);
1386 		}
1387 		mutex_enter(&udf_vfsp->udf_lock);
1388 		if (part->udp_cache_count == 0) {
1389 			for (index = 0; index < sz; index++, bno++) {
1390 				part->udp_cache[index] = bno;
1391 			}
1392 			part->udp_cache_count = sz;
1393 		} else {
1394 			free = 1;
1395 		}
1396 	}
1397 	part->udp_cache_count--;
1398 	*blkno = part->udp_cache[part->udp_cache_count];
1399 	mutex_exit(&udf_vfsp->udf_lock);
1400 	if (free) {
1401 		ud_free_space(udf_vfsp->udf_vfs, part->udp_number, bno, sz);
1402 	}
1403 	return (0);
1404 }
1405 
1406 /*
1407  * Will be called from unmount
1408  */
1409 int32_t
1410 ud_release_cache(struct udf_vfs *udf_vfsp)
1411 {
1412 	int32_t i, error = 0;
1413 	struct ud_part *part;
1414 	uint32_t start, nblks;
1415 
1416 	ud_printf("ud_release_cache\n");
1417 
1418 	mutex_enter(&udf_vfsp->udf_lock);
1419 	part = udf_vfsp->udf_parts;
1420 	for (i = 0; i < udf_vfsp->udf_npart; i++, part++) {
1421 		if (part->udp_cache_count) {
1422 			nblks = part->udp_cache_count;
1423 			start = part->udp_cache[0];
1424 			part->udp_cache_count = 0;
1425 			mutex_exit(&udf_vfsp->udf_lock);
1426 			ud_free_space(udf_vfsp->udf_vfs,
1427 				part->udp_number, start, nblks);
1428 			mutex_enter(&udf_vfsp->udf_lock);
1429 		}
1430 	}
1431 	mutex_exit(&udf_vfsp->udf_lock);
1432 	return (error);
1433 }
1434