/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>


#include <fs/fs_subr.h>


#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>


int32_t ud_break_create_new_icb(struct ud_inode *, int32_t, uint32_t);
int32_t ud_bump_ext_count(struct ud_inode *, int32_t);
void ud_remove_ext_at_index(struct ud_inode *, int32_t);
int32_t ud_last_alloc_ext(struct ud_inode *, uint64_t, uint32_t, int32_t);
int32_t ud_create_ext(struct ud_inode *, int32_t, uint32_t,
	int32_t, uint64_t, uint64_t *);
int32_t ud_zero_it(struct ud_inode *, uint32_t, uint32_t);

#define	ALLOC_SPACE	0x01	/* allocate disk blocks for the extent */
#define	NEW_EXT		0x02	/* start a new extent at the given index */

#define	MEXT_BITS	30	/* on-disk extent length field is 30 bits */
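
/*
 * ud_bmap_write() and ud_create_ext() below cap any single extent at
 * mext_sz = (1 << MEXT_BITS) - PAGESIZE. The on-disk allocation
 * descriptors keep the extent length in the low 30 bits of a 32-bit
 * word (the top two bits encode the extent type), so one extent can
 * describe at most (1 << 30) - 1 bytes; backing off by PAGESIZE keeps
 * the cap a whole number of pages. For example, with an 8K PAGESIZE,
 * mext_sz = 0x40000000 - 0x2000 = 0x3FFFE000.
 */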

int32_t
ud_bmap_has_holes(struct ud_inode *ip)
{
	int32_t i, error = 0;
	struct icb_ext *iext;

	ud_printf("ud_bmap_has_holes\n");

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	/* An ICB_FLAG_ONE_AD (embedded) file is always contiguous */
	if (ip->i_desc_type != ICB_FLAG_ONE_AD) {
		if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags == IB_UN_RE_AL) {
					error = 1;
					break;
				}
			}
		}
	}

	return (error);
}

int32_t
ud_bmap_read(struct ud_inode *ip, u_offset_t off, daddr_t *bnp, int32_t *lenp)
{
	struct icb_ext *iext;
	daddr_t bno;
	int32_t lbmask, i, l2b, l2d, error = 0, count;
	uint32_t length, block, dummy;

	ud_printf("ud_bmap_read\n");

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	lbmask = ip->i_udf->udf_lbmask;
	l2b = ip->i_udf->udf_l2b_shift;
	l2d = ip->i_udf->udf_l2d_shift;

	if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
		for (i = 0; i < ip->i_ext_used; i++) {
			iext = &ip->i_ext[i];
			if ((iext->ib_offset <= off) &&
				(off < (iext->ib_offset + iext->ib_count))) {
				length = ((iext->ib_offset +
						iext->ib_count - off) +
						lbmask) & ~lbmask;
				if (iext->ib_flags == IB_UN_RE_AL) {
					*bnp = UDF_HOLE;
					*lenp = length;
					break;
				}

				block = iext->ib_block +
					((off - iext->ib_offset) >> l2b);
				count = length >> l2b;

				bno = ud_xlate_to_daddr(ip->i_udf,
					iext->ib_prn, block, count, &dummy);
				ASSERT(dummy != 0);
				ASSERT(dummy <= count);
				*bnp = bno << l2d;
				*lenp = dummy << l2b;

				break;
			}
		}
		if (i == ip->i_ext_used) {
			error = EINVAL;
		}
	}

	return (error);
}
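
/*
 * A note on the interface (illustrative; not part of the original
 * comments): on success *bnp is either UDF_HOLE, when the offset falls
 * inside an unrecorded extent, or a device address in disk sectors
 * (the logical block number shifted left by udf_l2d_shift), and *lenp
 * is the number of contiguous bytes available starting there. A
 * hypothetical caller, holding i_contents, would do:
 *
 *	daddr_t bno;
 *	int32_t contig;
 *
 *	if (ud_bmap_read(ip, off, &bno, &contig) == 0 &&
 *	    bno != UDF_HOLE)
 *		do the I/O against sector "bno" for "contig" bytes;
 */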


/*
 * Extent allocation in the inode
 * When the inode first needs extents we allocate
 * EXT_PER_MALLOC icb_exts at once; when those are
 * used up we allocate EXT_PER_MALLOC more, copy the
 * old extents across, and continue with the new array
 */
#define	BASE(count)	((count) & ~lbmask)
#define	CEIL(count)	(((count) + lbmask) & ~lbmask)

#define	PBASE(count)	((count) & PAGEMASK)
#define	PCEIL(count)	(((count) + PAGEOFFSET) & PAGEMASK)
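
/*
 * For illustration (assuming a 2K logical block, lbmask == 0x7ff, and
 * an 8K page, PAGEOFFSET == 0x1fff): for count == 0x900,
 *
 *	BASE(0x900)  == 0x800	CEIL(0x900)  == 0x1000
 *	PBASE(0x900) == 0	PCEIL(0x900) == 0x2000
 *
 * i.e. BASE/CEIL round to logical block boundaries while PBASE/PCEIL
 * round to MMU page boundaries; the code below uses the page variants
 * whenever it creates or fills holes so that holes stay page aligned.
 */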


/* ARGSUSED3 */
int32_t
ud_bmap_write(struct ud_inode *ip,
	u_offset_t off, int32_t size, int32_t alloc_only, struct cred *cr)
{
	int32_t error = 0, i, isdir, issync;
	struct udf_vfs *udf_vfsp;
	struct icb_ext *iext, *pext;
	uint32_t blkno, sz;
	u_offset_t isize;
	uint32_t acount, prox;
	int32_t blkcount, next;
	int32_t lbmask, l2b;
	uint64_t end_req, end_ext, mext_sz, icb_offset, count;
	int32_t dtype_changed = 0, memory_allocated = 0;
	struct fbuf *fbp = NULL;


	ud_printf("ud_bmap_write\n");

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	if (lblkno(udf_vfsp, off) < 0) {
		return (EFBIG);
	}

	issync = ((ip->i_flag & ISYNC) != 0);

	isdir = (ip->i_type == VDIR);
	if (isdir || issync) {
		alloc_only = 0;		/* force zeroing of new blocks */
	}

	end_req = BASE(off) + size;
	if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
		if (end_req < ip->i_max_emb) {
			goto out;
		}

		if (ip->i_size != 0) {
			error = fbread(ITOV(ip), 0, ip->i_size, S_OTHER, &fbp);
			if (error != 0) {
				goto out;
			}
		} else {
			fbp = NULL;
		}
		/*
		 * Change the desc_type
		 */
		ip->i_desc_type = ICB_FLAG_SHORT_AD;
		dtype_changed = 1;

one_ad_no_i_ext:
		ASSERT(ip->i_ext == NULL);
		ASSERT(ip->i_astrat == STRAT_TYPE4);

		ip->i_ext_used = 0;
		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
		ip->i_cur_max_ext--;
		if (end_req > mext_sz) {
			next = end_req / mext_sz;
		} else {
			next = 1;
		}
		ip->i_ext_count =
			((next / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
		iext = ip->i_ext = (struct icb_ext *)kmem_zalloc(
			ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP);
		memory_allocated = 1;

		/* There will be at least EXT_PER_MALLOC icb_exts allocated */

one_ad_i_ext:
		icb_offset = 0;
		count = end_req;

		/* Can we create a HOLE */

		if ((PCEIL(ip->i_size) < PBASE(off)) &&
			((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) {

			if (ip->i_size != 0) {

				/*
				 * Allocate one block for the old
				 * data (it cannot be more than one page)
				 */

				count = PAGESIZE;
				if (error = ud_create_ext(ip, ip->i_ext_used,
					ALLOC_SPACE | NEW_EXT, alloc_only,
					icb_offset, &count)) {
					goto embedded_error;
				}
				icb_offset = PAGESIZE;
			}

			/*
			 * Allocate a hole from PCEIL(ip->i_size) to PBASE(off)
			 */

			count = PBASE(off) - PCEIL(ip->i_size);
			(void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT,
					alloc_only, icb_offset, &count);
			icb_offset = PBASE(off);

			/*
			 * Allocate the rest of the space PBASE(off) to end_req
			 */
			count = end_req - PBASE(off);
		} else {
			/*
			 * If no hole can be created then allocate
			 * space till the end of the request
			 */
			count = end_req;
		}

		if (error = ud_create_ext(ip, ip->i_ext_used,
				ALLOC_SPACE | NEW_EXT,
				alloc_only, icb_offset, &count)) {
embedded_error:
			/*
			 * Something went wrong; the most probable
			 * cause is that the file system is full.
			 * We know the file came in as an embedded file,
			 * so undo whatever we did in this block of code.
			 */
			if (dtype_changed) {
				ip->i_desc_type = ICB_FLAG_ONE_AD;
			}
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags != IB_UN_RE_AL) {
					ud_free_space(ip->i_udf->udf_vfs,
						iext->ib_prn, iext->ib_block,
						(iext->ib_count + lbmask) >>
							l2b);
				}
			}
			if (memory_allocated) {
				kmem_free(ip->i_ext,
					ip->i_ext_count *
					sizeof (struct icb_ext));
				ip->i_ext = NULL;
				ip->i_ext_count = ip->i_ext_used = 0;
			}
		}

		if (fbp != NULL) {
			fbrelse(fbp, S_WRITE);
		}

		return (error);
	} else {

		/*
		 * Type 4 directories being created
		 */
		if (ip->i_ext == NULL) {
			goto one_ad_no_i_ext;
		}

		/*
		 * Read all the icb extents into memory
		 */
		if (ud_read_icb_till_off(ip, ip->i_size) != 0) {
			error = EINVAL;
			goto out;
		}

		isize = CEIL(ip->i_size);

		if (end_req > isize) {

			/*
			 * The new file size is greater
			 * than the old size
			 */

			if (ip->i_ext == NULL) {
				goto one_ad_no_i_ext;
			} else if (ip->i_ext_used == 0) {
				goto one_ad_i_ext;
			}

			error = ud_last_alloc_ext(ip, off, size, alloc_only);

			return (error);
		} else {

			/*
			 * The file is not growing past the rounded-up
			 * old size; the new end will be less than
			 * iext->ib_offset + CEIL(iext->ib_count)
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];

			if (end_req > (iext->ib_offset + iext->ib_count)) {

				iext->ib_count = end_req - iext->ib_offset;

				if (iext->ib_flags != IB_UN_RE_AL) {
					error = 0;
					goto out;
				}
			}
		}
	}

	/* By this point the end of last extent is >= BASE(off) + size */

	ASSERT(ip->i_ext);

	/*
	 * Figure out the icb_ext that has offset "off"
	 */
	for (i = 0; i < ip->i_ext_used; i++) {
		iext = &ip->i_ext[i];
		if ((iext->ib_offset <= off) &&
			((iext->ib_offset + iext->ib_count) > off)) {
			break;
		}
	}

	/*
	 * iext will have offset "off"
	 */


	do {
		iext = &ip->i_ext[i];

		if ((iext->ib_flags & IB_UN_RE_AL) == 0) {

			/*
			 * Already allocated; nothing to do
			 */

			i++;
		} else {

			/*
			 * We are in a hole.
			 * Allocate the required space
			 * while trying to create smaller holes
			 */

			if ((PBASE(off) > PBASE(iext->ib_offset)) &&
				((PBASE(off) - PBASE(iext->ib_offset)) >=
						PAGESIZE)) {

				/*
				 * Allocate space from the beginning of
				 * the old hole to the beginning of the
				 * new hole. We want all holes created
				 * by us to be MMU page aligned
				 */

				if (PBASE(iext->ib_offset) !=
						BASE(iext->ib_offset)) {
					if ((error = ud_break_create_new_icb(
						ip, i, BASE(iext->ib_offset) -
						PBASE(iext->ib_offset))) != 0) {
						return (error);
					}
					goto alloc_cur_ext;
				}

				/*
				 * Create the new hole
				 */

				if ((error = ud_break_create_new_icb(ip, i,
					PBASE(off) - iext->ib_offset)) != 0) {
					return (error);
				}
				iext = &ip->i_ext[i];
				i++;
				continue;
			}

			end_ext = iext->ib_offset + iext->ib_count;

			if ((PBASE(end_ext) > PCEIL(end_req)) &&
				((PBASE(end_ext) - PCEIL(end_req)) >=
							PAGESIZE)) {
				/*
				 * We can create a hole from
				 * PCEIL(end_req) to end_ext
				 */
				if ((error = ud_break_create_new_icb(ip, i,
				PCEIL(end_req) - iext->ib_offset)) != 0) {
					return (error);
				}
			}


alloc_cur_ext:
			/*
			 * Allocate the current extent
			 */


			/*
			 * If the previous extent
			 * is allocated then try to allocate
			 * adjacent to the previous extent
			 */
			prox = 0;
			if (i != 0) {
				pext = &ip->i_ext[i - 1];
				if (pext->ib_flags != IB_UN_RE_AL) {
					prox = pext->ib_block +
						(CEIL(pext->ib_count) >> l2b);
				}
			}

			iext = &ip->i_ext[i];
			blkcount = CEIL(iext->ib_count) >> l2b;

			if ((error = ud_alloc_space(ip->i_vfs,
					ip->i_icb_prn, prox, blkcount,
					&blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			ip->i_lbr += sz;
			if (sz == 0) {
				return (ENOSPC);
			}

			if (alloc_only == 0) {
				error = ud_zero_it(ip, blkno, sz);
			}

			acount = sz << l2b;
			if ((prox == blkno) &&
				((pext->ib_count + acount) < mext_sz)) {

				/*
				 * We are able to allocate adjacent to
				 * the previous extent. Increment the
				 * previous extent's count if the size
				 * of the extent is not greater than
				 * the max extent size
				 */

				pext = &ip->i_ext[i - 1];
				pext->ib_count += acount;

				if (sz == blkcount) {
					/*
					 * and get rid of the current
					 * extent since we have
					 * allocated all of its size
					 * and incremented the
					 * previous extents count
					 */
					ud_remove_ext_at_index(ip, i);
				} else {
					/*
					 * reduce the count of the
					 * current extent by the amount
					 * allocated in the last extent
					 */
					ASSERT(acount < iext->ib_count);
					iext->ib_count -= acount;
					iext->ib_offset += acount;
				}
			} else {
				if (sz < blkcount) {
					if ((error = ud_break_create_new_icb(
						ip, i, sz << l2b)) != 0) {
						return (error);
					}
				}
				iext = &ip->i_ext[i];
				count -= CEIL(iext->ib_count);
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_flags &= ~IB_UN_RE_AL;
/*
 *				iext->ib_flags |= IB_UN_REC;
 */
				i++;
				continue;
			}
		}
	} while ((iext->ib_offset + iext->ib_count) < end_req);

out:
	return (error);
}


/*
 * increase i_con/i_ext arrays and set new elements
 * using long or short allocation descriptors
 */
static void
ud_common_ad(struct ud_inode *ip, struct buf *bp)
{
	int32_t ndesc, count, lbmask;
	uint32_t length;
	struct alloc_ext_desc *aed;
	struct icb_ext *iext, *con;
	u_offset_t offset;
	long_ad_t *lad;
	short_ad_t *sad;
	int islong;
	void *addr;

	addr = bp->b_un.b_addr + sizeof (struct alloc_ext_desc);
	aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
	length = SWAP_32(aed->aed_len_aed);
	if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		islong = 1;
		lad = addr;
		ndesc = length / sizeof (*lad);
	} else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		islong = 0;
		sad = addr;
		ndesc = length / sizeof (*sad);
	} else
		return;

	/*
	 * realloc i_ext array
	 */
	count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) *
	    EXT_PER_MALLOC;
	addr = kmem_zalloc(count * sizeof (struct icb_ext), KM_SLEEP);
	bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext));
	kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext));
	ip->i_ext = addr;
	ip->i_ext_count = count;

	/*
	 * scan descriptors
	 */
	lbmask = ip->i_udf->udf_lbmask;
	iext = &ip->i_ext[ip->i_ext_used - 1];
	offset = iext->ib_offset + iext->ib_count;
	iext++;
	while (ndesc--) {
		if (islong)
			length = SWAP_32(lad->lad_ext_len);
		else
			length = SWAP_32(sad->sad_ext_len);

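		/*
		 * The extent length word packs the extent type into its
		 * top two bits and the byte count into the low 30 bits
		 * (hence the 0x3FFFFFFF masks and ">> 30" shifts below);
		 * a zero length terminates the descriptor list, and type
		 * IB_CON names a continuation extent holding further
		 * allocation descriptors rather than file data.
		 */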
		if ((length & 0x3FFFFFFF) == 0)
			break;
		else if (((length >> 30) & IB_MASK) == IB_CON) {
			if (ip->i_con_used == ip->i_con_count) {
				struct icb_ext *old;
				int32_t old_count;

				old = ip->i_con;
				old_count = ip->i_con_count *
				    sizeof (struct icb_ext);
				ip->i_con_count += EXT_PER_MALLOC;
				ip->i_con = kmem_zalloc(ip->i_con_count *
				    sizeof (struct icb_ext), KM_SLEEP);

				if (old) {
					bcopy(old, ip->i_con, old_count);
					kmem_free(old, old_count);
				}
			}
			con = &ip->i_con[ip->i_con_used];
			if (islong) {
				con->ib_prn = SWAP_16(lad->lad_ext_prn);
				con->ib_block = SWAP_32(lad->lad_ext_loc);
			} else {
				con->ib_prn = ip->i_icb_prn;
				con->ib_block = SWAP_32(sad->sad_ext_loc);
			}
			con->ib_count = length & 0x3FFFFFFF;
			con->ib_flags = (length >> 30) & IB_MASK;
			ip->i_con_used++;
			break;
		}

		if (islong) {
			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
			iext->ib_block = SWAP_32(lad->lad_ext_loc);
			lad++;
		} else {
			iext->ib_prn = 0;
			iext->ib_block = SWAP_32(sad->sad_ext_loc);
			sad++;
		}
		iext->ib_count = length & 0x3FFFFFFF;
		iext->ib_offset = offset;
		iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
		iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
		offset += (iext->ib_count + lbmask) & (~lbmask);
		iext->ib_flags = (length >> 30) & IB_MASK;
		ip->i_ext_used++;
		iext++;
	}
}


static int32_t
ud_read_next_cont(struct ud_inode *ip)
{
	uint32_t dummy, error = 0;
	struct alloc_ext_desc *aed;
	struct icb_ext *cont;
	struct buf *bp;
	daddr_t bno;

	cont = &ip->i_con[ip->i_con_read];
	ASSERT(cont->ib_count > 0);

	bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block,
	    1, &dummy);
	bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift,
	    cont->ib_count);
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;
	else {
		aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&aed->aed_tag, UD_ALLOC_EXT_DESC,
		    cont->ib_block, 1, cont->ib_count))
			error = EINVAL;
	}

	if (error == 0)
		ud_common_ad(ip, bp);

	brelse(bp);
	return (error);
}


int32_t
ud_read_icb_till_off(struct ud_inode *ip, u_offset_t offset)
{
	int32_t error = 0;
	struct icb_ext *iext;

	ud_printf("ud_read_icb_till_off\n");

	if (ip->i_desc_type == ICB_FLAG_ONE_AD)
		return (0);
	else if ((ip->i_astrat != STRAT_TYPE4) &&
	    (ip->i_astrat != STRAT_TYPE4096))
		return (EINVAL);
	else if (ip->i_ext_used == 0)
		return ((ip->i_size == 0) ? 0 : EINVAL);

	/*
	 * supported allocation strategies are
	 * STRAT_TYPE4 and STRAT_TYPE4096
	 */

	mutex_enter(&ip->i_con_lock);
	iext = &ip->i_ext[ip->i_ext_used - 1];
	while ((iext->ib_offset + iext->ib_count) < offset) {
		if (ip->i_con_used == ip->i_con_read) {
			error = EINVAL;
			break;
		}
		if (error = ud_read_next_cont(ip))
			break;
		ip->i_con_read++;
		iext = &ip->i_ext[ip->i_ext_used - 1];
	}
	mutex_exit(&ip->i_con_lock);

	return (error);
}


/*
 * Assumes that off is beyond ip->i_size
 * and that at least one extent is in use
 */
int32_t
ud_last_alloc_ext(struct ud_inode *ip, uint64_t off,
		uint32_t size, int32_t alloc_only)
{
	struct icb_ext *iext;
	struct udf_vfs *udf_vfsp;
	int32_t lbsize, lbmask;
	uint64_t end_req, end_count, icb_offset;
	uint64_t count;
	int32_t error = 0;


	udf_vfsp = ip->i_udf;
	lbsize = udf_vfsp->udf_lbsize;
	lbmask = udf_vfsp->udf_lbmask;

	end_req = BASE(off) + size;


	/*
	 * If we are here it means the file
	 * is growing beyond the end of the
	 * current block. So round up the
	 * last extent
	 */

	iext = &ip->i_ext[ip->i_ext_used - 1];
	iext->ib_count = CEIL(iext->ib_count);

	/*
	 * Figure out if we can create
	 * a hole here
	 */


	end_count = iext->ib_offset + iext->ib_count;

	if ((PCEIL(end_count) < PBASE(off)) &&
		((PBASE(off) - PCEIL(end_count)) >= PAGESIZE)) {

		count = PCEIL(end_count) - CEIL(end_count);
		if (count >= lbsize) {

			/*
			 * There is space between the end of
			 * the last extent and the beginning
			 * of the hole to be created.
			 * Allocate blocks for it
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];
			icb_offset = iext->ib_offset + CEIL(iext->ib_count);

			if (iext->ib_flags == IB_UN_RE_AL) {

				/*
				 * The last extent is an unallocated
				 * extent. Create a new allocated
				 * extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used,
					ALLOC_SPACE | NEW_EXT,
					alloc_only, icb_offset, &count);

			} else {

				/*
				 * The last extent is allocated;
				 * try to allocate adjacent to the
				 * last extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used - 1,
						ALLOC_SPACE, alloc_only,
						icb_offset, &count);
			}

			if (error != 0) {
				return (error);
			}
		}

		iext = &ip->i_ext[ip->i_ext_used - 1];
		end_count = iext->ib_offset + iext->ib_count;
		count = PBASE(off) - PCEIL(end_count);
		icb_offset = PCEIL(end_count);

		if (iext->ib_flags == IB_UN_RE_AL) {

			/*
			 * The last extent is unallocated
			 * Just bump the extent count
			 */
			(void) ud_create_ext(ip, ip->i_ext_used - 1,
					0, alloc_only, icb_offset, &count);
		} else {

			/*
			 * Last extent is allocated
			 * round up the size of the extent to
			 * lbsize and allocate a new unallocated extent
			 */
			iext->ib_count = CEIL(iext->ib_count);
			(void) ud_create_ext(ip, ip->i_ext_used,
				NEW_EXT, alloc_only, icb_offset, &count);
		}

		icb_offset = PBASE(off);
	} else {

		/*
		 * We cannot create any hole between
		 * the last extent and the off, so
		 * round up the count in the last extent
		 */

		iext = &ip->i_ext[ip->i_ext_used - 1];
		iext->ib_count = CEIL(iext->ib_count);

	}


	iext = &ip->i_ext[ip->i_ext_used - 1];
	count = end_req - (iext->ib_offset + iext->ib_count);
	icb_offset = iext->ib_offset + CEIL(iext->ib_count);

	if (iext->ib_flags == IB_UN_RE_AL) {

		/*
		 * Last extent was an unallocated extent;
		 * create a new extent
		 */

		error = ud_create_ext(ip, ip->i_ext_used,
			ALLOC_SPACE | NEW_EXT, alloc_only, icb_offset, &count);
	} else {

		/*
		 * Last extent was an allocated extent;
		 * try to allocate adjacent to the old blocks
		 */

		error = ud_create_ext(ip, ip->i_ext_used - 1,
			ALLOC_SPACE, alloc_only, icb_offset, &count);
	}

	return (error);
}

/*
 * Break up the icb_ext at index
 * into two icb_exts:
 * one at index with ib_count "count" and
 * the other at index+1 with ib_count = old ib_count - count
 */
int32_t
ud_break_create_new_icb(struct ud_inode *ip,
	int32_t index, uint32_t count)
{
	int32_t i, error;
	struct icb_ext *iext, *next;


	ud_printf("ud_break_create_new_icb\n");
	iext = &ip->i_ext[index];

	ASSERT(count < iext->ib_count);

	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	for (i = ip->i_ext_used; i > index; i--) {
		ip->i_ext[i] = ip->i_ext[i - 1];
	}

	next = &ip->i_ext[index + 1];
	iext = &ip->i_ext[index];

	iext->ib_count = count;
	next->ib_count -= count;
	next->ib_offset = iext->ib_offset + iext->ib_count;
	if (iext->ib_flags != IB_UN_RE_AL) {
		next->ib_block = iext->ib_block +
			(iext->ib_count >> ip->i_udf->udf_l2b_shift);
	}
	ip->i_ext_used++;
	return (0);
}
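
/*
 * A worked example of the split above (illustrative numbers, not from
 * the original source): with a 2K logical block, breaking an 8K
 * allocated extent { ib_offset 0, ib_block 100, ib_count 0x2000 } at
 * count == 0x800 leaves
 *
 *	i_ext[index]     = { ib_offset 0,     ib_block 100, ib_count 0x800 }
 *	i_ext[index + 1] = { ib_offset 0x800, ib_block 101, ib_count 0x1800 }
 *
 * For an unallocated (hole) extent only the offset/count change, since
 * a hole has no starting block to advance.
 */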

void
ud_remove_ext_at_index(struct ud_inode *ip, int32_t index)
{
	int32_t i;

	ASSERT(index <= ip->i_ext_used);

	for (i = index; i < ip->i_ext_used; i++) {
		if ((i + 1) < ip->i_ext_count) {
			ip->i_ext[i] = ip->i_ext[i + 1];
		} else {
			bzero(&ip->i_ext[i], sizeof (struct icb_ext));
		}
	}
	ip->i_ext_used--;
}

int32_t
ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag)
{
	int32_t error = 0;
	struct icb_ext *iext;
	uint32_t old_count, elen;

	ASSERT(ip);
	ASSERT(sleep_flag == KM_SLEEP);

	ud_printf("ud_bump_ext_count\n");

	if (ip->i_ext_used >= ip->i_ext_count) {

		old_count = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext_count += EXT_PER_MALLOC;
		iext = kmem_zalloc(sizeof (struct icb_ext) *
				ip->i_ext_count, sleep_flag);
		bcopy(ip->i_ext, iext, old_count);
		kmem_free(ip->i_ext, old_count);
		ip->i_ext = iext;
	}

	if (ip->i_ext_used >= ip->i_cur_max_ext) {
		int32_t prox;
		struct icb_ext *icon;
		uint32_t blkno, sz;
		int32_t lbmask, l2b;

		lbmask = ip->i_udf->udf_lbmask;
		l2b = ip->i_udf->udf_l2b_shift;

		if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) {
			return (error);
		}

		/*
		 * If there are any old continuation extents,
		 * allocate the new one adjacent to the old one
		 */
		if (ip->i_con_used != 0) {
			icon = &ip->i_con[ip->i_con_used - 1];
			prox = icon->ib_block + (CEIL(icon->ib_count) >> l2b);
		} else {
			prox = 0;
		}

		/*
		 * Allocate space
		 */
		if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn,
				prox, 1, &blkno, &sz, 0, 0)) != 0) {
			return (error);
		}
		if (sz == 0) {
			return (ENOSPC);
		}

		sz <<= l2b;

		if (ip->i_con_used == ip->i_con_count) {
			struct icb_ext *old;
			int32_t old_count;

			old = ip->i_con;
			old_count = ip->i_con_count *
				sizeof (struct icb_ext);
			ip->i_con_count += EXT_PER_MALLOC;
			ip->i_con = kmem_zalloc(ip->i_con_count *
				sizeof (struct icb_ext), KM_SLEEP);
			if (old != 0) {
				bcopy(old, ip->i_con, old_count);
				kmem_free(old, old_count);
			}
		}
		icon = &ip->i_con[ip->i_con_used++];
		icon->ib_flags = IB_CON;
		icon->ib_prn = ip->i_icb_prn;
		icon->ib_block = blkno;
		icon->ib_count = sz;
		icon->ib_offset = 0;
		icon->ib_marker1 = (uint32_t)0xAAAAAAAA;
		icon->ib_marker2 = (uint32_t)0xBBBBBBBB;

		/*
		 * Bump the i_cur_max_ext according to
		 * the space allocated
		 */
		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
			elen = sizeof (struct short_ad);
		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			elen = sizeof (struct long_ad);
		} else {
			return (ENOSPC);
		}
		sz = sz - (sizeof (struct alloc_ext_desc) + elen);
		ip->i_cur_max_ext += sz / elen;
	}
	return (error);
}
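
/*
 * A sketch of the i_cur_max_ext arithmetic above (illustrative, not
 * from the original comments): each continuation block donates its
 * size minus the alloc_ext_desc header and minus one descriptor slot,
 * the latter presumably held back for the terminating or chaining
 * entry that ud_common_ad() expects. With a 2K logical block and
 * 8-byte short_ads that is
 *
 *	(2048 - sizeof (struct alloc_ext_desc) - 8) / 8
 *
 * additional in-core extents per continuation block allocated.
 */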

int32_t
ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags,
	int32_t alloc_only, uint64_t offset, uint64_t *count)
{
	struct icb_ext *iext, *pext;
	struct udf_vfs *udf_vfsp;
	int32_t error = 0, blkcount, acount;
	uint32_t blkno, sz, prox, mext_sz;
	int32_t lbmask, l2b;

	if (*count == 0) {
		return (0);
	}

begin:
	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	iext = &ip->i_ext[index];
	if (flags & ALLOC_SPACE) {
		if ((flags & NEW_EXT) ||
			(ip->i_ext_count == 0)) {

			iext->ib_flags = 0;
			iext->ib_prn = ip->i_icb_prn;
			if (*count > mext_sz) {
				blkcount = mext_sz >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if ((error = ud_alloc_space(ip->i_vfs,
					ip->i_icb_prn, 0, blkcount,
					&blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			ip->i_lbr += sz;
			iext->ib_block = blkno;
			acount = sz << l2b;
			if ((sz << l2b) > *count) {
				iext->ib_count = *count;
				*count = 0;
			} else {
				iext->ib_count = sz << l2b;
				*count -= iext->ib_count;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;
		} else {
			if ((iext->ib_count + *count) > mext_sz) {
				blkcount = (mext_sz - iext->ib_count) >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if (blkcount == 0) {
				flags |= NEW_EXT;
				index++;
				goto begin;
			}
			prox = iext->ib_block + (CEIL(iext->ib_count) >> l2b);
			if ((error = ud_alloc_space(ip->i_vfs,
					ip->i_icb_prn, prox, blkcount,
					&blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			acount = sz << l2b;
			if (acount > *count) {
				acount = *count;
				*count = 0;
			} else {
				*count -= acount;
			}
			ip->i_lbr += sz;
			if (prox == blkno) {
				iext->ib_count += acount;
			} else {
				if ((error = ud_bump_ext_count(ip, KM_SLEEP))
						!= 0) {
					return (error);
				}
				pext = &ip->i_ext[index];
				iext = &ip->i_ext[index + 1];
				iext->ib_flags = 0;
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_offset =
					pext->ib_offset + pext->ib_count;
				iext->ib_count = acount;
				/*
				 * Increment the index, since we have used
				 * the extent at [index+1] above.
				 */
				index++;
				if (ip->i_ext_used <= index)
					ip->i_ext_used++;
			}
		}
		if (alloc_only == 0) {
			error = ud_zero_it(ip, blkno, sz);
		}
		if (*count) {
			offset = iext->ib_offset + CEIL(iext->ib_count);
			flags |= NEW_EXT;
			index++;
			goto begin;
		}
	} else {
		if (flags & NEW_EXT) {
			iext->ib_flags = IB_UN_RE_AL;
			iext->ib_prn = 0;
			iext->ib_block = 0;
			if (*count > mext_sz) {
				iext->ib_count = mext_sz;
				*count -= iext->ib_count;
			} else {
				iext->ib_count = *count;
				*count = 0;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;
		} else {
			ASSERT(iext->ib_flags == IB_UN_RE_AL);
			if ((iext->ib_count + *count) > mext_sz) {
				acount = mext_sz - iext->ib_count;
				iext->ib_count += acount;
				*count -= acount;
			} else {
				iext->ib_count += *count;
				*count = 0;
			}
		}
		if (*count != 0) {
			offset = iext->ib_offset + CEIL(iext->ib_count);
			flags |= NEW_EXT;
			index++;
			goto begin;
		}
	}
	iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
	iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
	return (error);
}
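
/*
 * A summary of the flag combinations ud_create_ext() is called with,
 * as read from the code above (not an exhaustive contract):
 *
 *	ALLOC_SPACE | NEW_EXT	carve a fresh allocated extent at index
 *	ALLOC_SPACE		extend the allocated extent at index,
 *				trying to allocate adjacent blocks
 *	NEW_EXT			record a fresh unallocated (hole) extent
 *	0			grow the unallocated extent at index
 *
 * In every case *count is decremented by the bytes actually covered,
 * and the function loops on to a new extent while *count remains.
 */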

#undef	CEIL
#undef	BASE

int32_t
ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count)
{
	struct udf_vfs *udf_vfsp;
	uint32_t bno, dummy;
	int32_t error;
	struct buf *bp;

	/*
	 * Do not use the bio routines, since the
	 * buffer could sit in the cache long enough
	 * for the space to be allocated, freed, and
	 * then allocated again
	 */
	udf_vfsp = ip->i_udf;
	bno = ud_xlate_to_daddr(udf_vfsp,
		ip->i_icb_prn, start_block, block_count, &dummy);

	dummy = block_count << udf_vfsp->udf_l2b_shift;
	bp = (struct buf *)kmem_zalloc(biosize(), KM_SLEEP);
	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

	bp->b_flags = B_WRITE | B_BUSY;
	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bno << udf_vfsp->udf_l2d_shift;
	bp->b_bcount = dummy;
	bp->b_un.b_addr = kmem_zalloc(bp->b_bcount, KM_SLEEP);
	bp->b_file = ip->i_vnode;
	bp->b_offset = -1;

	(void) bdev_strategy(bp);
	if (error = biowait(bp)) {
		cmn_err(CE_WARN, "error in write\n");
	}

	kmem_free(bp->b_un.b_addr, dummy);
	sema_destroy(&bp->b_io);
	sema_destroy(&bp->b_sem);
	kmem_free((caddr_t)bp, biosize());

	return (error);
}
1295