/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/signal.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/buf.h>
#include <sys/vfs.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/mode.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/flock.h>
#include <sys/kmem.h>
#include <sys/uio.h>
#include <sys/dnlc.h>
#include <sys/conf.h>
#include <sys/errno.h>
#include <sys/mman.h>
#include <sys/fbuf.h>
#include <sys/pathname.h>
#include <sys/debug.h>
#include <sys/vmsystm.h>
#include <sys/cmn_err.h>
#include <sys/dirent.h>
#include <sys/modctl.h>
#include <sys/statvfs.h>
#include <sys/mount.h>
#include <sys/sunddi.h>
#include <sys/bootconf.h>

#include <vm/hat.h>
#include <vm/page.h>
#include <vm/pvn.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <vm/rm.h>
#include <sys/swap.h>

#include <fs/fs_subr.h>

#include <sys/fs/udf_volume.h>
#include <sys/fs/udf_inode.h>

int32_t ud_break_create_new_icb(struct ud_inode *, int32_t, uint32_t);
int32_t ud_bump_ext_count(struct ud_inode *, int32_t);
void ud_remove_ext_at_index(struct ud_inode *, int32_t);
int32_t ud_last_alloc_ext(struct ud_inode *, uint64_t, uint32_t, int32_t);
int32_t ud_create_ext(struct ud_inode *, int32_t, uint32_t,
	int32_t, uint64_t, uint64_t *);
int32_t ud_zero_it(struct ud_inode *, uint32_t, uint32_t);

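/*
 * Flags for ud_create_ext(): ALLOC_SPACE asks for real disk blocks to
 * be allocated, NEW_EXT starts a new icb_ext at the given index instead
 * of extending the one already there.  Without ALLOC_SPACE the extent
 * is recorded as an unallocated (hole) extent.
 */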
#define	ALLOC_SPACE	0x01
#define	NEW_EXT		0x02

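/*
 * An extent length is stored in 30 bits (the top two bits of the
 * 32-bit allocation descriptor length hold the extent type), so a
 * single extent can describe at most 2^30 bytes.  We keep our extents
 * one page short of that limit so that rounding to page boundaries
 * never overflows the field.
 */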
#define	MEXT_BITS	30

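/*
 * Check whether the file has any unallocated (hole) extents.  Returns
 * 0 when the file is fully allocated, 1 when a hole is found, or the
 * error from reading in the allocation descriptors.  Embedded
 * (ICB_FLAG_ONE_AD) files keep their data inside the ICB and can
 * never have holes.
 */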
int32_t
ud_bmap_has_holes(struct ud_inode *ip)
{
	int32_t i, error = 0;
	struct icb_ext *iext;

	ud_printf("ud_bmap_has_holes\n");

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	/* ICB_FLAG_ONE_AD is always contiguous */
	if (ip->i_desc_type != ICB_FLAG_ONE_AD) {
		if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags == IB_UN_RE_AL) {
					error = 1;
					break;
				}
			}
		}
	}

	return (error);
}

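/*
 * Translate a file offset into the disk address of the underlying
 * blocks.  On success *bnp is the device block number (UDF_HOLE for an
 * unallocated extent) and *lenp the number of contiguous bytes
 * starting there.  EINVAL is returned when off lies beyond the last
 * extent.
 */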
int32_t
ud_bmap_read(struct ud_inode *ip, u_offset_t off, daddr_t *bnp, int32_t *lenp)
{
	struct icb_ext *iext;
	daddr_t bno;
	int32_t lbmask, i, l2b, l2d, error = 0, count;
	uint32_t length, block, dummy;

	ud_printf("ud_bmap_read\n");

	ASSERT(RW_LOCK_HELD(&ip->i_contents));

	lbmask = ip->i_udf->udf_lbmask;
	l2b = ip->i_udf->udf_l2b_shift;
	l2d = ip->i_udf->udf_l2d_shift;

	if ((error = ud_read_icb_till_off(ip, ip->i_size)) == 0) {
		for (i = 0; i < ip->i_ext_used; i++) {
			iext = &ip->i_ext[i];
			if ((iext->ib_offset <= off) &&
			    (off < (iext->ib_offset + iext->ib_count))) {
				length = ((iext->ib_offset +
				    iext->ib_count - off) +
				    lbmask) & ~lbmask;
				if (iext->ib_flags == IB_UN_RE_AL) {
					*bnp = UDF_HOLE;
					*lenp = length;
					break;
				}

				block = iext->ib_block +
				    ((off - iext->ib_offset) >> l2b);
				count = length >> l2b;

				bno = ud_xlate_to_daddr(ip->i_udf,
				    iext->ib_prn, block, count, &dummy);
				ASSERT(dummy != 0);
				ASSERT(dummy <= count);
				*bnp = bno << l2d;
				*lenp = dummy << l2b;

				break;
			}
		}
		if (i == ip->i_ext_used) {
			error = EINVAL;
		}
	}

	return (error);
}

/*
 * Extent allocation in the inode.
 * Initially when the inode is converted we allocate EXT_PER_MALLOC
 * icb_exts; once these are used up we allocate EXT_PER_MALLOC more,
 * copy the old extents over, and start using the new array.
 */
#define	BASE(count)	((count) & ~lbmask)
#define	CEIL(count)	(((count) + lbmask) & ~lbmask)

#define	PBASE(count)	((count) & PAGEMASK)
#define	PCEIL(count)	(((count) + PAGEOFFSET) & PAGEMASK)
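
/*
 * BASE/CEIL round a byte count down/up to a logical block boundary,
 * PBASE/PCEIL to a page boundary.  For example, with a 2K logical
 * block (lbmask == 0x7ff) and 4K pages, BASE(0x1300) == 0x1000,
 * CEIL(0x1300) == 0x1800, PBASE(0x1300) == 0x1000 and
 * PCEIL(0x1300) == 0x2000.
 */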
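/*
 * Ensure that disk blocks (or, where a hole can be kept, unallocated
 * extents) back the range BASE(off) .. BASE(off) + size.  Embedded
 * files that outgrow the ICB are converted to short-AD files here.
 * When alloc_only is clear the newly allocated blocks are zeroed on
 * disk; for directories and synchronous inodes it is forced clear.
 * Called with i_contents held for writing.
 */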
/* ARGSUSED3 */
int32_t
ud_bmap_write(struct ud_inode *ip,
	u_offset_t off, int32_t size, int32_t alloc_only, struct cred *cr)
{
	int32_t error = 0, i, isdir, issync;
	struct udf_vfs *udf_vfsp;
	struct icb_ext *iext, *pext;
	uint32_t blkno, sz;
	u_offset_t isize;
	uint32_t acount, prox;
	int32_t blkcount, next;
	int32_t lbmask, l2b;
	uint64_t end_req, end_ext, mext_sz, icb_offset, count;
	int32_t dtype_changed = 0, memory_allocated = 0;
	struct fbuf *fbp = NULL;

	ud_printf("ud_bmap_write\n");

	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	if (lblkno(udf_vfsp, off) < 0) {
		return (EFBIG);
	}

	issync = ((ip->i_flag & ISYNC) != 0);

	isdir = (ip->i_type == VDIR);
	if (isdir || issync) {
		alloc_only = 0;		/* make sure */
	}

	end_req = BASE(off) + size;
	if (ip->i_desc_type == ICB_FLAG_ONE_AD) {
		if (end_req < ip->i_max_emb) {
			goto out;
		}

		if (ip->i_size != 0) {
			error = fbread(ITOV(ip), 0, ip->i_size, S_OTHER, &fbp);
			if (error != 0) {
				goto out;
			}
		} else {
			fbp = NULL;
		}
		/*
		 * Change the desc_type
		 */
		ip->i_desc_type = ICB_FLAG_SHORT_AD;
		dtype_changed = 1;
one_ad_no_i_ext:
		ASSERT(ip->i_ext == NULL);
		ASSERT(ip->i_astrat == STRAT_TYPE4);

		ip->i_ext_used = 0;
		ip->i_cur_max_ext = ip->i_max_emb / sizeof (struct short_ad);
		ip->i_cur_max_ext--;
		if (end_req > mext_sz) {
			next = end_req / mext_sz;
		} else {
			next = 1;
		}
		ip->i_ext_count =
		    ((next / EXT_PER_MALLOC) + 1) * EXT_PER_MALLOC;
		iext = ip->i_ext = (struct icb_ext *)kmem_zalloc(
		    ip->i_ext_count * sizeof (struct icb_ext), KM_SLEEP);
		memory_allocated = 1;

		/* There will be at least EXT_PER_MALLOC icb_exts allocated */

one_ad_i_ext:
		icb_offset = 0;
		count = end_req;

		/* Can we create a HOLE? */

		if ((PCEIL(ip->i_size) < PBASE(off)) &&
		    ((PBASE(off) - PCEIL(ip->i_size)) >= PAGESIZE)) {

			if (ip->i_size != 0) {

				/*
				 * Allocate one block for the old data
				 * (it cannot be more than one page).
				 */

				count = PAGESIZE;
				if (error = ud_create_ext(ip, ip->i_ext_used,
				    ALLOC_SPACE | NEW_EXT, alloc_only,
				    icb_offset, &count)) {
					goto embedded_error;
				}
				icb_offset = PAGESIZE;
			}

			/*
			 * Allocate a hole from PCEIL(ip->i_size) to PBASE(off)
			 */

			count = PBASE(off) - PCEIL(ip->i_size);
			(void) ud_create_ext(ip, ip->i_ext_used, NEW_EXT,
			    alloc_only, icb_offset, &count);
			icb_offset = PBASE(off);

			/*
			 * Allocate the rest of the space PBASE(off) to end_req
			 */
			count = end_req - PBASE(off);
		} else {
			/*
			 * If no hole can be created then allocate
			 * space till the end of the request
			 */
			count = end_req;
		}

		if (error = ud_create_ext(ip, ip->i_ext_used,
		    ALLOC_SPACE | NEW_EXT,
		    alloc_only, icb_offset, &count)) {
embedded_error:
			/*
			 * Something went wrong; the most probable cause
			 * is that the file system is full.  We know the
			 * file came in as an embedded file, so undo
			 * whatever we did in this block of code.
			 */
			if (dtype_changed) {
				ip->i_desc_type = ICB_FLAG_ONE_AD;
			}
			for (i = 0; i < ip->i_ext_used; i++) {
				iext = &ip->i_ext[i];
				if (iext->ib_flags != IB_UN_RE_AL) {
					ud_free_space(ip->i_udf->udf_vfs,
					    iext->ib_prn, iext->ib_block,
					    (iext->ib_count + lbmask) >>
					    l2b);
				}
			}
			if (memory_allocated) {
				kmem_free(ip->i_ext,
				    ip->i_ext_count *
				    sizeof (struct icb_ext));
				ip->i_ext = NULL;
				ip->i_ext_count = ip->i_ext_used = 0;
			}
		}

		if (fbp != NULL) {
			fbrelse(fbp, S_WRITE);
		}

		return (error);
	} else {

		/*
		 * Type 4 directories being created
		 */
		if (ip->i_ext == NULL) {
			goto one_ad_no_i_ext;
		}

		/*
		 * Read all the ICBs into memory
		 */
		if (ud_read_icb_till_off(ip, ip->i_size) != 0) {
			error = EINVAL;
			goto out;
		}

		isize = CEIL(ip->i_size);

		if (end_req > isize) {

			/*
			 * The new file size is greater
			 * than the old size
			 */

			if (ip->i_ext == NULL) {
				goto one_ad_no_i_ext;
			} else if (ip->i_ext_used == 0) {
				goto one_ad_i_ext;
			}

			error = ud_last_alloc_ext(ip, off, size, alloc_only);

			return (error);
		} else {

			/*
			 * The file is not growing past the rounded-up
			 * old size; end_req lies within
			 * iext->ib_offset + CEIL(iext->ib_count)
			 * of the last extent
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];

			if (end_req > (iext->ib_offset + iext->ib_count)) {

				iext->ib_count = end_req - iext->ib_offset;

				if (iext->ib_flags != IB_UN_RE_AL) {
					error = 0;
					goto out;
				}
			}
		}
	}

	/* By this point the end of the last extent is >= BASE(off) + size */

	ASSERT(ip->i_ext);

	/*
	 * Figure out the icb_ext that has offset "off"
	 */
	for (i = 0; i < ip->i_ext_used; i++) {
		iext = &ip->i_ext[i];
		if ((iext->ib_offset <= off) &&
		    ((iext->ib_offset + iext->ib_count) > off)) {
			break;
		}
	}

	/*
	 * iext will have offset "off"
	 */

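	/*
	 * Walk the extents from here to end_req.  Extents that already
	 * have backing store are skipped; each hole is trimmed so that
	 * any part we leave unallocated stays page aligned, and the
	 * part that overlaps the request gets real blocks, preferably
	 * adjacent to the previous extent.
	 */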
	do {
		iext = &ip->i_ext[i];

		if ((iext->ib_flags & IB_UN_RE_AL) == 0) {

			/*
			 * Already allocated, do nothing
			 */

			i++;
		} else {

			/*
			 * We are in a hole.
			 * Allocate the required space
			 * while trying to create smaller holes
			 */

			if ((PBASE(off) > PBASE(iext->ib_offset)) &&
			    ((PBASE(off) - PBASE(iext->ib_offset)) >=
			    PAGESIZE)) {

				/*
				 * Allocate space from the beginning of
				 * the old hole to the beginning of the
				 * new hole.  We want all holes created
				 * by us to be MMU-page aligned.
				 */

				if (PBASE(iext->ib_offset) !=
				    BASE(iext->ib_offset)) {
					if ((error = ud_break_create_new_icb(
					    ip, i, BASE(iext->ib_offset) -
					    PBASE(iext->ib_offset))) != 0) {
						return (error);
					}
					goto alloc_cur_ext;
				}

				/*
				 * Create the new hole
				 */

				if ((error = ud_break_create_new_icb(ip, i,
				    PBASE(off) - iext->ib_offset)) != 0) {
					return (error);
				}
				iext = &ip->i_ext[i];
				i++;
				continue;
			}

			end_ext = iext->ib_offset + iext->ib_count;

			if ((PBASE(end_ext) > PCEIL(end_req)) &&
			    ((PBASE(end_ext) - PCEIL(end_req)) >=
			    PAGESIZE)) {
				/*
				 * We can create a hole
				 * from PCEIL(end_req) to PBASE(end_ext)
				 */
				if ((error = ud_break_create_new_icb(ip, i,
				    PCEIL(end_req) - iext->ib_offset)) != 0) {
					return (error);
				}
			}

alloc_cur_ext:
			/*
			 * Allocate the current extent
			 */

			/*
			 * If the previous extent
			 * is allocated then try to allocate
			 * adjacent to the previous extent
			 */
			prox = 0;
			if (i != 0) {
				pext = &ip->i_ext[i - 1];
				if (pext->ib_flags != IB_UN_RE_AL) {
					prox = pext->ib_block +
					    (CEIL(pext->ib_count) >> l2b);
				}
			}

			iext = &ip->i_ext[i];
			blkcount = CEIL(iext->ib_count) >> l2b;

			if ((error = ud_alloc_space(ip->i_vfs,
			    ip->i_icb_prn, prox, blkcount,
			    &blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			ip->i_lbr += sz;
			if (sz == 0) {
				return (ENOSPC);
			}

			if (alloc_only == 0) {
				error = ud_zero_it(ip, blkno, sz);
			}

			acount = sz << l2b;
			if ((prox == blkno) &&
			    ((pext->ib_count + acount) < mext_sz)) {

				/*
				 * We were able to allocate adjacent to
				 * the previous extent.  Increment the
				 * previous extent's count if the size
				 * of the extent does not exceed the
				 * max extent size
				 */

				pext = &ip->i_ext[i - 1];
				pext->ib_count += acount;

				if (sz == blkcount) {
					/*
					 * and get rid of the current
					 * extent since we have
					 * allocated all of its size
					 * and incremented the
					 * previous extent's count
					 */
					ud_remove_ext_at_index(ip, i);
				} else {
					/*
					 * reduce the count of the
					 * current extent by the amount
					 * allocated in the last extent
					 */
					ASSERT(acount < iext->ib_count);
					iext->ib_count -= acount;
					iext->ib_offset += acount;
				}
			} else {
				if (sz < blkcount) {
					if ((error = ud_break_create_new_icb(
					    ip, i, sz << l2b)) != 0) {
						return (error);
					}
				}
				iext = &ip->i_ext[i];
				count -= CEIL(iext->ib_count);
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_flags &= ~IB_UN_RE_AL;
				/*
				 * iext->ib_flags |= IB_UN_REC;
				 */
				i++;
				continue;
			}
		}
	} while ((iext->ib_offset + iext->ib_count) < end_req);

out:
	return (error);
}

/*
 * Increase the i_con/i_ext arrays and set new elements
 * using long or short allocation descriptors
 */
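/*
 * Each continuation block starts with an alloc_ext_desc header,
 * followed by an array of short_ad or long_ad descriptors.  The top
 * two bits of a descriptor's 32-bit length encode the extent type
 * (IB_MASK values); the low 30 bits are the byte count.  A zero
 * length ends the list and an IB_CON entry points at the next
 * continuation block.
 */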
static void
ud_common_ad(struct ud_inode *ip, struct buf *bp)
{
	int32_t ndesc, count, lbmask;
	uint32_t length;
	struct alloc_ext_desc *aed;
	struct icb_ext *iext, *con;
	u_offset_t offset;
	long_ad_t *lad;
	short_ad_t *sad;
	int islong;
	void *addr;

	addr = bp->b_un.b_addr + sizeof (struct alloc_ext_desc);
	aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
	length = SWAP_32(aed->aed_len_aed);
	if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
		islong = 1;
		lad = addr;
		ndesc = length / sizeof (*lad);
	} else if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
		islong = 0;
		sad = addr;
		ndesc = length / sizeof (*sad);
	} else
		return;

	/*
	 * realloc i_ext array
	 */
	count = (((ip->i_ext_used + ndesc) / EXT_PER_MALLOC) + 1) *
	    EXT_PER_MALLOC;
	addr = kmem_zalloc(count * sizeof (struct icb_ext), KM_SLEEP);
	bcopy(ip->i_ext, addr, ip->i_ext_used * sizeof (struct icb_ext));
	kmem_free(ip->i_ext, ip->i_ext_count * sizeof (struct icb_ext));
	ip->i_ext = addr;
	ip->i_ext_count = count;

	/*
	 * scan descriptors
	 */
	lbmask = ip->i_udf->udf_lbmask;
	iext = &ip->i_ext[ip->i_ext_used - 1];
	offset = iext->ib_offset + iext->ib_count;
	iext++;
	while (ndesc--) {
		if (islong)
			length = SWAP_32(lad->lad_ext_len);
		else
			length = SWAP_32(sad->sad_ext_len);

		if ((length & 0x3FFFFFFF) == 0)
			break;
		else if (((length >> 30) & IB_MASK) == IB_CON) {
			if (ip->i_con_used == ip->i_con_count) {
				struct icb_ext *old;
				int32_t old_count;

				old = ip->i_con;
				old_count = ip->i_con_count *
				    sizeof (struct icb_ext);
				ip->i_con_count += EXT_PER_MALLOC;
				ip->i_con = kmem_zalloc(ip->i_con_count *
				    sizeof (struct icb_ext), KM_SLEEP);

				if (old) {
					bcopy(old, ip->i_con, old_count);
					kmem_free(old, old_count);
				}
			}
			con = &ip->i_con[ip->i_con_used];
			if (islong) {
				con->ib_prn = SWAP_16(lad->lad_ext_prn);
				con->ib_block = SWAP_32(lad->lad_ext_loc);
			} else {
				con->ib_prn = ip->i_icb_prn;
				con->ib_block = SWAP_32(sad->sad_ext_loc);
			}
			con->ib_count = length & 0x3FFFFFFF;
			con->ib_flags = (length >> 30) & IB_MASK;
			ip->i_con_used++;
			break;
		}

		if (islong) {
			iext->ib_prn = SWAP_16(lad->lad_ext_prn);
			iext->ib_block = SWAP_32(lad->lad_ext_loc);
			lad++;
		} else {
			iext->ib_prn = 0;
			iext->ib_block = SWAP_32(sad->sad_ext_loc);
			sad++;
		}
		iext->ib_count = length & 0x3FFFFFFF;
		iext->ib_offset = offset;
		iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
		iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
		offset += (iext->ib_count + lbmask) & (~lbmask);
		iext->ib_flags = (length >> 30) & IB_MASK;
		ip->i_ext_used++;
		iext++;
	}
}

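/*
 * Read the next continuation block (i_con[i_con_read]) from disk,
 * verify its tag, and append the descriptors it holds to i_ext/i_con
 * via ud_common_ad().
 */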
static int32_t
ud_read_next_cont(struct ud_inode *ip)
{
	uint32_t dummy, error = 0;
	struct alloc_ext_desc *aed;
	struct icb_ext *cont;
	struct buf *bp;
	daddr_t bno;

	cont = &ip->i_con[ip->i_con_read];
	ASSERT(cont->ib_count > 0);

	bno = ud_xlate_to_daddr(ip->i_udf, cont->ib_prn, cont->ib_block,
	    1, &dummy);
	bp = ud_bread(ip->i_dev, bno << ip->i_udf->udf_l2d_shift,
	    cont->ib_count);
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;
	else {
		aed = (struct alloc_ext_desc *)bp->b_un.b_addr;
		if (ud_verify_tag_and_desc(&aed->aed_tag, UD_ALLOC_EXT_DESC,
		    cont->ib_block, 1, cont->ib_count))
			error = EINVAL;
	}

	if (error == 0)
		ud_common_ad(ip, bp);

	brelse(bp);
	return (error);
}

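/*
 * Make sure the in-core extent list covers the file up to "offset",
 * reading continuation blocks from disk as needed.  Embedded
 * (ICB_FLAG_ONE_AD) files have nothing to read.
 */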
int32_t
ud_read_icb_till_off(struct ud_inode *ip, u_offset_t offset)
{
	int32_t error = 0;
	struct icb_ext *iext;

	ud_printf("ud_read_icb_till_off\n");

	if (ip->i_desc_type == ICB_FLAG_ONE_AD)
		return (0);
	else if ((ip->i_astrat != STRAT_TYPE4) &&
	    (ip->i_astrat != STRAT_TYPE4096))
		return (EINVAL);
	else if (ip->i_ext_used == 0)
		return ((ip->i_size == 0) ? 0 : EINVAL);

	/*
	 * supported allocation strategies are
	 * STRAT_TYPE4 and STRAT_TYPE4096
	 */

	mutex_enter(&ip->i_con_lock);
	iext = &ip->i_ext[ip->i_ext_used - 1];
	while ((iext->ib_offset + iext->ib_count) < offset) {
		if (ip->i_con_used == ip->i_con_read) {
			error = EINVAL;
			break;
		}
		if (error = ud_read_next_cont(ip))
			break;
		ip->i_con_read++;
		iext = &ip->i_ext[ip->i_ext_used - 1];
	}
	mutex_exit(&ip->i_con_lock);

	return (error);
}

/*
 * Assumes that off is beyond ip->i_size
 * and that at least one extent is in use
 */
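/*
 * The last extent is first rounded up to a block boundary.  If a
 * page-aligned hole can be created between the old end of file and
 * PBASE(off), any sub-page remainder before the hole is allocated,
 * the hole itself is recorded as an unallocated extent, and the tail
 * from PBASE(off) to the end of the request gets real blocks.
 * Otherwise the whole range is simply allocated.
 */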
int32_t
ud_last_alloc_ext(struct ud_inode *ip, uint64_t off,
	uint32_t size, int32_t alloc_only)
{
	struct icb_ext *iext;
	struct udf_vfs *udf_vfsp;
	int32_t lbsize, lbmask;
	uint64_t end_req, end_count, icb_offset;
	uint64_t count;
	int32_t error = 0;

	udf_vfsp = ip->i_udf;
	lbsize = udf_vfsp->udf_lbsize;
	lbmask = udf_vfsp->udf_lbmask;

	end_req = BASE(off) + size;

	/*
	 * If we are here it means the file
	 * is growing beyond the end of the
	 * current block. So round up the
	 * last extent
	 */

	iext = &ip->i_ext[ip->i_ext_used - 1];
	iext->ib_count = CEIL(iext->ib_count);

	/*
	 * Figure out if we can create
	 * a hole here
	 */

	end_count = iext->ib_offset + iext->ib_count;

	if ((PCEIL(end_count) < PBASE(off)) &&
	    ((PBASE(off) - PCEIL(end_count)) >= PAGESIZE)) {

		count = PCEIL(end_count) - CEIL(end_count);
		if (count >= lbsize) {

			/*
			 * There is space between the beginning
			 * of the hole to be created and
			 * the end of the last offset.
			 * Allocate blocks for it
			 */

			iext = &ip->i_ext[ip->i_ext_used - 1];
			icb_offset = iext->ib_offset + CEIL(iext->ib_count);

			if (iext->ib_flags == IB_UN_RE_AL) {

				/*
				 * Previous extent is an unallocated
				 * extent. Create a new allocated
				 * extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used,
				    ALLOC_SPACE | NEW_EXT,
				    alloc_only, icb_offset, &count);

			} else {

				/*
				 * Last extent is allocated;
				 * try to allocate adjacent to the
				 * last extent
				 */

				error = ud_create_ext(ip, ip->i_ext_used - 1,
				    ALLOC_SPACE, alloc_only,
				    icb_offset, &count);
			}

			if (error != 0) {
				return (error);
			}
		}

		iext = &ip->i_ext[ip->i_ext_used - 1];
		end_count = iext->ib_offset + iext->ib_count;
		count = PBASE(off) - PCEIL(end_count);
		icb_offset = PCEIL(end_count);

		if (iext->ib_flags == IB_UN_RE_AL) {

			/*
			 * The last extent is unallocated.
			 * Just bump the extent count
			 */
			(void) ud_create_ext(ip, ip->i_ext_used - 1,
			    0, alloc_only, icb_offset, &count);
		} else {

			/*
			 * Last extent is allocated.
			 * Round up the size of the extent to
			 * lbsize and allocate a new unallocated extent
			 */
			iext->ib_count = CEIL(iext->ib_count);
			(void) ud_create_ext(ip, ip->i_ext_used,
			    NEW_EXT, alloc_only, icb_offset, &count);
		}

		icb_offset = PBASE(off);
	} else {

		/*
		 * We cannot create any hole between
		 * the last extent and off, so
		 * round up the count in the last extent
		 */

		iext = &ip->i_ext[ip->i_ext_used - 1];
		iext->ib_count = CEIL(iext->ib_count);
	}

	iext = &ip->i_ext[ip->i_ext_used - 1];
	count = end_req - (iext->ib_offset + iext->ib_count);
	icb_offset = iext->ib_offset + CEIL(iext->ib_count);

	if (iext->ib_flags == IB_UN_RE_AL) {

		/*
		 * Last extent was an unallocated extent;
		 * create a new extent
		 */

		error = ud_create_ext(ip, ip->i_ext_used,
		    ALLOC_SPACE | NEW_EXT, alloc_only, icb_offset, &count);
	} else {

		/*
		 * Last extent was an allocated extent;
		 * try to allocate adjacent to the old blocks
		 */

		error = ud_create_ext(ip, ip->i_ext_used - 1,
		    ALLOC_SPACE, alloc_only, icb_offset, &count);
	}

	return (error);
}

/*
 * Break up the icb_ext at "index" into two icb_exts:
 * one at index with ib_count "count" and
 * the other at index + 1 with ib_count = old ib_count - count
 */
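/*
 * For example (assuming 2K logical blocks), splitting a 16K extent
 * with count == 4K leaves a 4K extent at index and a 12K extent at
 * index + 1 whose ib_offset, and ib_block if it is allocated, start
 * where the first piece ends.
 */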
int32_t
ud_break_create_new_icb(struct ud_inode *ip,
	int32_t index, uint32_t count)
{
	int32_t i, error;
	struct icb_ext *iext, *next;

	ud_printf("ud_break_create_new_icb\n");
	iext = &ip->i_ext[index];

	ASSERT(count < iext->ib_count);

	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	for (i = ip->i_ext_used; i > index; i--) {
		ip->i_ext[i] = ip->i_ext[i - 1];
	}

	next = &ip->i_ext[index + 1];
	iext = &ip->i_ext[index];

	iext->ib_count = count;
	next->ib_count -= count;
	next->ib_offset = iext->ib_offset + iext->ib_count;
	if (iext->ib_flags != IB_UN_RE_AL) {
		next->ib_block = iext->ib_block +
		    (iext->ib_count >> ip->i_udf->udf_l2b_shift);
	}
	ip->i_ext_used++;
	return (0);
}

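/*
 * Delete the icb_ext at "index", shifting the rest of the array down
 * over it.
 */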
void
ud_remove_ext_at_index(struct ud_inode *ip, int32_t index)
{
	int32_t i;

	ASSERT(index < ip->i_ext_used);

	for (i = index; i < ip->i_ext_used; i++) {
		if ((i + 1) < ip->i_ext_count) {
			ip->i_ext[i] = ip->i_ext[i + 1];
		} else {
			bzero(&ip->i_ext[i], sizeof (struct icb_ext));
		}
	}
	ip->i_ext_used--;
}

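/*
 * Make room for one more icb_ext.  Grows the in-core i_ext array when
 * it is full and, when the on-disk allocation descriptors would
 * overflow the current ICB/continuation block (i_cur_max_ext),
 * allocates a new continuation block and raises the limit.
 */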
int32_t
ud_bump_ext_count(struct ud_inode *ip, int32_t sleep_flag)
{
	int32_t error = 0;
	struct icb_ext *iext;
	uint32_t old_count, elen;

	ASSERT(ip);
	ASSERT(sleep_flag == KM_SLEEP);

	ud_printf("ud_bump_ext_count\n");

	if (ip->i_ext_used >= ip->i_ext_count) {

		old_count = sizeof (struct icb_ext) * ip->i_ext_count;
		ip->i_ext_count += EXT_PER_MALLOC;
		iext = kmem_zalloc(sizeof (struct icb_ext) *
		    ip->i_ext_count, sleep_flag);
		bcopy(ip->i_ext, iext, old_count);
		kmem_free(ip->i_ext, old_count);
		ip->i_ext = iext;
	}

	if (ip->i_ext_used >= ip->i_cur_max_ext) {
		int32_t prox;
		struct icb_ext *icon;
		uint32_t blkno, sz;
		int32_t lbmask, l2b;

		lbmask = ip->i_udf->udf_lbmask;
		l2b = ip->i_udf->udf_l2b_shift;

		if ((error = ud_read_icb_till_off(ip, ip->i_size)) != 0) {
			return (error);
		}

		/*
		 * If there are any old cont extents,
		 * allocate the new one adjacent to the old one
		 */
		if (ip->i_con_used != 0) {
			icon = &ip->i_con[ip->i_con_used - 1];
			prox = icon->ib_block + (CEIL(icon->ib_count) >> l2b);
		} else {
			prox = 0;
		}

		/*
		 * Allocate space
		 */
		if ((error = ud_alloc_space(ip->i_vfs, ip->i_icb_prn,
		    prox, 1, &blkno, &sz, 0, 0)) != 0) {
			return (error);
		}
		if (sz == 0) {
			return (ENOSPC);
		}

		sz <<= l2b;

		if (ip->i_con_used == ip->i_con_count) {
			struct icb_ext *old;
			int32_t old_count;

			old = ip->i_con;
			old_count = ip->i_con_count *
			    sizeof (struct icb_ext);
			ip->i_con_count += EXT_PER_MALLOC;
			ip->i_con = kmem_zalloc(ip->i_con_count *
			    sizeof (struct icb_ext), KM_SLEEP);
			if (old != 0) {
				bcopy(old, ip->i_con, old_count);
				kmem_free(old, old_count);
			}
		}
		icon = &ip->i_con[ip->i_con_used++];
		icon->ib_flags = IB_CON;
		icon->ib_prn = ip->i_icb_prn;
		icon->ib_block = blkno;
		icon->ib_count = sz;
		icon->ib_offset = 0;
		icon->ib_marker1 = (uint32_t)0xAAAAAAAA;
		icon->ib_marker2 = (uint32_t)0xBBBBBBBB;

		/*
		 * Bump i_cur_max_ext according to
		 * the space allocated
		 */
		if (ip->i_desc_type == ICB_FLAG_SHORT_AD) {
			elen = sizeof (struct short_ad);
		} else if (ip->i_desc_type == ICB_FLAG_LONG_AD) {
			elen = sizeof (struct long_ad);
		} else {
			return (ENOSPC);
		}
		sz = sz - (sizeof (struct alloc_ext_desc) + elen);
		ip->i_cur_max_ext += sz / elen;
	}
	return (error);
}

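/*
 * Record "*count" bytes of file data starting at file offset "offset"
 * in the extent at "index" (and in as many further extents as it
 * takes).  With ALLOC_SPACE real blocks are allocated, otherwise an
 * unallocated (hole) extent is recorded; NEW_EXT starts a fresh
 * extent instead of extending the existing one.  On return *count
 * holds the bytes still uncovered (0 on full success).
 */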
int32_t
ud_create_ext(struct ud_inode *ip, int32_t index, uint32_t flags,
	int32_t alloc_only, uint64_t offset, uint64_t *count)
{
	struct icb_ext *iext, *pext;
	struct udf_vfs *udf_vfsp;
	int32_t error = 0, blkcount, acount;
	uint32_t blkno, sz, prox, mext_sz;
	int32_t lbmask, l2b;

	if (*count == 0) {
		return (0);
	}

begin:
	udf_vfsp = ip->i_udf;
	lbmask = udf_vfsp->udf_lbmask;
	l2b = udf_vfsp->udf_l2b_shift;
	mext_sz = (1 << MEXT_BITS) - PAGESIZE;

	if ((error = ud_bump_ext_count(ip, KM_SLEEP)) != 0) {
		return (error);
	}

	iext = &ip->i_ext[index];
	if (flags & ALLOC_SPACE) {
		if ((flags & NEW_EXT) ||
		    (ip->i_ext_count == 0)) {

			iext->ib_flags = 0;
			iext->ib_prn = ip->i_icb_prn;
			if (*count > mext_sz) {
				blkcount = mext_sz >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if ((error = ud_alloc_space(ip->i_vfs,
			    ip->i_icb_prn, 0, blkcount,
			    &blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			ip->i_lbr += sz;
			iext->ib_block = blkno;
			acount = sz << l2b;
			if ((sz << l2b) > *count) {
				iext->ib_count = *count;
				*count = 0;
			} else {
				iext->ib_count = sz << l2b;
				*count -= iext->ib_count;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;
		} else {
			if ((iext->ib_count + *count) > mext_sz) {
				blkcount = (mext_sz - iext->ib_count) >> l2b;
			} else {
				blkcount = CEIL(*count) >> l2b;
			}
			if (blkcount == 0) {
				flags |= NEW_EXT;
				index++;
				goto begin;
			}
			prox = iext->ib_block + (CEIL(iext->ib_count) >> l2b);
			if ((error = ud_alloc_space(ip->i_vfs,
			    ip->i_icb_prn, prox, blkcount,
			    &blkno, &sz, 1, 0)) != 0) {
				return (error);
			}
			if (sz == 0) {
				return (ENOSPC);
			}
			acount = sz << l2b;
			if (acount > *count) {
				acount = *count;
				*count = 0;
			} else {
				*count -= acount;
			}
			ip->i_lbr += sz;
			if (prox == blkno) {
				iext->ib_count += acount;
			} else {
				if ((error = ud_bump_ext_count(ip, KM_SLEEP))
				    != 0) {
					return (error);
				}
				pext = &ip->i_ext[index];
				iext = &ip->i_ext[index + 1];
				iext->ib_flags = 0;
				iext->ib_prn = ip->i_icb_prn;
				iext->ib_block = blkno;
				iext->ib_offset =
				    pext->ib_offset + pext->ib_count;
				iext->ib_count = acount;
				/*
				 * Increment the index, since we have used
				 * the extent at [index + 1] above.
				 */
				index++;
				if (ip->i_ext_used <= index)
					ip->i_ext_used++;
			}
		}
		if (alloc_only == 0) {
			error = ud_zero_it(ip, blkno, sz);
		}
		if (*count) {
			offset = iext->ib_offset + CEIL(iext->ib_count);
			flags |= NEW_EXT;
			index++;
			goto begin;
		}
	} else {
		if (flags & NEW_EXT) {
			iext->ib_flags = IB_UN_RE_AL;
			iext->ib_prn = 0;
			iext->ib_block = 0;
			if (*count > mext_sz) {
				iext->ib_count = mext_sz;
				*count -= iext->ib_count;
			} else {
				iext->ib_count = *count;
				*count = 0;
			}
			iext->ib_offset = offset;
			if (ip->i_ext_used <= index)
				ip->i_ext_used++;
		} else {
			ASSERT(iext->ib_flags == IB_UN_RE_AL);
			if ((iext->ib_count + *count) > mext_sz) {
				acount = mext_sz - iext->ib_count;
				iext->ib_count += acount;
				*count -= acount;
			} else {
				iext->ib_count += *count;
				*count = 0;
			}
		}
		if (*count != 0) {
			offset = iext->ib_offset + CEIL(iext->ib_count);
			flags |= NEW_EXT;
			index++;
			goto begin;
		}
	}
	iext->ib_marker1 = (uint32_t)0xAAAAAAAA;
	iext->ib_marker2 = (uint32_t)0xBBBBBBBB;
	return (error);
}

#undef CEIL
#undef BASE

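/*
 * Zero block_count logical blocks on disk starting at start_block,
 * writing directly through the device strategy routine.
 */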
int32_t
ud_zero_it(struct ud_inode *ip, uint32_t start_block, uint32_t block_count)
{
	struct udf_vfs *udf_vfsp;
	uint32_t bno, dummy;
	int32_t error;
	struct buf *bp;

	/*
	 * Do not use the bio routines, since
	 * the buffer can sit in the cache
	 * long enough for the space to be
	 * freed and then allocated again
	 */
	udf_vfsp = ip->i_udf;
	bno = ud_xlate_to_daddr(udf_vfsp,
	    ip->i_icb_prn, start_block, block_count, &dummy);

	dummy = block_count << udf_vfsp->udf_l2b_shift;
	bp = (struct buf *)kmem_zalloc(biosize(), KM_SLEEP);
	sema_init(&bp->b_sem, 0, NULL, SEMA_DEFAULT, NULL);
	sema_init(&bp->b_io, 0, NULL, SEMA_DEFAULT, NULL);

	bp->b_flags = B_WRITE | B_BUSY;
	bp->b_edev = ip->i_dev;
	bp->b_dev = cmpdev(ip->i_dev);
	bp->b_blkno = bno << udf_vfsp->udf_l2d_shift;
	bp->b_bcount = dummy;
	bp->b_un.b_addr = kmem_zalloc(bp->b_bcount, KM_SLEEP);
	bp->b_file = ip->i_vnode;
	bp->b_offset = -1;

	(void) bdev_strategy(bp);
	if (error = biowait(bp)) {
		cmn_err(CE_WARN, "error in write\n");
	}

	kmem_free(bp->b_un.b_addr, dummy);
	sema_destroy(&bp->b_io);
	sema_destroy(&bp->b_sem);
	kmem_free((caddr_t)bp, biosize());

	return (error);
}