1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/types.h>
28 #include <sys/t_lock.h>
29 #include <sys/param.h>
30 #include <sys/time.h>
31 #include <sys/systm.h>
32 #include <sys/sysmacros.h>
33 #include <sys/resource.h>
34 #include <sys/signal.h>
35 #include <sys/cred.h>
36 #include <sys/user.h>
37 #include <sys/buf.h>
38 #include <sys/vfs.h>
39 #include <sys/stat.h>
40 #include <sys/vnode.h>
41 #include <sys/mode.h>
42 #include <sys/proc.h>
43 #include <sys/disp.h>
44 #include <sys/file.h>
45 #include <sys/fcntl.h>
46 #include <sys/flock.h>
47 #include <sys/kmem.h>
48 #include <sys/uio.h>
49 #include <sys/dnlc.h>
50 #include <sys/conf.h>
51 #include <sys/errno.h>
52 #include <sys/mman.h>
53 #include <sys/fbuf.h>
54 #include <sys/pathname.h>
55 #include <sys/debug.h>
56 #include <sys/vmsystm.h>
57 #include <sys/cmn_err.h>
58 #include <sys/dirent.h>
59 #include <sys/errno.h>
60 #include <sys/modctl.h>
61 #include <sys/statvfs.h>
62 #include <sys/mount.h>
63 #include <sys/sunddi.h>
64 #include <sys/bootconf.h>
65 #include <sys/policy.h>
66
67 #include <vm/hat.h>
68 #include <vm/page.h>
69 #include <vm/pvn.h>
70 #include <vm/as.h>
71 #include <vm/seg.h>
72 #include <vm/seg_map.h>
73 #include <vm/seg_kmem.h>
74 #include <vm/seg_vn.h>
75 #include <vm/rm.h>
76 #include <vm/page.h>
77 #include <sys/swap.h>
78
79 #include <fs/fs_subr.h>
80
81 #include <sys/fs/udf_volume.h>
82 #include <sys/fs/udf_inode.h>
83
84 #ifdef DEBUG
85 extern struct ud_inode *ud_search_icache(struct vfs *, uint16_t, uint32_t);
86 #endif
87
88 int32_t ud_alloc_space_bmap(struct vfs *, struct ud_part *,
89 uint32_t, uint32_t, uint32_t *, uint32_t *, int32_t);
90 int32_t ud_check_free_and_mark_used(struct vfs *,
91 struct ud_part *, uint32_t, uint32_t *);
92 int32_t ud_check_free(uint8_t *, uint8_t *, uint32_t, uint32_t);
93 void ud_mark_used(uint8_t *, uint32_t, uint32_t);
94 void ud_mark_free(uint8_t *, uint32_t, uint32_t);
95 int32_t ud_alloc_space_stbl(struct vfs *, struct ud_part *,
96 uint32_t, uint32_t, uint32_t *, uint32_t *, int32_t);
97 int32_t ud_free_space_bmap(struct vfs *,
98 struct ud_part *, uint32_t, uint32_t);
99 int32_t ud_free_space_stbl(struct vfs *,
100 struct ud_part *, uint32_t, uint32_t);
101
102
/*
 * WORKAROUND for buffer cache behavior:
 * If the requested block exists in the buffer cache,
 * the buffer cache does not care about the count;
 * it just returns the old buffer (and does not even
 * set the resid value). The same problem exists if the
 * block that is requested is not the first block
 * in a cached buffer; in that case a different buffer
 * is returned. We work around the above by using a
 * fixed-size request to the buffer cache all the
 * time. This is currently udf_lbsize.
 * (Actually it is restricted to udf_lbsize
 * because iget always does udf_lbsize requests.)
 */
117
118
119 /*
120 * allocate blkcount blocks continuously
121 * near "proximity" block in partion defined by prn.
122 * if proximity != 0 means less_is_ok = 0
123 * return the starting block no and count
124 * of blocks allocated in start_blkno & size
125 * if less_is_ok == 0 then allocate only if
126 * entire requirement can be met.
127 */
128 int32_t
ud_alloc_space(struct vfs * vfsp,uint16_t prn,uint32_t proximity,uint32_t blkcount,uint32_t * start_blkno,uint32_t * size,int32_t less_is_ok,int32_t metadata)129 ud_alloc_space(struct vfs *vfsp, uint16_t prn,
130 uint32_t proximity, uint32_t blkcount,
131 uint32_t *start_blkno, uint32_t *size,
132 int32_t less_is_ok, int32_t metadata)
133 {
134 int32_t i, error = 0;
135 struct udf_vfs *udf_vfsp;
136 struct ud_part *ud_part;
137
138 ud_printf("ud_alloc_space\n");
139
140
141 /*
142 * prom_printf("ud_alloc_space %x %x %x %x\n",
143 * proximity, blkcount, less_is_ok, metadata);
144 */
145
146 if (blkcount == 0) {
147 *start_blkno = 0;
148 *size = 0;
149 return (0);
150 }
151
152 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
153 ud_part = udf_vfsp->udf_parts;
154 for (i = 0; i < udf_vfsp->udf_npart; i++) {
155 if (prn == ud_part->udp_number) {
156 break;
157 }
158 ud_part ++;
159 }
160
161 if (i == udf_vfsp->udf_npart) {
162 return (1);
163 }
164 *start_blkno = 0;
165 *size = 0;
166 if (metadata) {
167 error = ud_alloc_from_cache(udf_vfsp, ud_part, start_blkno);
168 if (error == 0) {
169 *size = 1;
170 return (0);
171 }
172 }
173 if (ud_part->udp_nfree != 0) {
174 if (ud_part->udp_flags == UDP_BITMAPS) {
175 error = ud_alloc_space_bmap(vfsp, ud_part, proximity,
176 blkcount, start_blkno, size, less_is_ok);
177 } else {
178 error = ud_alloc_space_stbl(vfsp, ud_part, proximity,
179 blkcount, start_blkno, size, less_is_ok);
180 }
181 if (error == 0) {
182 mutex_enter(&udf_vfsp->udf_lock);
183 ASSERT(ud_part->udp_nfree >= *size);
184 ASSERT(udf_vfsp->udf_freeblks >= *size);
185 ud_part->udp_nfree -= *size;
186 udf_vfsp->udf_freeblks -= *size;
187 mutex_exit(&udf_vfsp->udf_lock);
188 }
189 } else {
190 error = ENOSPC;
191 }
192 /*
193 * prom_printf("end %x %x %x\n", error, *start_blkno, *size);
194 */
195
196 return (error);
197 }
198
#ifdef SKIP_USED_BLOCKS
/*
 * This table is manually constructed
 *
 * skip[b] is the bit position of the lowest set (free) bit in the
 * bitmap byte value b, i.e. the number of consecutive clear (used)
 * bits counted from the least significant bit; skip[0] is 8 since
 * the whole byte is allocated.  ud_alloc_space_bmap() uses this to
 * hop over runs of allocated blocks a byte at a time.
 */
int8_t skip[256] = {
	8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
	4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
#endif
222
223 #define HDR_BLKS (24 * 8)
224
/*
 * Bitmap-based block allocation for a UDF partition.
 *
 * Scans the partition's unallocated-space bitmap (a set bit means a
 * free block) for a run of "blkcount" free blocks, trying the
 * "proximity" hint first when given.  On success the run is marked
 * used and returned via "start_blkno"/"size".  When "less_is_ok" is
 * set and no full-sized run exists, the largest run seen is claimed
 * instead.  Returns 0 on success, ENOSPC or EIO on failure.
 *
 * Block numbers inside this routine include the HDR_BLKS bitmap
 * header offset; callers see header-relative numbers (hence the
 * "- HDR_BLKS" adjustments on the way out).
 */
int32_t
ud_alloc_space_bmap(struct vfs *vfsp,
	struct ud_part *ud_part, uint32_t proximity,
	uint32_t blkcount, uint32_t *start_blkno,
	uint32_t *size, int32_t less_is_ok)
{
	struct buf *bp = NULL;
	struct udf_vfs *udf_vfsp;
	uint32_t old_loc, old_size, new_size;
	uint8_t *addr, *eaddr;
	uint32_t loop_count, loop_begin, loop_end;
	uint32_t bno, begin, dummy, temp, lbsz, bb_count;
	uint32_t bblk = 0, eblk = 0;
	int32_t fragmented;

	ud_printf("ud_alloc_space_bmap\n");

	ASSERT(ud_part);
	ASSERT(ud_part->udp_flags == UDP_BITMAPS);

	if (ud_part->udp_unall_len == 0) {
		return (ENOSPC);
	}
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	lbsz = udf_vfsp->udf_lbsize;
	bb_count = udf_vfsp->udf_lbsize << 3;	/* bitmap bits per block */

	if (proximity != 0) {
		/*
		 * directly try allocating
		 * at proximity
		 */
		temp = blkcount;
		if (ud_check_free_and_mark_used(vfsp,
		    ud_part, proximity, &temp) == 0) {
			if (temp != 0) {
				*start_blkno = proximity;
				*size = temp;
				return (0);
			}
		}
		*start_blkno = 0;
		*size = 0;
	}

	mutex_enter(&udf_vfsp->udf_lock);
	fragmented = udf_vfsp->udf_fragmented;
	mutex_exit(&udf_vfsp->udf_lock);
retry:
	old_loc = old_size = 0;

	/*
	 * Start the scan at the cluster-aligned block following the
	 * previous allocation; a second pass (loop_count == 2) covers
	 * the region before it.
	 */
	mutex_enter(&udf_vfsp->udf_lock);
	loop_begin = (ud_part->udp_last_alloc + CLSTR_MASK) & ~CLSTR_MASK;
	mutex_exit(&udf_vfsp->udf_lock);

	loop_end = ud_part->udp_nblocks + HDR_BLKS;
	loop_count = (loop_begin) ? 2 : 1;
	while (loop_count--) {
		for (bno = loop_begin + HDR_BLKS; bno + blkcount < loop_end; ) {

			/*
			 * Each bread is restricted to lbsize
			 * due to the way bread is implemented
			 */
			if ((bp == NULL) ||
			    ((eblk - bno) < blkcount)) {
				if (bp != NULL) {
					brelse(bp);
				}
				begin = ud_part->udp_unall_loc +
				    bno / bb_count;
				bp = ud_bread(vfsp->vfs_dev,
				    ud_xlate_to_daddr(udf_vfsp,
				    ud_part->udp_number,
				    begin, 1, &dummy) <<
				    udf_vfsp->udf_l2d_shift, lbsz);
				if (bp->b_flags & B_ERROR) {
					brelse(bp);
					return (EIO);
				}
				/* Bit range covered by this bitmap block. */
				bblk = begin * bb_count;
				eblk = bblk + bb_count;
				addr = (uint8_t *)bp->b_un.b_addr;
				eaddr = addr + bp->b_bcount;
			}

			/* Clamp the probe to the end of this bitmap block. */
			if (blkcount > (eblk - bno)) {
				temp = eblk - bno;
			} else {
				temp = blkcount;
			}
			if ((new_size = ud_check_free(addr, eaddr,
			    bno - bblk, temp)) == temp) {
				ud_mark_used(addr, bno - bblk, temp);
				bdwrite(bp);
				*start_blkno = bno - HDR_BLKS;
				*size = temp;
				mutex_enter(&udf_vfsp->udf_lock);
				ud_part->udp_last_alloc =
				    bno + temp - HDR_BLKS;
				mutex_exit(&udf_vfsp->udf_lock);
				return (0);
			}
			/* Remember the largest partial run seen so far. */
			if (less_is_ok) {
				if (old_size < new_size) {
					old_loc = bno - HDR_BLKS;
					old_size = new_size;
				}
			}
			if (new_size != 0) {
				bno += new_size;
			} else {
#ifdef SKIP_USED_BLOCKS
				/*
				 * Skipping 0's
				 * implement a allocated block skip
				 * using a while loop with an
				 * preinitialised array of 256 elements
				 * for number of blocks skipped
				 */
				bno &= ~3;
				while (skip[addr[(bno - bblk) >> 3]] == 8)
					bno += 8;
				bno += skip[addr[(bno - bblk) >> 3]];
#else
				bno++;
#endif
			}
			if (!fragmented) {
				bno = (bno + CLSTR_MASK) & ~CLSTR_MASK;
			}
		}
		if (bp != NULL) {
			brelse(bp);
			bp = NULL;
		}
		if (loop_count) {
			/* Second pass: scan from the partition start. */
			loop_end = loop_begin + HDR_BLKS;
			loop_begin = 0;
		}
	}
	if ((old_size == 0) && (!fragmented)) {
		/*
		 * The cluster-aligned scan found nothing; mark the
		 * file system fragmented and rescan block-by-block.
		 */
		mutex_enter(&udf_vfsp->udf_lock);
		fragmented = udf_vfsp->udf_fragmented = 1;
		mutex_exit(&udf_vfsp->udf_lock);
		goto retry;
	}
	if (less_is_ok && (old_size != 0)) {

		/*
		 * Check once again
		 * somebody else might have
		 * already allocated behind us
		 */
		if (ud_check_free_and_mark_used(vfsp,
		    ud_part, old_loc, &old_size) == 0) {
			if (old_size != 0) {
				*start_blkno = old_loc;
				*size = old_size;
				mutex_enter(&udf_vfsp->udf_lock);
				ud_part->udp_last_alloc = old_loc + old_size;
				mutex_exit(&udf_vfsp->udf_lock);
				return (0);
			}
		}

		/*
		 * Failed what ever the reason
		 */
		goto retry;
	}
	return (ENOSPC);
}
399
/*
 * Check that "*count" blocks beginning at block "start" are free and,
 * if all of them are, mark them used in the bitmap.
 * "start" is the block number from the beginning of the partition
 * ud_part.  "*count" is trimmed so the range does not cross the
 * single bitmap block read here.  Returns 0 on success (with *count
 * possibly reduced), 1 if the range is not entirely free, EIO on a
 * read error.
 */
int32_t
ud_check_free_and_mark_used(struct vfs *vfsp,
	struct ud_part *ud_part, uint32_t start, uint32_t *count)
{
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	uint32_t begin, dummy, bb_count;

	/*
	 * Adjust start for the header
	 */
	start += HDR_BLKS;
	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	bb_count = udf_vfsp->udf_lbsize << 3;	/* bitmap bits per block */

	/*
	 * Read just one block worth of bitmap
	 */
	begin = ud_part->udp_unall_loc + (start / bb_count);
	bp = ud_bread(vfsp->vfs_dev,
	    ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number,
	    begin, 1, &dummy) << udf_vfsp->udf_l2d_shift,
	    udf_vfsp->udf_lbsize);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}

	/*
	 * Adjust the count if necessary
	 * NOTE(review): the bit offset below is start - begin * bb_count,
	 * which matches the bblk computation in ud_alloc_space_bmap();
	 * it assumes udp_unall_loc-relative block numbers line up with
	 * the bitmap bit numbering — confirm against that routine.
	 */
	start -= begin * bb_count;
	if ((start + *count) > bb_count) {
		*count = bb_count - start;
		ASSERT(*count > 0);
	}
	if (ud_check_free((uint8_t *)bp->b_un.b_addr,
	    (uint8_t *)bp->b_un.b_addr + bp->b_bcount, start,
	    *count) != *count) {
		brelse(bp);
		return (1);
	}
	ud_mark_used((uint8_t *)bp->b_un.b_addr, start, *count);
	bdwrite(bp);

	return (0);
}
451
/*
 * Count how many of the "count" bits beginning at bit offset "start"
 * in the bitmap at "addr" are set (a set bit means the block is
 * free).  The scan stops early at the first clear (used) bit, or
 * when it would read at or beyond "eaddr".  Returns the number of
 * consecutive free bits found (<= count).
 */
int32_t
ud_check_free(uint8_t *addr, uint8_t *eaddr, uint32_t start, uint32_t count)
{
	/*
	 * Use an unsigned counter: the original int32_t loop index was
	 * compared against the unsigned "count", mixing signedness.
	 */
	uint32_t i;

	for (i = 0; i < count; i++) {
		/* Never read past the end of the bitmap buffer. */
		if (&addr[start >> 3] >= eaddr) {
			break;
		}
		if ((addr[start >> 3] & (1 << (start & 0x7))) == 0) {
			break;
		}
		start++;
	}
	return (i);
}
468
/*
 * Mark "count" blocks starting at bit offset "start" as allocated by
 * clearing their bits in the bitmap at "addr" (a clear bit means the
 * block is in use).  The caller guarantees the range lies within the
 * buffer.
 */
void
ud_mark_used(uint8_t *addr, uint32_t start, uint32_t count)
{
	uint32_t remaining;

	for (remaining = count; remaining != 0; remaining--) {
		addr[start >> 3] &= ~(1 << (start & 0x7));
		start++;
	}
}
479
/*
 * Mark "count" blocks starting at bit offset "start" as free by
 * setting their bits in the bitmap at "addr" (a set bit means the
 * block is free).  The caller guarantees the range lies within the
 * buffer.
 */
void
ud_mark_free(uint8_t *addr, uint32_t start, uint32_t count)
{
	uint32_t done;

	for (done = 0; done < count; done++, start++) {
		addr[start >> 3] |= (1 << (start & 0x7));
	}
}
490
491 /* ARGSUSED */
492 int32_t
ud_alloc_space_stbl(struct vfs * vfsp,struct ud_part * ud_part,uint32_t proximity,uint32_t blkcount,uint32_t * start_blkno,uint32_t * size,int32_t less_is_ok)493 ud_alloc_space_stbl(struct vfs *vfsp,
494 struct ud_part *ud_part, uint32_t proximity,
495 uint32_t blkcount, uint32_t *start_blkno,
496 uint32_t *size, int32_t less_is_ok)
497 {
498 uint16_t adesc;
499 uint32_t temp, sz;
500 int32_t error, index, count, larg_index, larg_sz;
501 struct buf *bp;
502 struct udf_vfs *udf_vfsp;
503 struct unall_space_ent *use;
504
505 ASSERT(ud_part);
506 ASSERT(ud_part->udp_flags == UDP_SPACETBLS);
507
508 ud_printf("ud_alloc_space_stbl\n");
509
510 if (ud_part->udp_unall_len == 0) {
511 return (ENOSPC);
512 }
513
514 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
515 ASSERT((ud_part->udp_unall_len + 40) <= udf_vfsp->udf_lbsize);
516
517 bp = ud_bread(vfsp->vfs_dev,
518 ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number,
519 ud_part->udp_unall_loc, 1, &temp), udf_vfsp->udf_lbsize);
520
521 use = (struct unall_space_ent *)bp->b_un.b_addr;
522 sz = SWAP_32(use->use_len_ad);
523 adesc = SWAP_16(use->use_icb_tag.itag_flags) & 0x7;
524 if (adesc == ICB_FLAG_SHORT_AD) {
525 struct short_ad *sad;
526
527 sad = (struct short_ad *)use->use_ad;
528 count = sz / sizeof (struct short_ad);
529
530 /*
531 * Search the entire list for
532 * a extent which can give the entire data
533 * Do only first fit
534 */
535 larg_index = larg_sz = 0;
536 for (index = 0; index < count; index++, sad++) {
537 temp = SWAP_32(sad->sad_ext_len) >>
538 udf_vfsp->udf_l2b_shift;
539 if (temp == blkcount) {
540 /*
541 * We found the right fit
542 * return the values and
543 * compress the table
544 */
545 less_is_ok = 1;
546 larg_index = index;
547 larg_sz = temp;
548 goto compress_sad;
549 } else if (temp > blkcount) {
550 /*
551 * We found an entry larger than the
552 * requirement. Change the start block
553 * number and the count to reflect the
554 * allocation
555 */
556 *start_blkno = SWAP_32(sad->sad_ext_loc);
557 *size = blkcount;
558 temp = (temp - blkcount) <<
559 udf_vfsp->udf_l2b_shift;
560 sad->sad_ext_len = SWAP_32(temp);
561 temp = SWAP_32(sad->sad_ext_loc) + blkcount;
562 sad->sad_ext_loc = SWAP_32(temp);
563 goto end;
564 }
565 /*
566 * Let us keep track of the largest
567 * extent available if less_is_ok.
568 */
569 if (less_is_ok) {
570 if (temp > larg_sz) {
571 larg_sz = temp;
572 larg_index = index;
573 }
574 }
575 }
576 compress_sad:
577 if ((less_is_ok) && (larg_sz != 0)) {
578 /*
579 * If we came here we could
580 * not find a extent to cover the entire size
581 * return whatever could be allocated
582 * and compress the table
583 */
584 sad = (struct short_ad *)use->use_ad;
585 sad += larg_index;
586 *start_blkno = SWAP_32(sad->sad_ext_loc);
587 *size = larg_sz;
588 for (index = larg_index; index < count;
589 index++, sad++) {
590 *sad = *(sad+1);
591 }
592 sz -= sizeof (struct short_ad);
593 use->use_len_ad = SWAP_32(sz);
594 } else {
595 error = ENOSPC;
596 }
597 goto end;
598 } else if (adesc == ICB_FLAG_LONG_AD) {
599 struct long_ad *lad;
600
601 lad = (struct long_ad *)use->use_ad;
602 count = sz / sizeof (struct long_ad);
603
604 /*
605 * Search the entire list for
606 * a extent which can give the entire data
607 * Do only first fit
608 */
609 larg_index = larg_sz = 0;
610 for (index = 0; index < count; index++, lad++) {
611 temp = SWAP_32(lad->lad_ext_len) >>
612 udf_vfsp->udf_l2b_shift;
613 if (temp == blkcount) {
614 /*
615 * We found the right fit
616 * return the values and
617 * compress the table
618 */
619 less_is_ok = 1;
620 larg_index = index;
621 larg_sz = temp;
622 goto compress_lad;
623 } else if (temp > blkcount) {
624 /*
625 * We found an entry larger than the
626 * requirement. Change the start block
627 * number and the count to reflect the
628 * allocation
629 */
630 *start_blkno = SWAP_32(lad->lad_ext_loc);
631 *size = blkcount;
632 temp = (temp - blkcount) <<
633 udf_vfsp->udf_l2b_shift;
634 lad->lad_ext_len = SWAP_32(temp);
635 temp = SWAP_32(lad->lad_ext_loc) + blkcount;
636 lad->lad_ext_loc = SWAP_32(temp);
637 goto end;
638 }
639 /*
640 * Let us keep track of the largest
641 * extent available if less_is_ok.
642 */
643 if (less_is_ok) {
644 if (temp > larg_sz) {
645 larg_sz = temp;
646 larg_index = index;
647 }
648 }
649 }
650 compress_lad:
651 if ((less_is_ok) && (larg_sz != 0)) {
652 /*
653 * If we came here we could
654 * not find a extent to cover the entire size
655 * return whatever could be allocated
656 * and compress the table
657 */
658 lad = (struct long_ad *)use->use_ad;
659 lad += larg_index;
660 *start_blkno = SWAP_32(lad->lad_ext_loc);
661 *size = larg_sz;
662 for (index = larg_index; index < count;
663 index++, lad++) {
664 *lad = *(lad+1);
665 }
666 sz -= sizeof (struct long_ad);
667 use->use_len_ad = SWAP_32(sz);
668 } else {
669 error = ENOSPC;
670 }
671 goto end;
672 } else {
673 error = ENOSPC;
674 }
675 end:
676 if (!error) {
677 bdwrite(bp);
678 } else {
679 brelse(bp);
680 }
681 return (error);
682 }
683
684
685 /*
686 * release blkcount blocks starting from beginblk
687 * Call appropriate bmap/space table fucntions
688 */
689 void
ud_free_space(struct vfs * vfsp,uint16_t prn,uint32_t beginblk,uint32_t blkcount)690 ud_free_space(struct vfs *vfsp, uint16_t prn,
691 uint32_t beginblk, uint32_t blkcount)
692 {
693 int32_t i, error;
694 struct ud_part *ud_part;
695 struct udf_vfs *udf_vfsp;
696
697 ud_printf("ud_free_space\n");
698
699 if (blkcount == 0) {
700 return;
701 }
702
703 udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
704 ud_part = udf_vfsp->udf_parts;
705 for (i = 0; i < udf_vfsp->udf_npart; i++) {
706 if (prn == ud_part->udp_number) {
707 break;
708 }
709 ud_part ++;
710 }
711
712 if (i == udf_vfsp->udf_npart) {
713 return;
714 }
715
716 if (ud_part->udp_flags == UDP_BITMAPS) {
717 error = ud_free_space_bmap(vfsp, ud_part, beginblk, blkcount);
718 } else {
719 error = ud_free_space_stbl(vfsp, ud_part, beginblk, blkcount);
720 }
721
722 if (error) {
723 udf_vfsp->udf_mark_bad = 1;
724 }
725 }
726
/*
 * If there is a freed table then
 * release blocks to the freed table,
 * otherwise release to the unallocated table.
 * Find out the offset into the bitmap and
 * mark the blocks as free blocks.
 * Returns 0 on success, ENOSPC or EIO on failure.
 */
int32_t
ud_free_space_bmap(struct vfs *vfsp,
	struct ud_part *ud_part,
	uint32_t beginblk, uint32_t blkcount)
{
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	uint32_t block, begin, end, blkno, count, map_end_blk, dummy;

	ud_printf("ud_free_space_bmap\n");

	ASSERT(ud_part);
	ASSERT(ud_part->udp_flags == UDP_BITMAPS);
	/*
	 * prom_printf("%x %x\n", udblock, udcount);
	 */

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	if ((ud_part->udp_freed_len == 0) &&
	    (ud_part->udp_unall_len == 0)) {
		return (ENOSPC);
	}
	/*
	 * decide unallocated/freed table to use
	 */
	if (ud_part->udp_freed_len == 0) {
		begin = ud_part->udp_unall_loc;
		map_end_blk = ud_part->udp_unall_len << 3;
	} else {
		begin = ud_part->udp_freed_loc;
		map_end_blk = ud_part->udp_freed_len << 3;
	}

	/* Refuse a range that falls beyond the bitmap. */
	if (beginblk + blkcount > map_end_blk) {
		return (ENOSPC);
	}

	/* adjust for the bitmap header */
	beginblk += HDR_BLKS;

	/*
	 * The range may span several bitmap blocks; compute the first
	 * and last bitmap block numbers to walk.
	 */
	end = begin + ((beginblk + blkcount) / (udf_vfsp->udf_lbsize << 3));
	begin += (beginblk / (udf_vfsp->udf_lbsize << 3));

	for (block = begin; block <= end; block++) {

		bp = ud_bread(vfsp->vfs_dev,
		    ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number, block, 1,
		    &dummy) << udf_vfsp->udf_l2d_shift, udf_vfsp->udf_lbsize);
		if (bp->b_flags & B_ERROR) {
			brelse(bp);
			return (EIO);
		}
		ASSERT(dummy == 1);

		mutex_enter(&udf_vfsp->udf_lock);

		/*
		 * add freed blocks to the bitmap
		 * NOTE(review): the bit offset is beginblk minus
		 * block * bits-per-block, the same addressing
		 * convention used in ud_check_free_and_mark_used();
		 * confirm the two stay in sync.
		 */
		blkno = beginblk - (block * (udf_vfsp->udf_lbsize << 3));
		if (blkno + blkcount > (udf_vfsp->udf_lbsize << 3)) {
			/* Only free the part within this bitmap block. */
			count = (udf_vfsp->udf_lbsize << 3) - blkno;
		} else {
			count = blkcount;
		}

		/*
		 * if (begin != end) {
		 *	printf("%x %x %x %x %x %x\n",
		 *		begin, end, block, blkno, count);
		 *	printf("%x %x %x\n", bp->b_un.b_addr, blkno, count);
		 * }
		 */

		ud_mark_free((uint8_t *)bp->b_un.b_addr, blkno, count);

		beginblk += count;
		blkcount -= count;

		/*
		 * Blocks returned to the unallocated table are
		 * immediately reusable; blocks in the freed table are
		 * not counted free until they are reclaimed.
		 */
		if (ud_part->udp_freed_len == 0) {
			ud_part->udp_nfree += count;
			udf_vfsp->udf_freeblks += count;
		}
		mutex_exit(&udf_vfsp->udf_lock);

		bdwrite(bp);
	}

	return (0);
}
825
826
/* ARGSUSED */
/*
 * Return freed blocks to a space-table partition.
 *
 * Search the entire table for an entry with which we can merge the
 * freed range [beginblk, beginblk + blkcount) — either extending an
 * extent's tail or prepending to its head.  Otherwise create a new
 * entry at the end of the table.  Returns 0 on success or ENOSPC.
 */
int32_t
ud_free_space_stbl(struct vfs *vfsp,
	struct ud_part *ud_part,
	uint32_t beginblk, uint32_t blkcount)
{
	uint16_t adesc;
	int32_t error = 0, index, count;
	uint32_t block, dummy, sz;
	struct buf *bp;
	struct udf_vfs *udf_vfsp;
	struct unall_space_ent *use;

	ud_printf("ud_free_space_stbl\n");

	ASSERT(ud_part);
	ASSERT(ud_part->udp_flags == UDP_SPACETBLS);

	if ((ud_part->udp_freed_len == 0) && (ud_part->udp_unall_len == 0)) {
		return (ENOSPC);
	}

	/* Prefer the freed-space table when one exists. */
	if (ud_part->udp_freed_len != 0) {
		block = ud_part->udp_freed_loc;
	} else {
		block = ud_part->udp_unall_loc;
	}

	udf_vfsp = (struct udf_vfs *)vfsp->vfs_data;
	ASSERT((ud_part->udp_unall_len + 40) <= udf_vfsp->udf_lbsize);

	bp = ud_bread(vfsp->vfs_dev,
	    ud_xlate_to_daddr(udf_vfsp, ud_part->udp_number, block, 1, &dummy),
	    udf_vfsp->udf_lbsize);

	use = (struct unall_space_ent *)bp->b_un.b_addr;
	sz = SWAP_32(use->use_len_ad);
	adesc = SWAP_16(use->use_icb_tag.itag_flags) & 0x7;
	if (adesc == ICB_FLAG_SHORT_AD) {
		struct short_ad *sad;

		sad = (struct short_ad *)use->use_ad;
		count = sz / sizeof (struct short_ad);
		/*
		 * Check if the blocks being freed
		 * are continuous with any of the
		 * existing extents
		 */
		for (index = 0; index < count; index++, sad++) {
			if (beginblk == (SWAP_32(sad->sad_ext_loc) +
			    (SWAP_32(sad->sad_ext_len) /
			    udf_vfsp->udf_lbsize))) {
				/* Freed range follows this extent; extend. */
				dummy = SWAP_32(sad->sad_ext_len) +
				    blkcount * udf_vfsp->udf_lbsize;
				sad->sad_ext_len = SWAP_32(dummy);
				goto end;
			} else if ((beginblk + blkcount) ==
			    SWAP_32(sad->sad_ext_loc)) {
				/*
				 * Freed range immediately precedes this
				 * extent; move its start back.
				 * NOTE(review): the extent length is not
				 * increased here — confirm whether that
				 * is intentional.
				 */
				sad->sad_ext_loc = SWAP_32(beginblk);
				goto end;
			}
		}

		/*
		 * We need to add a new entry
		 * Check if we have space.
		 */
		if ((40 + sz + sizeof (struct short_ad)) >
		    udf_vfsp->udf_lbsize) {
			error = ENOSPC;
			goto end;
		}

		/*
		 * We have enough space
		 * just add the entry at the end
		 */
		dummy = SWAP_32(use->use_len_ad);
		sad = (struct short_ad *)&use->use_ad[dummy];
		sz = blkcount * udf_vfsp->udf_lbsize;
		sad->sad_ext_len = SWAP_32(sz);
		sad->sad_ext_loc = SWAP_32(beginblk);
		dummy += sizeof (struct short_ad);
		use->use_len_ad = SWAP_32(dummy);
	} else if (adesc == ICB_FLAG_LONG_AD) {
		struct long_ad *lad;

		lad = (struct long_ad *)use->use_ad;
		count = sz / sizeof (struct long_ad);
		/*
		 * Check if the blocks being freed
		 * are continuous with any of the
		 * existing extents
		 */
		for (index = 0; index < count; index++, lad++) {
			if (beginblk == (SWAP_32(lad->lad_ext_loc) +
			    (SWAP_32(lad->lad_ext_len) /
			    udf_vfsp->udf_lbsize))) {
				/* Freed range follows this extent; extend. */
				dummy = SWAP_32(lad->lad_ext_len) +
				    blkcount * udf_vfsp->udf_lbsize;
				lad->lad_ext_len = SWAP_32(dummy);
				goto end;
			} else if ((beginblk + blkcount) ==
			    SWAP_32(lad->lad_ext_loc)) {
				/* See the NOTE(review) above. */
				lad->lad_ext_loc = SWAP_32(beginblk);
				goto end;
			}
		}

		/*
		 * We need to add a new entry
		 * Check if we have space.
		 */
		if ((40 + sz + sizeof (struct long_ad)) >
		    udf_vfsp->udf_lbsize) {
			error = ENOSPC;
			goto end;
		}

		/*
		 * We have enough space
		 * just add the entry at the end
		 */
		dummy = SWAP_32(use->use_len_ad);
		lad = (struct long_ad *)&use->use_ad[dummy];
		sz = blkcount * udf_vfsp->udf_lbsize;
		lad->lad_ext_len = SWAP_32(sz);
		lad->lad_ext_loc = SWAP_32(beginblk);
		lad->lad_ext_prn = SWAP_16(ud_part->udp_number);
		dummy += sizeof (struct long_ad);
		use->use_len_ad = SWAP_32(dummy);
	} else {
		/* Unsupported allocation-descriptor type. */
		error = ENOSPC;
		goto end;
	}

end:
	if (!error) {
		bdwrite(bp);
	} else {
		brelse(bp);
	}
	return (error);
}
977
/* ARGSUSED */
/*
 * Allocate and initialize an on-disk file entry (the UDF analogue of
 * an inode) in the same partition as the parent inode "pip", applying
 * the initial attributes from "vap" under credentials "cr".  On
 * success the in-core inode is returned through "ipp".  Returns 0 on
 * success or an errno.
 */
int32_t
ud_ialloc(struct ud_inode *pip,
	struct ud_inode **ipp, struct vattr *vap, struct cred *cr)
{
	int32_t err;
	uint32_t blkno, size, loc;
	uint32_t imode, ichar, lbsize, ea_len, dummy;
	uint16_t prn, flags;
	struct buf *bp;
	struct file_entry *fe;
	struct timespec32 time;
	struct timespec32 settime;
	struct icb_tag *icb;
	struct ext_attr_hdr *eah;
	struct dev_spec_ear *ds;
	struct udf_vfs *udf_vfsp;
	timestruc_t now;
	uid_t uid;
	gid_t gid;


	ASSERT(pip);
	ASSERT(vap != NULL);

	ud_printf("ud_ialloc\n");

	/* Reject 64-bit timestamps that cannot fit in timespec32. */
	if (((vap->va_mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
	    ((vap->va_mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime)))
		return (EOVERFLOW);

	udf_vfsp = pip->i_udf;
	lbsize = udf_vfsp->udf_lbsize;
	prn = pip->i_icb_prn;

	/* Allocate a single metadata block to hold the file entry. */
	if ((err = ud_alloc_space(pip->i_vfs, prn,
	    0, 1, &blkno, &size, 0, 1)) != 0) {
		return (err);
	}
	loc = ud_xlate_to_daddr(udf_vfsp, prn, blkno, 1, &dummy);
	ASSERT(dummy == 1);

	bp = ud_bread(pip->i_dev, loc << udf_vfsp->udf_l2d_shift, lbsize);
	if (bp->b_flags & B_ERROR) {
		/*
		 * NOTE(review): bp is not brelse()d on this path —
		 * looks like a buffer leak; confirm.
		 */
		ud_free_space(pip->i_vfs, prn, blkno, size);
		return (EIO);
	}
	bzero(bp->b_un.b_addr, bp->b_bcount);
	fe = (struct file_entry *)bp->b_un.b_addr;

	uid = crgetuid(cr);
	fe->fe_uid = SWAP_32(uid);

	/*
	 * To determine the group-id of the created file:
	 * 1) If the gid is set in the attribute list (non-Sun & pre-4.0
	 *	clients are not likely to set the gid), then use it if
	 *	the process is privileged, belongs to the target group,
	 *	or the group is the same as the parent directory.
	 * 2) If the filesystem was not mounted with the Old-BSD-compatible
	 *	GRPID option, and the directory's set-gid bit is clear,
	 *	then use the process's gid.
	 * 3) Otherwise, set the group-id to the gid of the parent directory.
	 */
	if ((vap->va_mask & AT_GID) &&
	    ((vap->va_gid == pip->i_gid) || groupmember(vap->va_gid, cr) ||
	    secpolicy_vnode_create_gid(cr) == 0)) {
		/*
		 * XXX - is this only the case when a 4.0 NFS client, or a
		 * client derived from that code, makes a call over the wire?
		 */
		fe->fe_gid = SWAP_32(vap->va_gid);
	} else {
		gid = crgetgid(cr);
		fe->fe_gid = (pip->i_char & ISGID) ?
		    SWAP_32(pip->i_gid) : SWAP_32(gid);
	}

	imode = MAKEIMODE(vap->va_type, vap->va_mode);
	ichar = imode & (VSUID | VSGID | VSVTX);
	imode = UD_UPERM2DPERM(imode);

	/*
	 * Under solaris only the owner can
	 * change the attributes of files so set
	 * the change attribute bit only for user
	 */
	imode |= IATTR;

	/*
	 * File delete permissions on Solaris are
	 * the permissions on the directory but not the file
	 * when we create a file just inherit the directorys
	 * write permission to be the file delete permissions
	 * Atleast we will be consistent in the files we create
	 */
	imode |= (pip->i_perm & (IWRITE | IWRITE >> 5 | IWRITE >> 10)) << 3;

	fe->fe_perms = SWAP_32(imode);

	/*
	 * udf does not have a "." entry in dir's
	 * so even directories have only one link
	 */
	fe->fe_lcount = SWAP_16(1);

	fe->fe_info_len = 0;
	fe->fe_lbr = 0;

	/* Stamp access/modification times from vap or "now". */
	gethrestime(&now);
	time.tv_sec = now.tv_sec;
	time.tv_nsec = now.tv_nsec;
	if (vap->va_mask & AT_ATIME) {
		TIMESPEC_TO_TIMESPEC32(&settime, &vap->va_atime)
		ud_utime2dtime(&settime, &fe->fe_acc_time);
	} else
		ud_utime2dtime(&time, &fe->fe_acc_time);
	if (vap->va_mask & AT_MTIME) {
		TIMESPEC_TO_TIMESPEC32(&settime, &vap->va_mtime)
		ud_utime2dtime(&settime, &fe->fe_mod_time);
	} else
		ud_utime2dtime(&time, &fe->fe_mod_time);
	ud_utime2dtime(&time, &fe->fe_attr_time);

	ud_update_regid(&fe->fe_impl_id);

	/* Hand out the next unique id under the fs lock. */
	mutex_enter(&udf_vfsp->udf_lock);
	fe->fe_uniq_id = SWAP_64(udf_vfsp->udf_maxuniq);
	udf_vfsp->udf_maxuniq++;
	mutex_exit(&udf_vfsp->udf_lock);

	/*
	 * Device special files carry the major/minor numbers in a
	 * device-specification extended attribute.
	 */
	ea_len = 0;
	if ((vap->va_type == VBLK) || (vap->va_type == VCHR)) {
		eah = (struct ext_attr_hdr *)fe->fe_spec;
		ea_len = (sizeof (struct ext_attr_hdr) + 3) & ~3;
		eah->eah_ial = SWAP_32(ea_len);

		ds = (struct dev_spec_ear *)&fe->fe_spec[ea_len];
		ea_len += ud_make_dev_spec_ear(ds,
		    getmajor(vap->va_rdev), getminor(vap->va_rdev));
		ea_len = (ea_len + 3) & ~3;
		eah->eah_aal = SWAP_32(ea_len);
		ud_make_tag(udf_vfsp, &eah->eah_tag,
		    UD_EXT_ATTR_HDR, blkno, ea_len);
	}

	fe->fe_len_ear = SWAP_32(ea_len);
	fe->fe_len_adesc = 0;

	icb = &fe->fe_icb_tag;
	icb->itag_prnde = 0;
	icb->itag_strategy = SWAP_16(STRAT_TYPE4);
	icb->itag_param = 0;
	icb->itag_max_ent = SWAP_16(1);
	switch (vap->va_type) {
	case VREG :
		icb->itag_ftype = FTYPE_FILE;
		break;
	case VDIR :
		icb->itag_ftype = FTYPE_DIRECTORY;
		break;
	case VBLK :
		icb->itag_ftype = FTYPE_BLOCK_DEV;
		break;
	case VCHR :
		icb->itag_ftype = FTYPE_CHAR_DEV;
		break;
	case VLNK :
		icb->itag_ftype = FTYPE_SYMLINK;
		break;
	case VFIFO :
		icb->itag_ftype = FTYPE_FIFO;
		break;
	case VSOCK :
		icb->itag_ftype = FTYPE_C_ISSOCK;
		break;
	default :
		/*
		 * NOTE(review): err is 0 here (set by the successful
		 * ud_alloc_space above), so this path returns 0 with
		 * *ipp unset — confirm whether it is reachable.
		 */
		brelse(bp);
		goto error;
	}
	icb->itag_lb_loc = 0;
	icb->itag_lb_prn = 0;
	flags = ICB_FLAG_ONE_AD;
	/* Directories inherit set-gid from the parent (BSD semantics). */
	if ((pip->i_char & ISGID) && (vap->va_type == VDIR)) {
		ichar |= ISGID;
	} else {
		if ((ichar & ISGID) &&
		    secpolicy_vnode_setids_setgids(cr,
		    (gid_t)SWAP_32(fe->fe_gid)) != 0) {
			ichar &= ~ISGID;
		}
	}
	if (ichar & ISUID) {
		flags |= ICB_FLAG_SETUID;
	}
	if (ichar & ISGID) {
		flags |= ICB_FLAG_SETGID;
	}
	if (ichar & ISVTX) {
		flags |= ICB_FLAG_STICKY;
	}
	icb->itag_flags = SWAP_16(flags);
	ud_make_tag(udf_vfsp, &fe->fe_tag, UD_FILE_ENTRY, blkno,
	    offsetof(struct file_entry, fe_spec) +
	    SWAP_32(fe->fe_len_ear) + SWAP_32(fe->fe_len_adesc));

	BWRITE2(bp);

	mutex_enter(&udf_vfsp->udf_lock);
	if (vap->va_type == VDIR) {
		udf_vfsp->udf_ndirs++;
	} else {
		udf_vfsp->udf_nfiles++;
	}
	mutex_exit(&udf_vfsp->udf_lock);

#ifdef DEBUG
	{
		struct ud_inode *ip;

		if ((ip = ud_search_icache(pip->i_vfs, prn, blkno)) != NULL) {
			cmn_err(CE_NOTE, "duplicate %p %x\n",
			    (void *)ip, (uint32_t)ip->i_icb_lbano);
		}
	}
#endif

	if ((err = ud_iget(pip->i_vfs, prn, blkno, ipp, bp, cr)) != 0) {
error:
		ud_free_space(pip->i_vfs, prn, blkno, size);
		return (err);
	}

	return (0);

	/*
	 * NOTE(review): no goto targets this label in the visible
	 * code; it appears to be dead.
	 */
noinodes:
	cmn_err(CE_NOTE, "%s: out of inodes\n", pip->i_udf->udf_volid);
	return (ENOSPC);
}
1217
1218
1219 void
ud_ifree(struct ud_inode * ip,vtype_t type)1220 ud_ifree(struct ud_inode *ip, vtype_t type)
1221 {
1222 struct udf_vfs *udf_vfsp;
1223 struct buf *bp;
1224
1225 ud_printf("ud_ifree\n");
1226
1227 if (ip->i_vfs == NULL) {
1228 return;
1229 }
1230
1231 udf_vfsp = (struct udf_vfs *)ip->i_vfs->vfs_data;
1232 bp = ud_bread(ip->i_dev, ip->i_icb_lbano <<
1233 udf_vfsp->udf_l2d_shift, udf_vfsp->udf_lbsize);
1234 if (bp->b_flags & B_ERROR) {
1235 /*
1236 * Error get rid of bp
1237 */
1238 brelse(bp);
1239 } else {
1240 /*
1241 * Just trash the inode
1242 */
1243 bzero(bp->b_un.b_addr, 0x10);
1244 BWRITE(bp);
1245 }
1246 ud_free_space(ip->i_vfs, ip->i_icb_prn, ip->i_icb_block, 1);
1247 mutex_enter(&udf_vfsp->udf_lock);
1248 if (type == VDIR) {
1249 if (udf_vfsp->udf_ndirs > 1) {
1250 udf_vfsp->udf_ndirs--;
1251 }
1252 } else {
1253 if (udf_vfsp->udf_nfiles > 0) {
1254 udf_vfsp->udf_nfiles --;
1255 }
1256 }
1257 mutex_exit(&udf_vfsp->udf_lock);
1258 }
1259
1260
1261 /*
1262 * Free storage space associated with the specified inode. The portion
1263 * to be freed is specified by lp->l_start and lp->l_len (already
1264 * normalized to a "whence" of 0).
1265 *
1266 * This is an experimental facility whose continued existence is not
1267 * guaranteed. Currently, we only support the special case
1268 * of l_len == 0, meaning free to end of file.
1269 *
1270 * Blocks are freed in reverse order. This FILO algorithm will tend to
1271 * maintain a contiguous free list much longer than FIFO.
1272 * See also ufs_itrunc() in ufs_inode.c.
1273 *
1274 * Bug: unused bytes in the last retained block are not cleared.
1275 * This may result in a "hole" in the file that does not read as zeroes.
1276 */
int32_t
ud_freesp(struct vnode *vp,
	struct flock64 *lp,
	int32_t flag, struct cred *cr)
{
	int32_t i;
	struct ud_inode *ip = VTOI(vp);
	int32_t error;

	ASSERT(vp->v_type == VREG);
	ASSERT(lp->l_start >= (offset_t)0);	/* checked by convoff */

	ud_printf("udf_freesp\n");

	/* Only the l_len == 0 case (free to end of file) is supported. */
	if (lp->l_len != 0) {
		return (EINVAL);
	}

	rw_enter(&ip->i_contents, RW_READER);
	/* Truncating to the current size is a no-op. */
	if (ip->i_size == (u_offset_t)lp->l_start) {
		rw_exit(&ip->i_contents);
		return (0);
	}

	/*
	 * Check if there is any active mandatory lock on the
	 * range that will be truncated/expanded.
	 */
	if (MANDLOCK(vp, ip->i_char)) {
		offset_t save_start;

		/*
		 * reclock() consumes l_start/l_type/l_sysid/l_pid, so
		 * remember the caller's l_start and restore it afterwards.
		 */
		save_start = lp->l_start;

		if (ip->i_size < lp->l_start) {
			/*
			 * "Truncate up" case: need to make sure there
			 * is no lock beyond current end-of-file. To
			 * do so, we need to set l_start to the size
			 * of the file temporarily.
			 */
			lp->l_start = ip->i_size;
		}
		lp->l_type = F_WRLCK;
		lp->l_sysid = 0;
		lp->l_pid = ttoproc(curthread)->p_pid;
		/* Honor non-blocking open flags when probing the lock */
		i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
		/* Drop i_contents: reclock may sleep waiting for the lock */
		rw_exit(&ip->i_contents);
		if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
		    lp->l_type != F_UNLCK) {
			/* A conflicting mandatory lock exists (or error) */
			return (i ? i : EAGAIN);
		}
		rw_enter(&ip->i_contents, RW_READER);

		lp->l_start = save_start;
	}
	/*
	 * Make sure a write isn't in progress (allocating blocks)
	 * by acquiring i_rwlock (we promised ufs_bmap we wouldn't
	 * truncate while it was allocating blocks).
	 * Grab the locks in the right order: i_rwlock must be taken
	 * before i_contents, so drop and reacquire.
	 */
	rw_exit(&ip->i_contents);
	rw_enter(&ip->i_rwlock, RW_WRITER);
	rw_enter(&ip->i_contents, RW_WRITER);
	error = ud_itrunc(ip, lp->l_start, 0, cr);
	rw_exit(&ip->i_contents);
	rw_exit(&ip->i_rwlock);
	return (error);
}
1346
1347
1348
1349 /*
1350 * Cache is implemented by
1351 * allocating a cluster of blocks
1352 */
1353 int32_t
ud_alloc_from_cache(struct udf_vfs * udf_vfsp,struct ud_part * part,uint32_t * blkno)1354 ud_alloc_from_cache(struct udf_vfs *udf_vfsp,
1355 struct ud_part *part, uint32_t *blkno)
1356 {
1357 uint32_t bno, sz;
1358 int32_t error, index, free = 0;
1359
1360 ud_printf("ud_alloc_from_cache\n");
1361
1362 ASSERT(udf_vfsp);
1363
1364 mutex_enter(&udf_vfsp->udf_lock);
1365 if (part->udp_cache_count == 0) {
1366 mutex_exit(&udf_vfsp->udf_lock);
1367 /* allocate new cluster */
1368 if ((error = ud_alloc_space(udf_vfsp->udf_vfs,
1369 part->udp_number, 0, CLSTR_SIZE, &bno, &sz, 1, 0)) != 0) {
1370 return (error);
1371 }
1372 if (sz == 0) {
1373 return (ENOSPC);
1374 }
1375 mutex_enter(&udf_vfsp->udf_lock);
1376 if (part->udp_cache_count == 0) {
1377 for (index = 0; index < sz; index++, bno++) {
1378 part->udp_cache[index] = bno;
1379 }
1380 part->udp_cache_count = sz;
1381 } else {
1382 free = 1;
1383 }
1384 }
1385 part->udp_cache_count--;
1386 *blkno = part->udp_cache[part->udp_cache_count];
1387 mutex_exit(&udf_vfsp->udf_lock);
1388 if (free) {
1389 ud_free_space(udf_vfsp->udf_vfs, part->udp_number, bno, sz);
1390 }
1391 return (0);
1392 }
1393
1394 /*
1395 * Will be called from unmount
1396 */
1397 int32_t
ud_release_cache(struct udf_vfs * udf_vfsp)1398 ud_release_cache(struct udf_vfs *udf_vfsp)
1399 {
1400 int32_t i, error = 0;
1401 struct ud_part *part;
1402 uint32_t start, nblks;
1403
1404 ud_printf("ud_release_cache\n");
1405
1406 mutex_enter(&udf_vfsp->udf_lock);
1407 part = udf_vfsp->udf_parts;
1408 for (i = 0; i < udf_vfsp->udf_npart; i++, part++) {
1409 if (part->udp_cache_count) {
1410 nblks = part->udp_cache_count;
1411 start = part->udp_cache[0];
1412 part->udp_cache_count = 0;
1413 mutex_exit(&udf_vfsp->udf_lock);
1414 ud_free_space(udf_vfsp->udf_vfs,
1415 part->udp_number, start, nblks);
1416 mutex_enter(&udf_vfsp->udf_lock);
1417 }
1418 }
1419 mutex_exit(&udf_vfsp->udf_lock);
1420 return (error);
1421 }
1422