/*-
 * Copyright (c) 2001, 2002 Scott Long <scottl@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/* udf_vnops.c */
/* Take care of the vnode side of things */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/stat.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/dirent.h>
#include <sys/queue.h>
#include <sys/unistd.h>

#include <vm/uma.h>

#include <fs/udf/ecma167-udf.h>
#include <fs/udf/osta.h>
#include <fs/udf/udf.h>

static int udf_access(struct vop_access_args *);
static int udf_getattr(struct vop_getattr_args *);
static int udf_ioctl(struct vop_ioctl_args *);
static int udf_pathconf(struct vop_pathconf_args *);
static int udf_read(struct vop_read_args *);
static int udf_readdir(struct vop_readdir_args *);
static int udf_readlink(struct vop_readlink_args *ap);
static int udf_strategy(struct vop_strategy_args *);
static int udf_print(struct vop_print_args *);
static int udf_bmap(struct vop_bmap_args *);
static int udf_lookup(struct vop_cachedlookup_args *);
static int udf_reclaim(struct vop_reclaim_args *);
static void udf_dumpblock(void *, int) __unused;
static int udf_readatoffset(struct udf_node *, int *, int, struct buf **, uint8_t **);
static int udf_bmap_internal(struct udf_node *, uint32_t, daddr_t *, uint32_t *);

vop_t **udf_vnodeop_p;
static struct vnodeopv_entry_desc udf_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *) vop_defaultop },
	{ &vop_access_desc, (vop_t *) udf_access },
	{ &vop_bmap_desc, (vop_t *) udf_bmap },
	{ &vop_cachedlookup_desc, (vop_t *) udf_lookup },
	{ &vop_getattr_desc, (vop_t *) udf_getattr },
	{ &vop_ioctl_desc, (vop_t *) udf_ioctl },
	{ &vop_islocked_desc, (vop_t *) vop_stdislocked },
	{ &vop_lock_desc, (vop_t *) vop_stdlock },
	{ &vop_lookup_desc, (vop_t *) vfs_cache_lookup },
	{ &vop_pathconf_desc, (vop_t *) udf_pathconf },
	{ &vop_print_desc, (vop_t *) udf_print },
	{ &vop_read_desc, (vop_t *) udf_read },
	{ &vop_readdir_desc, (vop_t *) udf_readdir },
	{ &vop_readlink_desc, (vop_t *) udf_readlink },
	{ &vop_reclaim_desc, (vop_t *) udf_reclaim },
	{ &vop_strategy_desc, (vop_t *) udf_strategy },
	{ &vop_unlock_desc, (vop_t *) vop_stdunlock },
	{ NULL, NULL }
};
static struct vnodeopv_desc udf_vnodeop_opv_desc =
	{ &udf_vnodeop_p, udf_vnodeop_entries };
VNODEOP_SET(udf_vnodeop_opv_desc);

MALLOC_DEFINE(M_UDFFID, "UDF FID", "UDF FileId structure");

/* Look up a udf_node based on the ino_t passed in and return its vnode */
int
udf_hashlookup(struct udf_mnt *udfmp, ino_t id, int flags, struct vnode **vpp)
{
	struct udf_node *node;
	int error;

	*vpp = NULL;

loop:
	mtx_lock(&udfmp->hash_mtx);
	TAILQ_FOREACH(node, &udfmp->udf_tqh, tq) {
		if (node->hash_id == id) {
			VI_LOCK(node->i_vnode);
			mtx_unlock(&udfmp->hash_mtx);
			error = vget(node->i_vnode, flags | LK_INTERLOCK,
			    curthread);
			if (error == ENOENT)
				goto loop;
			if (error)
				return (error);
			*vpp = node->i_vnode;
			return (0);
		}
	}

	mtx_unlock(&udfmp->hash_mtx);
	return (0);
}

int
udf_hashins(struct udf_node *node)
{
	struct udf_mnt *udfmp;

	udfmp = node->udfmp;

	mtx_lock(&udfmp->hash_mtx);
	TAILQ_INSERT_TAIL(&udfmp->udf_tqh, node, tq);
	mtx_unlock(&udfmp->hash_mtx);
	lockmgr(&node->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0,
	    curthread);

	return (0);
}

int
udf_hashrem(struct udf_node *node)
{
	struct udf_mnt *udfmp;

	udfmp = node->udfmp;

	mtx_lock(&udfmp->hash_mtx);
	TAILQ_REMOVE(&udfmp->udf_tqh, node, tq);
	mtx_unlock(&udfmp->hash_mtx);

	return (0);
}

int
udf_allocv(struct mount *mp, struct vnode **vpp, struct thread *td)
{
	int error;
	struct vnode *vp;

	error = getnewvnode(VT_UDF, mp, udf_vnodeop_p, &vp);
	if (error) {
		printf("udf_allocv: failed to allocate new vnode\n");
		return (error);
	}

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	*vpp = vp;
	return (0);
}

/* Convert file entry permission (5 bits per owner/group/user) to a mode_t */
static mode_t
udf_permtomode(struct udf_node *node)
{
	uint32_t perm;
	uint32_t flags;
	mode_t mode;

	perm = node->fentry->perm;
	flags = node->fentry->icbtag.flags;

	mode = perm & UDF_FENTRY_PERM_USER_MASK;
	mode |= ((perm & UDF_FENTRY_PERM_GRP_MASK) >> 2);
	mode |= ((perm & UDF_FENTRY_PERM_OWNER_MASK) >> 4);
	mode |= ((flags & UDF_ICB_TAG_FLAGS_STICKY) << 4);
	mode |= ((flags & UDF_ICB_TAG_FLAGS_SETGID) << 6);
	mode |= ((flags & UDF_ICB_TAG_FLAGS_SETUID) << 8);

	return (mode);
}

static int
udf_access(struct vop_access_args *a)
{
	struct vnode *vp;
	struct udf_node *node;
	mode_t a_mode, mode;

	vp = a->a_vp;
	node = VTON(vp);
	a_mode = a->a_mode;

	if (a_mode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			return (EROFS);
			/* NOT REACHED */
		default:
			break;
		}
	}

	mode = udf_permtomode(node);

	return (vaccess(vp->v_type, mode, node->fentry->uid, node->fentry->gid,
	    a_mode, a->a_cred, NULL));
}

static int mon_lens[2][12] = {
	{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
	{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}
};

static int
udf_isaleapyear(int year)
{
	int i;

	i = (year % 4) ? 0 : 1;
	i &= (year % 100) ? 1 : 0;
	i |= (year % 400) ? 0 : 1;

	return i;
}

/*
 * XXX This is just a rough hack. Daylight savings isn't calculated and tv_nsec
 * is ignored.
 * Timezone calculation compliments of Julian Elischer <julian@elischer.org>.
 */
static void
udf_timetotimespec(struct timestamp *time, struct timespec *t)
{
	int i, lpyear, daysinyear;
	union {
		uint16_t	u_tz_offset;
		int16_t		s_tz_offset;
	} tz;

	t->tv_nsec = 0;

	/* DirectCD seems to like using bogus year values */
	if (time->year < 1970) {
		t->tv_sec = 0;
		return;
	}

	/* Calculate the time and day */
	t->tv_sec = time->second;
	t->tv_sec += time->minute * 60;
	t->tv_sec += time->hour * 3600;
	t->tv_sec += time->day * 3600 * 24;

	/* Calculate the month */
	lpyear = udf_isaleapyear(time->year);
	for (i = 1; i < time->month; i++)
		t->tv_sec += mon_lens[lpyear][i] * 3600 * 24;

	/* Speed up the calculation */
	if (time->year > 1979)
		t->tv_sec += 315532800;
	if (time->year > 1989)
		t->tv_sec += 315619200;
	if (time->year > 1999)
		t->tv_sec += 315532800;
	for (i = 2000; i < time->year; i++) {
		daysinyear = udf_isaleapyear(i) + 365;
		t->tv_sec += daysinyear * 3600 * 24;
	}

	/*
	 * Calculate the time zone. The timezone is 12 bit signed 2's
	 * complement, so we gotta do some extra magic to handle it right.
	 */
	tz.u_tz_offset = time->type_tz;
	tz.u_tz_offset &= 0x0fff;
	if (tz.u_tz_offset & 0x0800)
		tz.u_tz_offset |= 0xf000;	/* extend the sign to 16 bits */
	if ((time->type_tz & 0x1000) && (tz.s_tz_offset != -2047))
		t->tv_sec -= tz.s_tz_offset * 60;

	return;
}

static int
udf_getattr(struct vop_getattr_args *a)
{
	struct vnode *vp;
	struct udf_node *node;
	struct vattr *vap;
	struct file_entry *fentry;
	struct timespec ts;

	ts.tv_sec = 0;

	vp = a->a_vp;
	vap = a->a_vap;
	node = VTON(vp);
	fentry = node->fentry;

	vap->va_fsid = dev2udev(node->i_dev);
	vap->va_fileid = node->hash_id;
	vap->va_mode = udf_permtomode(node);
	vap->va_nlink = fentry->link_cnt;
	/*
	 * XXX The spec says that -1 is valid for uid/gid and indicates an
	 * invalid uid/gid. How should this be represented?
	 */
	vap->va_uid = (fentry->uid == -1) ? 0 : fentry->uid;
	vap->va_gid = (fentry->gid == -1) ? 0 : fentry->gid;
	udf_timetotimespec(&fentry->atime, &vap->va_atime);
	udf_timetotimespec(&fentry->mtime, &vap->va_mtime);
	vap->va_ctime = vap->va_mtime; /* XXX Stored as an Extended Attribute */
	vap->va_rdev = 0; /* XXX */
	if (vp->v_type & VDIR) {
		/*
		 * Directories that are recorded within their ICB will show
		 * as having 0 blocks recorded. Since tradition dictates
		 * that directories consume at least one logical block,
		 * make it appear so.
		 */
		if (fentry->logblks_rec != 0) {
			vap->va_size = fentry->logblks_rec * node->udfmp->bsize;
		} else {
			vap->va_size = node->udfmp->bsize;
		}
	} else {
		vap->va_size = fentry->inf_len;
	}
	vap->va_flags = 0;
	vap->va_gen = 1;
	vap->va_blocksize = node->udfmp->bsize;
	vap->va_bytes = fentry->inf_len;
	vap->va_type = vp->v_type;
	vap->va_filerev = 0; /* XXX */
	return (0);
}

/*
 * File specific ioctls. DeCSS candidate?
 */
static int
udf_ioctl(struct vop_ioctl_args *a)
{
	printf("%s called\n", __FUNCTION__);
	return (EOPNOTSUPP);
}

/*
 * I'm not sure that this has much value in a read-only filesystem, but
 * cd9660 has it too.
 */
static int
udf_pathconf(struct vop_pathconf_args *a)
{

	switch (a->a_name) {
	case _PC_LINK_MAX:
		*a->a_retval = 65535;
		return (0);
	case _PC_NAME_MAX:
		*a->a_retval = NAME_MAX;
		return (0);
	case _PC_PATH_MAX:
		*a->a_retval = PATH_MAX;
		return (0);
	case _PC_NO_TRUNC:
		*a->a_retval = 1;
		return (0);
	default:
		return (EINVAL);
	}
}

static int
udf_read(struct vop_read_args *a)
{
	struct vnode *vp = a->a_vp;
	struct uio *uio = a->a_uio;
	struct udf_node *node = VTON(vp);
	struct buf *bp;
	uint8_t *data;
	int error = 0;
	int size, fsize, offset;

	if (uio->uio_offset < 0)
		return (EINVAL);

	fsize = node->fentry->inf_len;

	while (uio->uio_offset < fsize && uio->uio_resid > 0) {
		offset = uio->uio_offset;
		size = uio->uio_resid;
		error = udf_readatoffset(node, &size, offset, &bp, &data);
		if (error)
			return (error);
		error = uiomove((caddr_t)data, size, uio);
		if (bp != NULL)
			brelse(bp);
		if (error)
			break;
	}

	return (error);
}

/* Convenience routine to dump a block in hex */
static void
udf_dumpblock(void *data, int len)
{
	int i, j;

	for (i = 0; i < len; i++) {
		printf("\noffset= %d: ", i);
		for (j = 0; j < 8; j++) {
			if (i + j == len)
				break;
			printf("0x%02x ", (uint8_t)((uint8_t*)(data))[i + j]);
		}
		i += j - 1;
	}
	printf("\n");
}

/*
 * Call the OSTA routines to translate the name from a CS0 dstring to a
 * 16-bit Unicode String. Hooks need to be placed in here to translate from
 * Unicode to the encoding that the kernel/user expects. For now, compact
 * the encoding to 8 bits if possible. Return the length of the translated
 * string.
 * XXX This horribly pessimizes the 8bit case
 */
static int
udf_transname(char *cs0string, char *destname, int len)
{
	unicode_t *transname;
	int i, unilen = 0;

	/* allocate a buffer big enough to hold an 8->16 bit expansion */
	transname = uma_zalloc(udf_zone_trans, M_WAITOK);
	if (transname == NULL) {
		printf("udf: out of memory?\n");
		return 0;
	}

	if ((unilen = udf_UncompressUnicode(len, cs0string, transname)) == -1) {
		printf("udf: Unicode translation failed\n");
		uma_zfree(udf_zone_trans, transname);
		return 0;
	}

	/* At this point, the name is in 16-bit Unicode. Compact it down
	 * to 8-bit
	 */
	for (i = 0; i < unilen; i++) {
		if (transname[i] & 0xff00) {
			destname[i] = '.';	/* Fudge the 16bit chars */
		} else {
			destname[i] = transname[i] & 0xff;
		}
	}

	destname[unilen] = 0;
	uma_zfree(udf_zone_trans, transname);

	return unilen;
}

/*
 * Compare a CS0 dstring with a name passed in from the VFS layer. Return
 * 0 on a successful match, nonzero otherwise. Unicode work may need to be
 * done here also.
 */
static int
udf_cmpname(char *cs0string, char *cmpname, int cs0len, int cmplen)
{
	char transname[MAXNAMLEN+1];	/* XXX stack */

	if ((cs0len = udf_transname(cs0string, &transname[0], cs0len)) == 0)
		return -1;

	/* Easy check. If they aren't the same length, they aren't equal */
	if (cs0len != cmplen)
		return -1;

	return (bcmp(transname, cmpname, cmplen));
}

struct udf_uiodir {
	struct dirent *dirent;
	u_long *cookies;
	int ncookies;
	int acookies;
	int eofflag;
};

static int
udf_uiodir(struct udf_uiodir *uiodir, int de_size, struct uio *uio, long cookie)
{
	if (uiodir->cookies != NULL) {
		if (++uiodir->acookies > uiodir->ncookies) {
			uiodir->eofflag = 0;
			return (-1);
		}
		*uiodir->cookies++ = cookie;
	}

	if (uio->uio_resid < de_size) {
		uiodir->eofflag = 0;
		return (-1);
	}

	return (uiomove((caddr_t)uiodir->dirent, de_size, uio));
}

/* Prebuild the . and .. dirents. d_fileno will need to be filled in */
static struct dirent udf_de_dot =
	{ 0, sizeof(struct dirent), DT_DIR, 1, "." };
static struct dirent udf_de_dotdot =
	{ 0, sizeof(struct dirent), DT_DIR, 2, ".." };

static int
udf_readdir(struct vop_readdir_args *a)
{
	struct vnode *vp;
	struct buf *bp;
	struct uio *uio;
	struct dirent dir;
	struct udf_node *node;
	struct udf_mnt *udfmp;
	struct fileid_desc *fid;
	struct udf_uiodir uiodir;
	u_long *cookies = NULL;
	uint8_t *data;
	int ncookies;
	int error = 0, offset, off, size, de_size, fid_size, fsize;
	int total_fid_size = 0, frag_size = 0, fid_fragment = 0;

	vp = a->a_vp;
	uio = a->a_uio;
	node = VTON(vp);
	udfmp = node->udfmp;
	de_size = sizeof(struct dirent);
	fid_size = UDF_FID_SIZE;
	fsize = node->fentry->inf_len;
	uiodir.eofflag = 1;

	if (a->a_ncookies != NULL) {
		/*
		 * Guess how many entries are needed. If we run out, this
		 * function will be called again and things will pick up where
		 * it left off.
		 */
		ncookies = uio->uio_resid / 8;
		MALLOC(cookies, u_long *, sizeof(u_long) * ncookies,
		    M_TEMP, M_WAITOK);
		if (cookies == NULL)
			return (ENOMEM);
		uiodir.ncookies = ncookies;
		uiodir.cookies = cookies;
		uiodir.acookies = 0;
	} else {
		uiodir.cookies = NULL;
	}

	/*
	 * offset is the absolute offset into the file data. off is the offset
	 * into the data, minus the blocks that weren't read because they fell
	 * before offset.
	 */
	offset = uio->uio_offset;
	off = 0;

	/*
	 * Iterate through the file id descriptors. Give the parent dir
	 * entry special attention. size will be the size of the extent
	 * returned in data. If there is more than one extent, things get
	 * ugly.
	 */
	size = 0;
	error = udf_readatoffset(node, &size, offset, &bp, &data);
	if (error) {
		if (a->a_ncookies != NULL)
			FREE(cookies, M_TEMP);
		return (error);
	}

	while (offset + off < fsize) {

		fid = (struct fileid_desc*)&data[off];

		/*
		 * Check to see if the fid is fragmented. The first test
		 * ensures that we don't wander off the end of the buffer
		 * looking for the l_iu and l_fi fields.
		 */
		if (off + fid_size > size ||
		    off + fid->l_iu + fid->l_fi + fid_size > size) {
			struct fileid_desc *fid_buf;
			uint8_t *buf;

			/* Copy what we have of the fid into a buffer */
			frag_size = size - off;
			MALLOC(buf, uint8_t*, max(frag_size, fid_size),
			    M_UDFFID, M_NOWAIT | M_ZERO);
			if (buf == NULL)
				panic("No memory?");
			bcopy(fid, buf, frag_size);

			/* Reduce all of the casting magic */
			fid_buf = (struct fileid_desc*)buf;

			if (bp != NULL)
				brelse(bp);

			/* Fetch the next allocation */
			offset += size;
			size = 0;
			error = udf_readatoffset(node, &size, offset, &bp,
			    &data);
			if (error)
				break;

			/*
			 * If the fragment was so small that we didn't get
			 * the l_iu and l_fi fields, copy those in.
			 */
			if (fid_size > frag_size)
				bcopy(data, &buf[frag_size],
				    fid_size - frag_size);

			/*
			 * Now that we have enough of the fid to work with,
			 * allocate a new fid, copy the fragment into it,
			 * and copy the rest of the fid from the new
			 * allocation.
			 */
			total_fid_size = fid_size + fid_buf->l_iu +
			    fid_buf->l_fi;
			MALLOC(fid, struct fileid_desc *, total_fid_size,
			    M_UDFFID, M_NOWAIT | M_ZERO);
			if (fid == NULL) {
				if (bp != NULL)
					brelse(bp);
				error = ENOMEM;
				break;
			}
			bcopy(fid_buf, fid, frag_size);
			bcopy(data, &((uint8_t*)(fid))[frag_size],
			    total_fid_size - frag_size);

			fid_fragment = 1;
			FREE(buf, M_UDFFID);
		} else {
			total_fid_size = fid->l_iu + fid->l_fi + fid_size;
		}

		/* XXX Should we return an error on a bad fid? */
		if (udf_checktag(&fid->tag, TAGID_FID)) {
			printf("Invalid FID tag\n");
			break;
		}

		/* Is this a deleted file? */
		if (fid->file_char & 0x4)
			goto update_offset;

		if (fid->l_iu != 0) {
			printf("Possibly invalid fid found.\n");
			goto update_offset;
		}

		if ((fid->l_fi == 0) && (fid->file_char & 0x08)) {
			/* Do up the '.' and '..' entries. Dummy values are
			 * used for the cookies since the offset here is
			 * usually zero, and NFS doesn't like that value
			 * XXX Should the magic dirents be locked?
			 */
			udf_de_dot.d_fileno = node->hash_id;
			uiodir.dirent = &udf_de_dot;
			error = udf_uiodir(&uiodir, de_size, uio, 1);
			if (error)
				break;

			udf_de_dotdot.d_fileno = udf_getid(&fid->icb);
			uiodir.dirent = &udf_de_dotdot;
			error = udf_uiodir(&uiodir, de_size, uio, 2);
		} else {
			dir.d_namlen = udf_transname(&fid->data[fid->l_iu],
			    &dir.d_name[0], fid->l_fi);
			dir.d_fileno = udf_getid(&fid->icb);
			dir.d_type = (fid->file_char & 0x02) ? DT_DIR :
			    DT_UNKNOWN;
			dir.d_reclen = GENERIC_DIRSIZ(&dir);
			uiodir.dirent = &dir;
			error = udf_uiodir(&uiodir, dir.d_reclen, uio, off);
		}
		if (error) {
			printf("uiomove returned %d\n", error);
			break;
		}

update_offset:	/*
		 * Update the offset. Align on a 4 byte boundary because the
		 * UDF spec says so. If it was a fragmented entry, clean up.
		 */
		if (fid_fragment) {
			off = (total_fid_size - frag_size + 3) & ~0x03;
			FREE(fid, M_UDFFID);
			fid_fragment = 0;
		} else {
			off += (total_fid_size + 3) & ~0x03;
		}
	}

	/* tell the calling layer whether we need to be called again */
	*a->a_eofflag = uiodir.eofflag;
	uio->uio_offset = offset + off;

	if (bp != NULL)
		brelse(bp);

	if (a->a_ncookies != NULL) {
		if (error)
			free(cookies, M_TEMP);
		else {
			*a->a_ncookies = uiodir.acookies;
			*a->a_cookies = cookies;
		}
	}

	return (error);
}

/* Are there any implementations out there that do soft-links? */
static int
udf_readlink(struct vop_readlink_args *ap)
{
	printf("%s called\n", __FUNCTION__);
	return (EOPNOTSUPP);
}

static int
udf_strategy(struct vop_strategy_args *a)
{
	struct buf *bp;
	struct vnode *vp;
	struct udf_node *node;
	int maxsize;

	bp = a->a_bp;
	vp = bp->b_vp;
	node = VTON(vp);

	/* cd9660 has this test reversed, but it seems more logical this way */
	if (bp->b_blkno != bp->b_lblkno) {
		/*
		 * Files that are embedded in the fentry don't translate well
		 * to a block number. Reject.
		 */
		if (udf_bmap_internal(node, bp->b_lblkno * node->udfmp->bsize,
		    &bp->b_lblkno, &maxsize)) {
			clrbuf(bp);
			bp->b_blkno = -1;
		}
	}
	if ((long)bp->b_blkno == -1) {
		bufdone(bp);
		return (0);
	}
	vp = node->i_devvp;
	bp->b_dev = vp->v_rdev;
	VOP_STRATEGY(vp, bp);
	return (0);
}

static int
udf_print(struct vop_print_args *a)
{
	printf("%s called\n", __FUNCTION__);
	return (EOPNOTSUPP);
}

static int
udf_bmap(struct vop_bmap_args *a)
{
	struct udf_node *node;
	uint32_t max_size;
	daddr_t lsector;
	int error;

	node = VTON(a->a_vp);

	if (a->a_vpp != NULL)
		*a->a_vpp = node->i_devvp;
	if (a->a_bnp == NULL)
		return (0);
	if (a->a_runb)
		*a->a_runb = 0;

	error = udf_bmap_internal(node, a->a_bn * node->udfmp->bsize, &lsector,
	    &max_size);
	if (error > 0)
		return (error);

	/* Translate logical to physical sector number */
	*a->a_bnp = lsector << (node->udfmp->bshift - DEV_BSHIFT);

	/* Punt on read-ahead for now */
	if (a->a_runp)
		*a->a_runp = 0;

	return (0);
}

/*
 * The all powerful VOP_LOOKUP().
 */
static int
udf_lookup(struct vop_cachedlookup_args *a)
{
	struct vnode *dvp;
	struct vnode *tdp = NULL;
	struct vnode **vpp = a->a_vpp;
	struct buf *bp = NULL;
	struct udf_node *node;
	struct udf_mnt *udfmp;
	struct fileid_desc *fid = NULL;
	struct thread *td;
	u_long nameiop;
	u_long flags;
	char *nameptr;
	long namelen;
	ino_t id = 0;
	uint8_t *data;
	int offset, off, error, size;
	int numdirpasses, fid_size, fsize, icb_len;
	int total_fid_size = 0, fid_fragment = 0;

	dvp = a->a_dvp;
	node = VTON(dvp);
	udfmp = node->udfmp;
	nameiop = a->a_cnp->cn_nameiop;
	flags = a->a_cnp->cn_flags;
	nameptr = a->a_cnp->cn_nameptr;
	namelen = a->a_cnp->cn_namelen;
	fid_size = UDF_FID_SIZE;
	fsize = node->fentry->inf_len;
	icb_len = sizeof(struct long_ad);
	td = a->a_cnp->cn_thread;

	/*
	 * If this is a LOOKUP and we've already partially searched through
	 * the directory, pick up where we left off and flag that the
	 * directory may need to be searched twice. For a full description,
	 * see /sys/isofs/cd9660/cd9660_lookup.c:cd9660_lookup()
	 */
	if (nameiop != LOOKUP || node->diroff == 0 || node->diroff > size) {
		offset = 0;
		numdirpasses = 1;
	} else {
		offset = node->diroff;
		numdirpasses = 2;
		nchstats.ncs_2passes++;
	}

	/*
	 * The name lookup algorithm is quite similar to what is in readdir.
	 * Can this be broken out and shared?
	 */
lookloop:
	size = 0;
	off = 0;
	error = udf_readatoffset(node, &size, offset, &bp, &data);
	if (error)
		return (error);

	while (offset + off < fsize) {
		fid = (struct fileid_desc*)&data[off];

		/*
		 * Check to see if the fid is fragmented. The first test
		 * ensures that we don't wander off the end of the buffer
		 * looking for the l_iu and l_fi fields.
		 */
		if (off + fid_size > size ||
		    off + fid_size + fid->l_iu + fid->l_fi > size) {
			struct fileid_desc *fid_buf;
			uint8_t *buf;
			int frag_size = 0;

			/* Copy what we have of the fid into a buffer */
			frag_size = size - off;
			MALLOC(buf, uint8_t*, max(frag_size, fid_size),
			    M_UDFFID, M_NOWAIT | M_ZERO);
			if (buf == NULL)
				panic("No memory?");
			bcopy(fid, buf, frag_size);

			/* Reduce all of the casting magic */
			fid_buf = (struct fileid_desc*)buf;

			if (bp != NULL)
				brelse(bp);

			/* Fetch the next allocation */
			offset += size;
			size = 0;
			error = udf_readatoffset(node, &size, offset, &bp,
			    &data);
			if (error)
				return (error);

			/*
			 * If the fragment was so small that we didn't get
			 * the l_iu and l_fi fields, copy those in.
			 */
			if (fid_size > frag_size)
				bcopy(data, &buf[frag_size],
				    fid_size - frag_size);

			/*
			 * Now that we have enough of the fid to work with,
			 * allocate a new fid, copy the fragment into it,
			 * and copy the rest of the fid from the new
			 * allocation.
			 */
			total_fid_size = fid_size + fid_buf->l_iu +
			    fid_buf->l_fi;
			MALLOC(fid, struct fileid_desc *, total_fid_size,
			    M_UDFFID, M_NOWAIT | M_ZERO);
			if (fid == NULL) {
				if (bp != NULL)
					brelse(bp);
				return (ENOMEM);
			}
			bcopy(fid_buf, fid, frag_size);
			bcopy(data, &((uint8_t*)(fid))[frag_size],
			    total_fid_size - frag_size);

			off = (total_fid_size - frag_size + 3) & ~0x03;
			fid_fragment = 1;
			FREE(buf, M_UDFFID);
		} else {
			/*
			 * Update the offset here to avoid looking at this fid
			 * again on a subsequent lookup.
			 */
			total_fid_size = fid->l_iu + fid->l_fi + fid_size;
			off += (total_fid_size + 3) & ~0x03;
		}

		/* XXX Should we return an error on a bad fid? */
		if (udf_checktag(&fid->tag, TAGID_FID))
			break;

		if ((fid->l_fi == 0) && (fid->file_char & 0x08)) {
			if (flags & ISDOTDOT) {
				id = udf_getid(&fid->icb);
				break;
			}
		} else {
			if (!(udf_cmpname(&fid->data[fid->l_iu],
			    nameptr, fid->l_fi, namelen))) {
				id = udf_getid(&fid->icb);
				break;
			}
		}

		/*
		 * If we got this far then this fid isn't what we were
		 * looking for. It's therefore safe to clean up from a
		 * fragmented fid.
		 */
		if (fid_fragment) {
			FREE(fid, M_UDFFID);
			fid_fragment = 0;
		}
	}

	/* Did we have a match? */
	if (id) {
		error = udf_vget(udfmp->im_mountp, id, LK_EXCLUSIVE, &tdp);
		if (bp != NULL)
			brelse(bp);
		if (error)
			return (error);

		/* Remember where this entry was if it's the final component */
		if ((flags & ISLASTCN) && nameiop == LOOKUP)
			node->diroff = offset + off;
		if (numdirpasses == 2)
			nchstats.ncs_pass2++;
		if (!(flags & LOCKPARENT) || !(flags & ISLASTCN)) {
			a->a_cnp->cn_flags |= PDIRUNLOCK;
			VOP_UNLOCK(dvp, 0, td);
		}

		*vpp = tdp;

		/* Put this entry in the cache */
		if (flags & MAKEENTRY)
			cache_enter(dvp, *vpp, a->a_cnp);

		if (fid_fragment)
			FREE(fid, M_UDFFID);

		return (0);
	}

	/* Name wasn't found on this pass. Do another pass? */
	if (numdirpasses == 2) {
		numdirpasses--;
		offset = 0;
		goto lookloop;
	}

	if (bp != NULL)
		brelse(bp);

	/* Enter name into cache as non-existent */
	if (flags & MAKEENTRY)
		cache_enter(dvp, *vpp, a->a_cnp);

	if ((flags & ISLASTCN) && (nameiop == CREATE || nameiop == RENAME))
		return (EROFS);
	return (ENOENT);
}

static int
udf_reclaim(struct vop_reclaim_args *a)
{
	struct vnode *vp;
	struct udf_node *unode;

	vp = a->a_vp;
	unode = VTON(vp);

	cache_purge(vp);
	if (unode != NULL) {
		udf_hashrem(unode);
		if (unode->i_devvp) {
			vrele(unode->i_devvp);
			unode->i_devvp = 0;
		}

		if (unode->fentry != NULL)
			FREE(unode->fentry, M_UDFFENTRY);
		lockdestroy(&unode->i_vnode->v_lock);
		uma_zfree(udf_zone_node, unode);
		vp->v_data = NULL;
	}

	return (0);
}

/*
 * Read the block and then set the data pointer to correspond with the
 * offset passed in. Only read in at most 'size' bytes, and then set 'size'
 * to the number of bytes pointed to. If 'size' is zero, try to read in a
 * whole extent.
 * XXX 'size' is limited to the logical block size for now due to problems
 * with udf_read()
 */
static int
udf_readatoffset(struct udf_node *node, int *size, int offset, struct buf **bp, uint8_t **data)
{
	struct udf_mnt *udfmp;
	struct file_entry *fentry = NULL;
	struct buf *bp1;
	uint32_t max_size;
	daddr_t sector;
	int error;

	udfmp = node->udfmp;

	error = udf_bmap_internal(node, offset, &sector, &max_size);
	if (error == -1) {
		/*
		 * This error means that the file *data* is stored in the
		 * allocation descriptor field of the file entry.
		 */
		fentry = node->fentry;
		*data = &fentry->data[fentry->l_ea];
		*size = fentry->l_ad;
		*bp = NULL;
		return (0);
	} else if (error != 0) {
		return (error);
	}

	/* Adjust the size so that it is within range */
	if (*size == 0 || *size > max_size)
		*size = max_size;
	*size = min(*size, MAXBSIZE);

	if ((error = udf_readlblks(udfmp, sector, *size, bp))) {
		printf("udf_readlblks returned %d\n", error);
		return (error);
	}

	bp1 = *bp;
	*data = (uint8_t *)&bp1->b_data[offset % udfmp->bsize];
	return (0);
}

/*
 * Translate a file offset into a logical block and then into a physical
 * block.
 */
static int
udf_bmap_internal(struct udf_node *node, uint32_t offset, daddr_t *sector, uint32_t *max_size)
{
	struct udf_mnt *udfmp;
	struct file_entry *fentry;
	void *icb;
	struct icb_tag *tag;
	uint32_t icblen = 0;
	daddr_t lsector;
	int ad_offset, ad_num = 0;
	int i, p_offset;

	udfmp = node->udfmp;
	fentry = node->fentry;
	tag = &fentry->icbtag;

	switch (tag->strat_type) {
	case 4:
		break;

	case 4096:
		printf("Cannot deal with strategy4096 yet!\n");
		return (ENODEV);

	default:
		printf("Unknown strategy type %d\n", tag->strat_type);
		return (ENODEV);
	}

	switch (tag->flags & 0x7) {
	case 0:
		/*
		 * The allocation descriptor field is filled with short_ad's.
		 * If the offset is beyond the current extent, look for the
		 * next extent.
		 */
		do {
			offset -= icblen;
			ad_offset = sizeof(struct short_ad) * ad_num;
			if (ad_offset > fentry->l_ad) {
				printf("File offset out of bounds\n");
				return (EINVAL);
			}
			icb = GETICB(long_ad, fentry, fentry->l_ea + ad_offset);
			icblen = GETICBLEN(short_ad, icb);
			ad_num++;
		} while (offset >= icblen);

		lsector = (offset >> udfmp->bshift) +
		    ((struct short_ad *)(icb))->pos;

		*max_size = GETICBLEN(short_ad, icb) - offset;

		break;
	case 1:
		/*
		 * The allocation descriptor field is filled with long_ad's.
		 * If the offset is beyond the current extent, look for the
		 * next extent.
		 */
		do {
			offset -= icblen;
			ad_offset = sizeof(struct long_ad) * ad_num;
			if (ad_offset > fentry->l_ad) {
				printf("File offset out of bounds\n");
				return (EINVAL);
			}
			icb = GETICB(long_ad, fentry, fentry->l_ea + ad_offset);
			icblen = GETICBLEN(long_ad, icb);
			ad_num++;
		} while (offset >= icblen);

		lsector = (offset >> udfmp->bshift) +
		    ((struct long_ad *)(icb))->loc.lb_num;

		*max_size = GETICBLEN(long_ad, icb) - offset;

		break;
	case 3:
		/*
		 * This type means that the file *data* is stored in the
		 * allocation descriptor field of the file entry.
		 */
		*max_size = 0;
		*sector = node->hash_id + udfmp->bsize;

		return (-1);
	case 2:
		/* DirectCD does not use extended_ad's */
	default:
		printf("Unsupported allocation descriptor %d\n",
		    tag->flags & 0x7);
		return (ENODEV);
	}

	*sector = lsector + udfmp->part_start;

	/*
	 * Check the sparing table. Each entry represents the beginning of
	 * a packet.
	 */
	if (udfmp->s_table != NULL) {
		for (i = 0; i < udfmp->s_table_entries; i++) {
			p_offset = lsector - udfmp->s_table->entries[i].org;
			if ((p_offset < udfmp->p_sectors) && (p_offset >= 0)) {
				*sector = udfmp->s_table->entries[i].map +
				    p_offset;
				break;
			}
		}
	}

	return (0);
}