/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Juniper Networks, Inc.
 * Copyright (c) 2022-2023 Klara, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_tarfs.h"

#include <sys/param.h>
#include <sys/stat.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <vm/vm_param.h>

#include <fs/tarfs/tarfs.h>
#include <fs/tarfs/tarfs_dbg.h>

MALLOC_DEFINE(M_TARFSNAME, "tarfs name", "tarfs file names");
MALLOC_DEFINE(M_TARFSBLK, "tarfs blk", "tarfs block maps");

SYSCTL_NODE(_vfs, OID_AUTO, tarfs, CTLFLAG_RW, 0, "Tar filesystem");

unsigned int tarfs_ioshift = TARFS_IOSHIFT_DEFAULT;

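/*
 * Sysctl handler for vfs.tarfs.ioshift.  Reports the current value and,
 * on write, clamps the new value to [TARFS_IOSHIFT_MIN, TARFS_IOSHIFT_MAX];
 * a value of zero resets it to TARFS_IOSHIFT_DEFAULT.
 */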
static int
tarfs_sysctl_handle_ioshift(SYSCTL_HANDLER_ARGS)
{
	unsigned int tmp;
	int error;

	tmp = *(unsigned int *)arg1;
	if ((error = SYSCTL_OUT(req, &tmp, sizeof(tmp))) != 0)
		return (error);
	if (req->newptr != NULL) {
		if ((error = SYSCTL_IN(req, &tmp, sizeof(tmp))) != 0)
			return (error);
		if (tmp == 0)
			tmp = TARFS_IOSHIFT_DEFAULT;
		if (tmp < TARFS_IOSHIFT_MIN)
			tmp = TARFS_IOSHIFT_MIN;
		if (tmp > TARFS_IOSHIFT_MAX)
			tmp = TARFS_IOSHIFT_MAX;
		*(unsigned int *)arg1 = tmp;
	}
	return (0);
}

SYSCTL_PROC(_vfs_tarfs, OID_AUTO, ioshift,
    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RWTUN,
    &tarfs_ioshift, 0, tarfs_sysctl_handle_ioshift, "IU",
    "Tar filesystem preferred I/O size (log 2)");

#ifdef TARFS_DEBUG
int tarfs_debug;
SYSCTL_INT(_vfs_tarfs, OID_AUTO, debug, CTLFLAG_RWTUN,
    &tarfs_debug, 0, "Tar filesystem debug mask");
#endif /* TARFS_DEBUG */

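/*
 * Looks up the entry in directory node tnp whose name matches cnp; if f
 * is non-NULL, only that node is considered a match.  A matching regular
 * file that is a hard link is resolved to its link target.  Returns the
 * matching node, or NULL if none was found.
 */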
struct tarfs_node *
tarfs_lookup_node(struct tarfs_node *tnp, struct tarfs_node *f,
    struct componentname *cnp)
{
	boolean_t found;
	struct tarfs_node *entry;

	TARFS_DPF(LOOKUP, "%s: name: %.*s\n", __func__, (int)cnp->cn_namelen,
	    cnp->cn_nameptr);

	found = false;
	TAILQ_FOREACH(entry, &tnp->dir.dirhead, dirents) {
		if (f != NULL && entry != f)
			continue;

		if (entry->namelen == cnp->cn_namelen &&
		    bcmp(entry->name, cnp->cn_nameptr,
		    entry->namelen) == 0) {
			found = true;
			break;
		}
	}

	if (found) {
		if (entry->type == VREG && entry->other != NULL) {
			TARFS_DPF_IFF(LOOKUP, "%s: following hard link %p\n",
			    __func__, entry);
			entry = entry->other;
		}
		TARFS_DPF(LOOKUP, "%s: found tarfs_node %p\n", __func__,
		    entry);
		return (entry);
	}

	TARFS_DPF(LOOKUP, "%s: no match found\n", __func__);
	return (NULL);
}

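/*
 * Returns the entry in directory node tnp whose inode number equals the
 * given readdir cookie, using the directory's cached last lookup when
 * possible, or NULL if no entry matches.
 */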
struct tarfs_node *
tarfs_lookup_dir(struct tarfs_node *tnp, off_t cookie)
{
	struct tarfs_node *current;

	TARFS_DPF(LOOKUP, "%s: tarfs_node %p, cookie %jd\n", __func__, tnp,
	    cookie);
	TARFS_DPF(LOOKUP, "%s: name: %s\n", __func__,
	    (tnp->name == NULL) ? "<<root>>" : tnp->name);

	if (cookie == tnp->dir.lastcookie &&
	    tnp->dir.lastnode != NULL) {
		TARFS_DPF(LOOKUP, "%s: Using cached entry: tarfs_node %p, "
		    "cookie %jd\n", __func__, tnp->dir.lastnode,
		    tnp->dir.lastcookie);
		return (tnp->dir.lastnode);
	}

	TAILQ_FOREACH(current, &tnp->dir.dirhead, dirents) {
		TARFS_DPF(LOOKUP, "%s: tarfs_node %p, current %p, ino %lu\n",
		    __func__, tnp, current, current->ino);
		TARFS_DPF_IFF(LOOKUP, current->name != NULL,
		    "%s: name: %s\n", __func__, current->name);
		if (current->ino == cookie) {
			TARFS_DPF(LOOKUP, "%s: Found entry: tarfs_node %p, "
			    "cookie %lu\n", __func__, current,
			    current->ino);
			break;
		}
	}

	return (current);
}

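/*
 * Allocates and initializes a tarfs node of the given type and
 * attributes, inserts it into its parent directory (if any) and into the
 * mount's list of all nodes, and returns it through retnode.  Returns 0
 * on success or an errno value on failure.
 */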
int
tarfs_alloc_node(struct tarfs_mount *tmp, const char *name, size_t namelen,
    __enum_uint8(vtype) type, off_t off, size_t sz, time_t mtime, uid_t uid, gid_t gid,
    mode_t mode, unsigned int flags, const char *linkname, dev_t rdev,
    struct tarfs_node *parent, struct tarfs_node **retnode)
{
	struct tarfs_node *tnp;

	TARFS_DPF(ALLOC, "%s(%.*s)\n", __func__, (int)namelen, name);

	if (parent != NULL && parent->type != VDIR)
		return (ENOTDIR);
	tnp = malloc(sizeof(struct tarfs_node), M_TARFSNODE, M_WAITOK | M_ZERO);
	mtx_init(&tnp->lock, "tarfs node lock", NULL, MTX_DEF);
	tnp->gen = arc4random();
	tnp->tmp = tmp;
	if (namelen > 0) {
		tnp->name = malloc(namelen + 1, M_TARFSNAME, M_WAITOK);
		tnp->namelen = namelen;
		memcpy(tnp->name, name, namelen);
		tnp->name[namelen] = '\0';
	}
	tnp->type = type;
	tnp->uid = uid;
	tnp->gid = gid;
	tnp->mode = mode;
	tnp->nlink = 1;
	vfs_timestamp(&tnp->atime);
	tnp->mtime.tv_sec = mtime;
	tnp->birthtime = tnp->atime;
	tnp->ctime = tnp->mtime;
	if (parent != NULL) {
		tnp->ino = alloc_unr(tmp->ino_unr);
	}
	tnp->offset = off;
	tnp->size = tnp->physize = sz;
	switch (type) {
	case VDIR:
		MPASS(parent != tnp);
		MPASS(parent != NULL || tmp->root == NULL);
		TAILQ_INIT(&tnp->dir.dirhead);
		tnp->nlink++;
		if (parent == NULL) {
			tnp->ino = TARFS_ROOTINO;
		}
		tnp->physize = 0;
		break;
	case VLNK:
		tnp->link.name = malloc(sz + 1, M_TARFSNAME,
		    M_WAITOK);
		tnp->link.namelen = sz;
		memcpy(tnp->link.name, linkname, sz);
		tnp->link.name[sz] = '\0';
		break;
	case VREG:
		/* create dummy block map */
		tnp->nblk = 1;
		tnp->blk = malloc(sizeof(*tnp->blk), M_TARFSBLK, M_WAITOK);
		tnp->blk[0].i = 0;
		tnp->blk[0].o = 0;
		tnp->blk[0].l = tnp->physize;
		break;
	case VFIFO:
		/* Nothing extra to do */
		break;
	case VBLK:
	case VCHR:
		tnp->rdev = rdev;
		tnp->physize = 0;
		break;
	default:
		panic("%s: type %d not allowed", __func__, type);
	}
	if (parent != NULL) {
		TARFS_NODE_LOCK(parent);
		TAILQ_INSERT_TAIL(&parent->dir.dirhead, tnp, dirents);
		parent->size += sizeof(struct tarfs_node);
		tnp->parent = parent;
		if (type == VDIR) {
			parent->nlink++;
		}
		TARFS_NODE_UNLOCK(parent);
	} else {
		tnp->parent = tnp;
	}
	MPASS(tnp->ino != 0);

	TARFS_ALLNODES_LOCK(tmp);
	TAILQ_INSERT_TAIL(&tmp->allnodes, tnp, entries);
	TARFS_ALLNODES_UNLOCK(tmp);

	*retnode = tnp;
	tmp->nfiles++;
	return (0);
}

#define is09(ch) ((ch) >= '0' && (ch) <= '9')

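/*
 * Loads and validates the sparse file map stored at the beginning of
 * tnp's data, replacing the node's dummy block map.  The map is plain
 * text: a decimal entry count on the first line, followed by one decimal
 * logical offset line and one decimal length line per entry, padded out
 * to a multiple of TARFS_BLOCKSIZE; this corresponds to GNU tar's sparse
 * 1.0 data layout.  realsize is the file's logical (expanded) size.
 */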
int
tarfs_load_blockmap(struct tarfs_node *tnp, size_t realsize)
{
	struct tarfs_blk *blk = NULL;
	char *map = NULL;
	size_t nmap = 0, nblk = 0;
	char *p, *q;
	ssize_t res;
	unsigned int i;
	long n;

	/*
	 * Load the entire map into memory.  We don't know how big it is,
	 * but as soon as we start reading it we will know how many
	 * entries it contains, and then we can count newlines.
	 */
	do {
		nmap++;
		if (tnp->size < nmap * TARFS_BLOCKSIZE) {
			TARFS_DPF(MAP, "%s: map too large\n", __func__);
			goto bad;
		}
		/* grow the map */
		map = realloc(map, nmap * TARFS_BLOCKSIZE + 1, M_TARFSBLK,
		    M_ZERO | M_WAITOK);
		/* read an additional block */
		res = tarfs_io_read_buf(tnp->tmp, false,
		    map + (nmap - 1) * TARFS_BLOCKSIZE,
		    tnp->offset + (nmap - 1) * TARFS_BLOCKSIZE,
		    TARFS_BLOCKSIZE);
		if (res < 0)
			return (-res);
		else if (res < TARFS_BLOCKSIZE)
			return (EIO);
		map[nmap * TARFS_BLOCKSIZE] = '\0'; /* sentinel */
		if (nblk == 0) {
			n = strtol(p = map, &q, 10);
			if (q == p || *q != '\n' || n < 1)
				goto syntax;
			nblk = n;
		}
		for (n = 0, p = map; *p != '\0'; ++p) {
			if (*p == '\n') {
				++n;
			}
		}
		TARFS_DPF(MAP, "%s: %ld newlines in map\n", __func__, n);
	} while (n < nblk * 2 + 1);
	TARFS_DPF(MAP, "%s: block map length %zu\n", __func__, nblk);
	blk = malloc(sizeof(*blk) * nblk, M_TARFSBLK, M_WAITOK | M_ZERO);
	p = strchr(map, '\n') + 1;
	for (i = 0; i < nblk; i++) {
		if (i == 0)
			blk[i].i = nmap * TARFS_BLOCKSIZE;
		else
			blk[i].i = blk[i - 1].i + blk[i - 1].l;
		n = strtol(p, &q, 10);
		if (q == p || *q != '\n' || n < 0)
			goto syntax;
		p = q + 1;
		blk[i].o = n;
		n = strtol(p, &q, 10);
		if (q == p || *q != '\n' || n < 0)
			goto syntax;
		p = q + 1;
		blk[i].l = n;
		TARFS_DPF(MAP, "%s: %3d %12zu %12zu %12zu\n", __func__,
		    i, blk[i].i, blk[i].o, blk[i].l);
		/*
		 * Check block alignment if the block is of non-zero
		 * length (a zero-length block indicates the end of a
		 * trailing hole).  Checking i indirectly checks the
		 * previous block's l.  It's ok for the final block to
		 * have an uneven length.
		 */
		if (blk[i].l == 0) {
			TARFS_DPF(MAP, "%s: zero-length block\n", __func__);
		} else if (blk[i].i % TARFS_BLOCKSIZE != 0 ||
		    blk[i].o % TARFS_BLOCKSIZE != 0) {
			TARFS_DPF(MAP, "%s: misaligned map entry\n", __func__);
			goto bad;
		}
		/*
		 * Check that this block starts after the end of the
		 * previous one.
		 */
		if (i > 0 && blk[i].o < blk[i - 1].o + blk[i - 1].l) {
			TARFS_DPF(MAP, "%s: overlapping map entries\n", __func__);
			goto bad;
		}
		/*
		 * Check that the block is within the file, both
		 * physically and logically.
		 */
		if (blk[i].i + blk[i].l > tnp->physize ||
		    blk[i].o + blk[i].l > realsize) {
			TARFS_DPF(MAP, "%s: map overflow\n", __func__);
			goto bad;
		}
	}
	free(map, M_TARFSBLK);

	/* store in node */
	free(tnp->blk, M_TARFSBLK);
	tnp->nblk = nblk;
	tnp->blk = blk;
	tnp->size = realsize;
	return (0);
syntax:
	TARFS_DPF(MAP, "%s: syntax error in block map\n", __func__);
bad:
	free(map, M_TARFSBLK);
	free(blk, M_TARFSBLK);
	return (EINVAL);
}

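/*
 * Releases a reference to tnp.  Once the last link is gone, frees the
 * node's name, symlink target, and block map, returns its inode number,
 * removes it from the mount's node list, and frees the node itself;
 * references held on a hard link target or on the parent directory are
 * dropped recursively.
 */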
void
tarfs_free_node(struct tarfs_node *tnp)
{
	struct tarfs_mount *tmp;

	MPASS(tnp != NULL);
	tmp = tnp->tmp;

	switch (tnp->type) {
	case VREG:
		if (tnp->nlink-- > 1)
			return;
		if (tnp->other != NULL)
			tarfs_free_node(tnp->other);
		break;
	case VDIR:
		if (tnp->nlink-- > 2)
			return;
		if (tnp->parent != NULL && tnp->parent != tnp)
			tarfs_free_node(tnp->parent);
		break;
	case VLNK:
		if (tnp->link.name)
			free(tnp->link.name, M_TARFSNAME);
		break;
	default:
		break;
	}
	if (tnp->name != NULL)
		free(tnp->name, M_TARFSNAME);
	if (tnp->blk != NULL)
		free(tnp->blk, M_TARFSBLK);
	if (tnp->ino >= TARFS_MININO)
		free_unr(tmp->ino_unr, tnp->ino);
	TAILQ_REMOVE(&tmp->allnodes, tnp, entries);
	free(tnp, M_TARFSNODE);
	tmp->nfiles--;
}

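/*
 * Copies up to len bytes of file data from tnp into the uio, walking the
 * node's block map: gaps before a block's logical offset are filled from
 * the kernel zero region, and block data is read from the backing tar
 * file at its physical offset.
 */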
int
tarfs_read_file(struct tarfs_node *tnp, size_t len, struct uio *uiop)
{
	struct uio auio;
	size_t resid = len;
	size_t copylen;
	unsigned int i;
	int error;

	TARFS_DPF(VNODE, "%s(%s, %zu, %zu)\n", __func__,
	    tnp->name, uiop->uio_offset, resid);
	for (i = 0; i < tnp->nblk && resid > 0; ++i) {
		if (uiop->uio_offset > tnp->blk[i].o + tnp->blk[i].l) {
			/* skip this block */
			continue;
		}
		while (resid > 0 &&
		    uiop->uio_offset < tnp->blk[i].o) {
			/* move out some zeroes... */
			copylen = tnp->blk[i].o - uiop->uio_offset;
			if (copylen > resid)
				copylen = resid;
			if (copylen > ZERO_REGION_SIZE)
				copylen = ZERO_REGION_SIZE;
			auio = *uiop;
			auio.uio_offset = 0;
			auio.uio_resid = copylen;
			error = uiomove(__DECONST(void *, zero_region),
			    copylen, &auio);
			if (error != 0)
				return (error);
			TARFS_DPF(MAP, "%s(%s) = zero %zu\n", __func__,
			    tnp->name, copylen - auio.uio_resid);
			uiop->uio_offset += copylen - auio.uio_resid;
			uiop->uio_resid -= copylen - auio.uio_resid;
			resid -= copylen - auio.uio_resid;
		}
		while (resid > 0 &&
		    uiop->uio_offset < tnp->blk[i].o + tnp->blk[i].l) {
			/* now actual data */
			copylen = tnp->blk[i].l;
			if (copylen > resid)
				copylen = resid;
			auio = *uiop;
			auio.uio_offset = tnp->offset + tnp->blk[i].i +
			    uiop->uio_offset - tnp->blk[i].o;
			auio.uio_resid = copylen;
			error = tarfs_io_read(tnp->tmp, false, &auio);
			if (error != 0)
				return (error);
			TARFS_DPF(MAP, "%s(%s) = data %zu\n", __func__,
			    tnp->name, copylen - auio.uio_resid);
			uiop->uio_offset += copylen - auio.uio_resid;
			uiop->uio_resid -= copylen - auio.uio_resid;
			resid -= copylen - auio.uio_resid;
		}
	}
	TARFS_DPF(VNODE, "%s(%s) = %zu\n", __func__,
	    tnp->name, len - resid);
	return (0);
}

/*
 * XXX ugly file flag parser which could easily be a finite state machine
 * driven by a small precomputed table.
 *
 * Note that unlike strtofflags(3), we make no attempt to handle negated
 * flags, since they shouldn't appear in tar files.
 */
static const struct tarfs_flag {
	const char *name;
	unsigned int flag;
} tarfs_flags[] = {
	{ "nodump", UF_NODUMP },
	{ "uchg", UF_IMMUTABLE },
	{ "uappnd", UF_APPEND },
	{ "opaque", UF_OPAQUE },
	{ "uunlnk", UF_NOUNLINK },
	{ "arch", SF_ARCHIVED },
	{ "schg", SF_IMMUTABLE },
	{ "sappnd", SF_APPEND },
	{ "sunlnk", SF_NOUNLINK },
	{ NULL, 0 },
};

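/*
 * Translates a comma-separated list of file flag names into the
 * corresponding UF_* / SF_* bits.  Parsing stops at the first name that
 * is not recognized or that contains a character outside 'a'..'z'; if
 * the caller passes a non-null *end, it is updated to point at the first
 * character that was not consumed.
 */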
unsigned int
tarfs_strtofflags(const char *str, char **end)
{
	const struct tarfs_flag *tf;
	const char *p, *q;
	unsigned int ret;

	ret = 0;
	for (p = q = str; *q != '\0'; p = q + 1) {
		for (q = p; *q != '\0' && *q != ','; ++q) {
			if (*q < 'a' || *q > 'z') {
				goto end;
			}
			/* nothing */
		}
		for (tf = tarfs_flags; tf->name != NULL; tf++) {
			if (strncmp(tf->name, p, q - p) == 0 &&
			    tf->name[q - p] == '\0') {
				TARFS_DPF(ALLOC, "%s: %.*s = 0x%06x\n", __func__,
				    (int)(q - p), p, tf->flag);
				ret |= tf->flag;
				break;
			}
		}
		if (tf->name == NULL) {
			TARFS_DPF(ALLOC, "%s: %.*s = 0x??????\n",
			    __func__, (int)(q - p), p);
			goto end;
		}
	}
end:
	if (*end != NULL) {
		*end = __DECONST(char *, q);
	}
	return (ret);
}