/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * autod_readdir.c
 */

#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/param.h>
#include <errno.h>
#include <pwd.h>
#include <locale.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <fcntl.h>
#include "automount.h"

static void build_dir_entry_list(struct autofs_rddir_cache *rdcp,
	struct dir_entry *list);
static int autofs_rddir_cache_enter(char *map, ulong_t bucket_size,
	struct autofs_rddir_cache **rdcpp);
int autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp);
static int autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp);
static int create_dirents(struct autofs_rddir_cache *rdcp, ulong_t offset,
	autofs_rddirres *res);
struct dir_entry *rddir_entry_lookup(char *name, struct dir_entry *list);
static void free_offset_tbl(struct off_tbl *head);
static void free_dir_list(struct dir_entry *head);
#define	OFFSET_BUCKET_SIZE	100	/* offsets spanned by one index bucket */

rwlock_t autofs_rddir_cache_lock;	/* readdir cache lock */
struct autofs_rddir_cache *rddir_head;	/* readdir cache head */

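/*
 * Service a readdir request from the kernel autofs filesystem:
 * build (or reuse) the cached entry list for the requested map and
 * copy as many entries as fit into the reply buffer.
 */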
int
do_readdir(autofs_rddirargs *rda, autofs_rddirres *rd)
{
	struct dir_entry *list = NULL, *l;
	struct autofs_rddir_cache *rdcp = NULL;
	int error;
	int cache_time = RDDIR_CACHE_TIME;

	if (automountd_nobrowse) {
		/*
		 * Browsability was disabled; return an empty list.
		 */
		rd->rd_status = AUTOFS_OK;
		rd->rd_rddir.rddir_size = 0;
		rd->rd_rddir.rddir_eof = 1;
		rd->rd_rddir.rddir_entries = NULL;

		return (0);
	}

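	/*
	 * Look the map up in the readdir cache under the read lock
	 * first; on a miss, retry under the write lock before adding
	 * the entry, since another thread may have inserted it in the
	 * window where no lock was held.
	 */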
	rw_rdlock(&autofs_rddir_cache_lock);
	error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
	if (error) {
		rw_unlock(&autofs_rddir_cache_lock);
		rw_wrlock(&autofs_rddir_cache_lock);
		error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
		if (error) {
			if (trace > 2)
				trace_prt(1,
				"map %s not found, adding...\n", rda->rda_map);
			/*
			 * entry doesn't exist, add it.
			 */
			error = autofs_rddir_cache_enter(rda->rda_map,
				OFFSET_BUCKET_SIZE, &rdcp);
		}
	}
	rw_unlock(&autofs_rddir_cache_lock);

	if (error)
		return (error);

	assert(rdcp != NULL);
	assert(rdcp->in_use);

	if (!rdcp->full) {
		rw_wrlock(&rdcp->rwlock);
		if (!rdcp->full) {
			/*
			 * Cache entry hasn't been filled yet; do it now.
			 */
			char *stack[STACKSIZ];
			char **stkptr;

			/*
			 * Initialize the stack of open files
			 * for this thread.
			 */
			stack_op(INIT, NULL, stack, &stkptr);
			(void) getmapkeys(rda->rda_map, &list, &error,
			    &cache_time, stack, &stkptr, rda->uid);
			if (!error)
				build_dir_entry_list(rdcp, list);
			else if (list) {
				free_dir_list(list);
				list = NULL;
			}
		}
	} else
		rw_rdlock(&rdcp->rwlock);

	rd->rd_bufsize = rda->rda_count;
	if (!error) {
		error = create_dirents(rdcp, rda->rda_offset, rd);
		if (error) {
			if (rdcp->offtp) {
				free_offset_tbl(rdcp->offtp);
				rdcp->offtp = NULL;
			}
			if (rdcp->entp) {
				free_dir_list(rdcp->entp);
				rdcp->entp = NULL;
			}
			rdcp->full = 0;
			list = NULL;
		}
	}

	if (trace > 2) {
		/*
		 * print this list only once
		 */
		for (l = list; l != NULL; l = l->next)
			trace_prt(0, "%s\n", l->name);
		trace_prt(0, "\n");
	}

	if (!error) {
		rd->rd_status = AUTOFS_OK;
		if (cache_time) {
			/*
			 * Keep the list of entries for up to
			 * 'cache_time' seconds.
			 */
			rdcp->ttl = time((time_t *)NULL) + cache_time;
		} else {
			/*
			 * The underlying name service indicated that
			 * the contents should not be cached.
			 */
			if (rdcp->offtp) {
				free_offset_tbl(rdcp->offtp);
				rdcp->offtp = NULL;
			}
			if (rdcp->entp) {
				free_dir_list(rdcp->entp);
				rdcp->entp = NULL;
			}
			rdcp->full = 0;
		}
	} else {
		/*
		 * Return an empty list.
		 */
		rd->rd_rddir.rddir_size = 0;
		rd->rd_rddir.rddir_eof = 1;
		rd->rd_rddir.rddir_entries = NULL;

		/*
		 * Map the error to an autofs status for the kernel.
		 */
		switch (error) {
		case ENOENT:
			rd->rd_status = AUTOFS_NOENT;
			break;
		case ENOMEM:
			rd->rd_status = AUTOFS_NOMEM;
			break;
		default:
			rd->rd_status = AUTOFS_ECOMM;
		}
	}
	rw_unlock(&rdcp->rwlock);

	mutex_lock(&rdcp->lock);
	rdcp->in_use--;
	mutex_unlock(&rdcp->lock);

	assert(rdcp->in_use >= 0);

	return (error);
}

#define	roundtoint(x)	(((x) + sizeof (int) - 1) & ~(sizeof (int) - 1))
#define	DIRENT64_RECLEN(namelen)	\
	(((int)(((dirent64_t *)0)->d_name) + 1 + (namelen) + 7) & ~7)
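
/*
 * DIRENT64_RECLEN computes the record length of a dirent64 carrying a
 * name of 'namelen' bytes: the fixed header up to d_name, plus the
 * name and its terminating NUL, rounded up to an 8-byte boundary.
 * For example, assuming the usual dirent64 layout (8-byte d_ino,
 * 8-byte d_off, 2-byte d_reclen, so d_name starts at byte 18), a
 * 5-byte name yields (18 + 1 + 5 + 7) & ~7 == 24.
 */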
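/*
 * Fill 'res' with as many dirent64 records as fit in the requested
 * buffer size, starting at directory offset 'offset'. The bucketed
 * offset table is consulted first so only one bucket's worth of
 * entries has to be scanned linearly for the starting entry.
 */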
static int
create_dirents(
	struct autofs_rddir_cache *rdcp,
	ulong_t offset,
	autofs_rddirres *res)
{
	uint_t total_bytes_wanted;
	int bufsize;
	ushort_t this_reclen;
	int outcount = 0;
	int namelen;
	struct dir_entry *list = NULL, *l, *nl;
	struct dirent64 *dp;
	char *outbuf;
	struct off_tbl *offtp, *next = NULL;
	int this_bucket = 0;
	int error = 0;
	int x = 0, y = 0;

	assert(RW_LOCK_HELD(&rdcp->rwlock));
	for (offtp = rdcp->offtp; offtp != NULL; offtp = next) {
		x++;
		next = offtp->next;
		this_bucket = (next == NULL);
		if (!this_bucket)
			this_bucket = (offset < next->offset);
		if (this_bucket) {
			/*
			 * has to be in this bucket
			 */
			assert(offset >= offtp->offset);
			list = offtp->first;
			break;
		}
		/*
		 * loop to look in next bucket
		 */
	}

	for (l = list; l != NULL && l->offset < offset; l = l->next)
		y++;

	if (l == NULL) {
		/*
		 * reached end of directory
		 */
		error = 0;
		goto empty;
	}

	if (trace > 2)
		trace_prt(1, "%s: offset searches (%d, %d)\n", rdcp->map, x, y);

	total_bytes_wanted = res->rd_bufsize;
	bufsize = total_bytes_wanted + sizeof (struct dirent64);
	outbuf = malloc(bufsize);
	if (outbuf == NULL) {
		syslog(LOG_ERR, "memory allocation error\n");
		error = ENOMEM;
		goto empty;
	}
	memset(outbuf, 0, bufsize);
	/* LINTED pointer alignment */
	dp = (struct dirent64 *)outbuf;

	while (l) {
		nl = l->next;
		namelen = strlen(l->name);
		this_reclen = DIRENT64_RECLEN(namelen);
		if (outcount + this_reclen > total_bytes_wanted) {
			break;
		}
		dp->d_ino = (ino64_t)l->nodeid;
		if (nl) {
			/*
			 * get the next element's offset
			 */
			dp->d_off = (off64_t)nl->offset;
		} else {
			/*
			 * This is the last element;
			 * make the offset one plus the current one.
			 */
			dp->d_off = (off64_t)l->offset + 1;
		}
		(void) strcpy(dp->d_name, l->name);
		dp->d_reclen = (ushort_t)this_reclen;
		outcount += dp->d_reclen;
		/*
		 * Advance by the record length; use a char pointer so
		 * the arithmetic is byte-sized and doesn't truncate
		 * the pointer on LP64.
		 */
		dp = (struct dirent64 *)((char *)dp + dp->d_reclen);
		assert(outcount <= total_bytes_wanted);
		l = l->next;
	}

	res->rd_rddir.rddir_size = (long)outcount;
	if (outcount > 0) {
		/*
		 * have some entries
		 */
		res->rd_rddir.rddir_eof = (l == NULL);
		/* LINTED pointer alignment */
		res->rd_rddir.rddir_entries = (struct dirent64 *)outbuf;
		error = 0;
	} else {
		/*
		 * total_bytes_wanted is not large enough for one
		 * directory entry
		 */
		res->rd_rddir.rddir_eof = 0;
		res->rd_rddir.rddir_entries = NULL;
		free(outbuf);
		error = EIO;
	}
	return (error);

empty:
	res->rd_rddir.rddir_size = 0L;
	res->rd_rddir.rddir_eof = TRUE;
	res->rd_rddir.rddir_entries = NULL;
	return (error);
}


/*
 * add new entry to cache for 'map'
 */
static int
autofs_rddir_cache_enter(
	char *map,
	ulong_t bucket_size,
	struct autofs_rddir_cache **rdcpp)
{
	struct autofs_rddir_cache *p;

	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));

	/*
	 * Add to the front of the list at this time.
	 */
	p = (struct autofs_rddir_cache *)malloc(sizeof (*p));
	if (p == NULL) {
		syslog(LOG_ERR,
			"autofs_rddir_cache_enter: memory allocation failed\n");
		return (ENOMEM);
	}
	memset((char *)p, 0, sizeof (*p));

	p->map = malloc(strlen(map) + 1);
	if (p->map == NULL) {
		syslog(LOG_ERR,
			"autofs_rddir_cache_enter: memory allocation failed\n");
		free(p);
		return (ENOMEM);
	}
	strcpy(p->map, map);

	p->bucket_size = bucket_size;
	/*
	 * No need to grab the mutex here, since the node is not yet
	 * visible on the list.
	 */
	p->in_use = 1;
	(void) rwlock_init(&p->rwlock, USYNC_THREAD, NULL);
	(void) mutex_init(&p->lock, USYNC_THREAD, NULL);

	/*
	 * p->next is already NULL from the memset above, so a single
	 * head insertion covers both the empty and non-empty cases.
	 */
	p->next = rddir_head;
	rddir_head = p;
	*rdcpp = p;

	return (0);
}

/*
 * find 'map' in the readdir cache
 */
int
autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp)
{
	struct autofs_rddir_cache *p;

	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
	for (p = rddir_head; p != NULL; p = p->next) {
		if (strcmp(p->map, map) == 0) {
			/*
			 * found matching entry
			 */
			*rdcpp = p;
			mutex_lock(&p->lock);
			p->in_use++;
			mutex_unlock(&p->lock);
			return (0);
		}
	}
	/*
	 * didn't find entry
	 */
	return (ENOENT);
}

/*
 * free the offset table
 */
static void
free_offset_tbl(struct off_tbl *head)
{
	struct off_tbl *p, *next = NULL;

	for (p = head; p != NULL; p = next) {
		next = p->next;
		free(p);
	}
}

/*
 * free the directory entries
 */
static void
free_dir_list(struct dir_entry *head)
{
	struct dir_entry *p, *next = NULL;

	for (p = head; p != NULL; p = next) {
		next = p->next;
		assert(p->name);
		free(p->name);
		free(p);
	}
}

static void
autofs_rddir_cache_entry_free(struct autofs_rddir_cache *p)
{
	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
	assert(!p->in_use);
	if (p->map)
		free(p->map);
	if (p->offtp)
		free_offset_tbl(p->offtp);
	if (p->entp)
		free_dir_list(p->entp);
	free(p);
}

/*
 * Remove an entry from the rddir cache;
 * the caller must own the autofs_rddir_cache_lock.
 */
static int
autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp)
{
	struct autofs_rddir_cache *p, *prev;

	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
	/*
	 * Search the cache for the entry.
	 */
	prev = NULL;
	for (p = rddir_head; p != NULL; p = p->next) {
		if (p == rdcp) {
			/*
			 * entry found, remove from list if not in use
			 */
			if (p->in_use)
				return (EBUSY);
			if (prev)
				prev->next = p->next;
			else
				rddir_head = p->next;
			autofs_rddir_cache_entry_free(p);
			return (0);
		}
	}
	syslog(LOG_ERR, "Couldn't find entry %p in cache\n", (void *)rdcp);
	return (ENOENT);
}

/*
 * Return the entry that matches name, NULL otherwise.
 * Assumes the reader's lock for this list has been grabbed.
 */
struct dir_entry *
rddir_entry_lookup(char *name, struct dir_entry *list)
{
	return (btree_lookup(list, name));
}
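/*
 * Assign directory offsets and inode numbers to every entry on
 * 'list', hang the list off the cache entry, and build the bucketed
 * offset table: one index node per 'bucket_size' offsets, so that
 * create_dirents() can seek to an offset without walking the whole
 * list.
 */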
static void
build_dir_entry_list(struct autofs_rddir_cache *rdcp, struct dir_entry *list)
{
	struct dir_entry *p;
	ulong_t offset = AUTOFS_DAEMONCOOKIE, offset_list = AUTOFS_DAEMONCOOKIE;
	struct off_tbl *offtp, *last = NULL;
	ino_t inonum = 4;

	assert(RW_LOCK_HELD(&rdcp->rwlock));
	assert(rdcp->entp == NULL);
	rdcp->entp = list;
	for (p = list; p != NULL; p = p->next) {
		p->nodeid = inonum;
		p->offset = offset;
		if (offset >= offset_list) {
			/*
			 * add node to index table
			 */
			offtp = (struct off_tbl *)
				malloc(sizeof (struct off_tbl));
			if (offtp != NULL) {
				offtp->offset = offset;
				offtp->first = p;
				offtp->next = NULL;
				offset_list += rdcp->bucket_size;
				/*
				 * add to cache
				 */
				if (rdcp->offtp == NULL)
					rdcp->offtp = offtp;
				else
					last->next = offtp;
				last = offtp;
			} else {
				/*
				 * Log and carry on without an index
				 * node; don't skip the offset/inode
				 * increments below, or the next entry
				 * would get a duplicate offset.
				 */
				syslog(LOG_ERR,
	"WARNING: build_dir_entry_list: could not add offset to index table\n");
			}
		}
		offset++;
		inonum += 2;	/* use even numbers in daemon */
	}
	rdcp->full = 1;
}
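/*
 * Synchronization for the cache cleanup thread. A thread wanting an
 * immediate sweep signals cleanup_start_cv and waits on
 * cleanup_done_cv; a minimal sketch of such a caller (assuming one
 * exists elsewhere in automountd) would be:
 *
 *	mutex_lock(&cleanup_lock);
 *	cond_signal(&cleanup_start_cv);
 *	cond_wait(&cleanup_done_cv, &cleanup_lock);
 *	mutex_unlock(&cleanup_lock);
 */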
mutex_t cleanup_lock;
cond_t cleanup_start_cv;
cond_t cleanup_done_cv;

/*
 * cache cleanup thread starting point
 */
void
cache_cleanup(void)
{
	timestruc_t reltime;
	struct autofs_rddir_cache *p, *next = NULL;
	int error;

	mutex_init(&cleanup_lock, USYNC_THREAD, NULL);
	cond_init(&cleanup_start_cv, USYNC_THREAD, NULL);
	cond_init(&cleanup_done_cv, USYNC_THREAD, NULL);

	mutex_lock(&cleanup_lock);
	for (;;) {
		reltime.tv_sec = RDDIR_CACHE_TIME / 2;
		reltime.tv_nsec = 0;

		/*
		 * Wait for RDDIR_CACHE_TIME/2 seconds, or until some
		 * other thread requests that the caches be cleaned up.
		 */
		if ((error = cond_reltimedwait(
		    &cleanup_start_cv, &cleanup_lock, &reltime)) != 0) {
			if (error != ETIME) {
				if (trace > 1)
					trace_prt(1,
					"cleanup thread wakeup (%d)\n", error);
				continue;
			}
		}
		mutex_unlock(&cleanup_lock);

		/*
		 * Perform the cache cleanup.
		 */
		rw_wrlock(&autofs_rddir_cache_lock);
		for (p = rddir_head; p != NULL; p = next) {
			next = p->next;
			if (p->in_use > 0) {
				/*
				 * cache entry busy, skip it
				 */
				if (trace > 1) {
					trace_prt(1,
					"%s cache in use\n", p->map);
				}
				continue;
			}
			/*
			 * The cache entry is not in use, and nobody can
			 * grab a new reference, since this thread holds
			 * the autofs_rddir_cache_lock.
			 */

			/*
			 * 'error' will be zero if some thread signaled
			 * us asking that the caches be freed. In that
			 * case, free caches even if they're still valid
			 * and nobody is referencing them at this time.
			 * Otherwise, free caches only if their time to
			 * live (ttl) has expired.
			 */
			if (error == ETIME && (p->ttl > time((time_t *)NULL))) {
				/*
				 * Scheduled cache cleanup; if the cache
				 * is still valid, don't free it.
				 */
				if (trace > 1) {
					trace_prt(1,
					"%s cache still valid\n", p->map);
				}
				continue;
			}
			if (trace > 1)
				trace_prt(1, "%s freeing cache\n", p->map);
			assert(!p->in_use);
			error = autofs_rddir_cache_delete(p);
			assert(!error);
		}
		rw_unlock(&autofs_rddir_cache_lock);

		/*
		 * Wake up the thread(s) waiting for the cleanup to
		 * finish.
		 */
		mutex_lock(&cleanup_lock);
		cond_broadcast(&cleanup_done_cv);
	}
	/* NOTREACHED */
}