xref: /titanic_44/usr/src/cmd/fs.d/autofs/autod_readdir.c (revision 3bfb48feb84bb78924286a801c68f80198912fa7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  *	autod_readdir.c
28  */
29 
30 #pragma ident	"%Z%%M%	%I%	%E% SMI"
31 
32 #include <stdio.h>
33 #include <ctype.h>
34 #include <string.h>
35 #include <syslog.h>
36 #include <sys/types.h>
37 #include <sys/param.h>
38 #include <errno.h>
39 #include <pwd.h>
40 #include <locale.h>
41 #include <stdlib.h>
42 #include <unistd.h>
43 #include <assert.h>
44 #include <fcntl.h>
45 #include "automount.h"
46 
47 static void build_dir_entry_list(struct autofs_rddir_cache *rdcp,
48 				struct dir_entry *list);
49 static int autofs_rddir_cache_enter(char *map, ulong_t bucket_size,
50 				struct autofs_rddir_cache **rdcpp);
51 int autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp);
52 static int autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp);
53 static int create_dirents(struct autofs_rddir_cache *rdcp, ulong_t offset,
54 				autofs_rddirres *res);
55 struct dir_entry *rddir_entry_lookup(char *name, struct dir_entry *list);
56 static void free_offset_tbl(struct off_tbl *head);
57 static void free_dir_list(struct dir_entry *head);
58 
59 #define	OFFSET_BUCKET_SIZE	100
60 
61 rwlock_t autofs_rddir_cache_lock;		/* readdir cache lock */
62 struct autofs_rddir_cache *rddir_head;		/* readdir cache head */
63 
64 int
65 do_readdir(autofs_rddirargs *rda, autofs_rddirres *rd)
66 {
67 	struct dir_entry *list = NULL, *l;
68 	struct autofs_rddir_cache *rdcp = NULL;
69 	int error;
70 	int cache_time = RDDIR_CACHE_TIME;
71 
72 	if (automountd_nobrowse) {
73 		/*
74 		 * Browsability was disabled; return an empty list.
75 		 */
76 		rd->rd_status = AUTOFS_OK;
77 		rd->rd_rddir.rddir_size = 0;
78 		rd->rd_rddir.rddir_eof = 1;
79 		rd->rd_rddir.rddir_entries = NULL;
80 
81 		return (0);
82 	}
83 
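	/*
	 * Look up the map in the readdir cache under the reader lock.
	 * If it isn't there, retry the lookup under the writer lock
	 * (another thread may have added it in the window between the
	 * two lock acquisitions) before creating the entry ourselves.
	 */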
84 	rw_rdlock(&autofs_rddir_cache_lock);
85 	error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
86 	if (error) {
87 		rw_unlock(&autofs_rddir_cache_lock);
88 		rw_wrlock(&autofs_rddir_cache_lock);
89 		error = autofs_rddir_cache_lookup(rda->rda_map, &rdcp);
90 		if (error) {
91 			if (trace > 2)
92 				trace_prt(1,
93 				"map %s not found, adding...\n", rda->rda_map);
94 			/*
95 			 * entry doesn't exist, add it.
96 			 */
97 			error = autofs_rddir_cache_enter(rda->rda_map,
98 					OFFSET_BUCKET_SIZE, &rdcp);
99 		}
100 	}
101 	rw_unlock(&autofs_rddir_cache_lock);
102 
103 	if (error)
104 		return (error);
105 
106 	assert(rdcp != NULL);
107 	assert(rdcp->in_use);
108 
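	/*
	 * If the cache entry hasn't been populated yet, take its writer
	 * lock and recheck 'full' (another thread may have filled it in
	 * while we waited); otherwise a reader lock suffices to build
	 * the reply from the cached entry list.
	 */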
109 	if (!rdcp->full) {
110 		rw_wrlock(&rdcp->rwlock);
111 		if (!rdcp->full) {
112 			/*
113 			 * cache entry hasn't been filled up, do it now.
114 			 */
115 			char *stack[STACKSIZ];
116 			char **stkptr;
117 
118 			/*
119 			 * Initialize the stack of open files
120 			 * for this thread
121 			 */
122 			stack_op(INIT, NULL, stack, &stkptr);
123 			(void) getmapkeys(rda->rda_map, &list, &error,
124 			    &cache_time, stack, &stkptr, rda->uid);
125 			if (!error)
126 				build_dir_entry_list(rdcp, list);
127 			else if (list) {
128 				free_dir_list(list);
129 				list = NULL;
130 			}
131 		}
132 	} else
133 		rw_rdlock(&rdcp->rwlock);
134 
135 	rd->rd_bufsize = rda->rda_count;
136 	if (!error) {
137 		error = create_dirents(rdcp, rda->rda_offset, rd);
138 		if (error) {
139 			if (rdcp->offtp) {
140 				free_offset_tbl(rdcp->offtp);
141 				rdcp->offtp = NULL;
142 			}
143 			if (rdcp->entp) {
144 				free_dir_list(rdcp->entp);
145 				rdcp->entp = NULL;
146 			}
147 			rdcp->full = 0;
148 			list = NULL;
149 		}
150 	}
151 
152 	if (trace > 2) {
153 		/*
154 		 * print this list only once
155 		 */
156 		for (l = list; l != NULL; l = l->next)
157 			trace_prt(0, "%s\n", l->name);
158 		trace_prt(0, "\n");
159 	}
160 
161 	if (!error) {
162 		rd->rd_status = AUTOFS_OK;
163 		if (cache_time) {
164 			/*
165 			 * keep list of entries for up to
166 			 * 'cache_time' seconds
167 			 */
168 			rdcp->ttl = time((time_t *)NULL) + cache_time;
169 		} else {
170 			/*
171 			 * the underlying name service indicated not
172 			 * to cache contents.
173 			 */
174 			if (rdcp->offtp) {
175 				free_offset_tbl(rdcp->offtp);
176 				rdcp->offtp = NULL;
177 			}
178 			if (rdcp->entp) {
179 				free_dir_list(rdcp->entp);
180 				rdcp->entp = NULL;
181 			}
182 			rdcp->full = 0;
183 		}
184 	} else {
185 		/*
186 		 * return an empty list
187 		 */
188 		rd->rd_rddir.rddir_size = 0;
189 		rd->rd_rddir.rddir_eof = 1;
190 		rd->rd_rddir.rddir_entries = NULL;
191 
192 		/*
193 		 * Invalidate cache and set error
194 		 */
195 		switch (error) {
196 		case ENOENT:
197 			rd->rd_status = AUTOFS_NOENT;
198 			break;
199 		case ENOMEM:
200 			rd->rd_status = AUTOFS_NOMEM;
201 			break;
202 		default:
203 			rd->rd_status = AUTOFS_ECOMM;
204 		}
205 	}
206 	rw_unlock(&rdcp->rwlock);
207 
208 	mutex_lock(&rdcp->lock);
209 	rdcp->in_use--;
210 	mutex_unlock(&rdcp->lock);
211 
212 	assert(rdcp->in_use >= 0);
213 
214 	return (error);
215 }
216 
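/*
 * roundtoint() rounds a size up to an int boundary; DIRENT64_RECLEN()
 * gives the space a dirent64 record needs for a name of the given
 * length (header, name and terminating NUL) rounded up to an 8-byte
 * boundary.
 */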
217 #define	roundtoint(x)	(((x) + sizeof (int) - 1) & ~(sizeof (int) - 1))
218 #define	DIRENT64_RECLEN(namelen)	\
219 	(((int)(((dirent64_t *)0)->d_name) + 1 + (namelen) + 7) & ~ 7)
220 
221 static int
222 create_dirents(
223 	struct autofs_rddir_cache *rdcp,
224 	ulong_t offset,
225 	autofs_rddirres *res)
226 {
227 	uint_t total_bytes_wanted;
228 	int bufsize;
229 	ushort_t this_reclen;
230 	int outcount = 0;
231 	int namelen;
232 	struct dir_entry *list = NULL, *l, *nl;
233 	struct dirent64 *dp;
234 	char *outbuf;
235 	struct off_tbl *offtp, *next = NULL;
236 	int this_bucket = 0;
237 	int error = 0;
238 	int x = 0, y = 0;
239 
240 	assert(RW_LOCK_HELD(&rdcp->rwlock));
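	/*
	 * Find the offset bucket that covers the requested offset, then
	 * scan forward within that bucket for the first entry at or
	 * beyond it.  'x' and 'y' only count the two searches for the
	 * trace message below.
	 */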
241 	for (offtp = rdcp->offtp; offtp != NULL; offtp = next) {
242 		x++;
243 		next = offtp->next;
244 		this_bucket = (next == NULL);
245 		if (!this_bucket)
246 			this_bucket = (offset < next->offset);
247 		if (this_bucket) {
248 			/*
249 			 * has to be in this bucket
250 			 */
251 			assert(offset >= offtp->offset);
252 			list = offtp->first;
253 			break;
254 		}
255 		/*
256 		 * loop to look in next bucket
257 		 */
258 	}
259 
260 	for (l = list; l != NULL && l->offset < offset; l = l->next)
261 		y++;
262 
263 	if (l == NULL) {
264 		/*
265 		 * reached end of directory
266 		 */
267 		error = 0;
268 		goto empty;
269 	}
270 
271 	if (trace > 2)
272 		trace_prt(1, "%s: offset searches (%d, %d)\n", rdcp->map, x, y);
273 
274 	total_bytes_wanted = res->rd_bufsize;
275 	bufsize = total_bytes_wanted + sizeof (struct dirent64);
276 	outbuf = malloc(bufsize);
277 	if (outbuf == NULL) {
278 		syslog(LOG_ERR, "memory allocation error\n");
279 		error = ENOMEM;
280 		goto empty;
281 	}
282 	memset(outbuf, 0, bufsize);
283 	/* LINTED pointer alignment */
284 	dp = (struct dirent64 *)outbuf;
285 
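	/*
	 * Pack as many entries as will fit in the caller's buffer.
	 * Each record's d_off is set to the offset of the following
	 * entry (or one past the last entry) so the kernel can resume
	 * the readdir from that cookie.
	 */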
286 	while (l) {
287 		nl = l->next;
288 		namelen = strlen(l->name);
289 		this_reclen = DIRENT64_RECLEN(namelen);
290 		if (outcount + this_reclen > total_bytes_wanted) {
291 			break;
292 		}
293 		dp->d_ino = (ino64_t)l->nodeid;
294 		if (nl) {
295 			/*
296 			 * get the next element's offset
297 			 */
298 			dp->d_off = (off64_t)nl->offset;
299 		} else {
300 			/*
301 			 * This is the last element; make its offset
302 			 * one plus the current offset.
303 			 */
304 			dp->d_off = (off64_t)l->offset + 1;
305 		}
306 		(void) strcpy(dp->d_name, l->name);
307 		dp->d_reclen = (ushort_t)this_reclen;
308 		outcount += dp->d_reclen;
309 		dp = (struct dirent64 *)((char *)dp + dp->d_reclen);
310 		assert(outcount <= total_bytes_wanted);
311 		l = l->next;
312 	}
313 
314 	res->rd_rddir.rddir_size = (long)outcount;
315 	if (outcount > 0) {
316 		/*
317 		 * have some entries
318 		 */
319 		res->rd_rddir.rddir_eof = (l == NULL);
320 		/* LINTED pointer alignment */
321 		res->rd_rddir.rddir_entries = (struct dirent64 *)outbuf;
322 		error = 0;
323 	} else {
324 		/*
325 		 * total_bytes_wanted is not large enough for one
326 		 * directory entry
327 		 */
328 		res->rd_rddir.rddir_eof = 0;
329 		res->rd_rddir.rddir_entries = NULL;
330 		free(outbuf);
331 		error = EIO;
332 	}
333 	return (error);
334 
335 empty:
336 	res->rd_rddir.rddir_size = 0L;
337 	res->rd_rddir.rddir_eof = TRUE;
338 	res->rd_rddir.rddir_entries = NULL;
339 	return (error);
340 }
341 
342 
343 /*
344  * add new entry to cache for 'map'
345  */
346 static int
347 autofs_rddir_cache_enter(
348 	char *map,
349 	ulong_t bucket_size,
350 	struct autofs_rddir_cache **rdcpp)
351 {
352 	struct autofs_rddir_cache *p;
353 	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
354 
355 	/*
356 	 * Add to front of the list at this time
357 	 */
358 	p = (struct autofs_rddir_cache *)malloc(sizeof (*p));
359 	if (p == NULL) {
360 		syslog(LOG_ERR,
361 			"autofs_rddir_cache_enter: memory allocation failed\n");
362 		return (ENOMEM);
363 	}
364 	memset((char *)p, 0, sizeof (*p));
365 
366 	p->map = malloc(strlen(map) + 1);
367 	if (p->map == NULL) {
368 		syslog(LOG_ERR,
369 			"autofs_rddir_cache_enter: memory allocation failed\n");
370 		free(p);
371 		return (ENOMEM);
372 	}
373 	strcpy(p->map, map);
374 
375 	p->bucket_size = bucket_size;
376 	/*
377 	 * no need to grab mutex lock since I haven't yet made the
378 	 * node visible to the list
379 	 */
380 	p->in_use = 1;
381 	(void) rwlock_init(&p->rwlock, USYNC_THREAD, NULL);
382 	(void) mutex_init(&p->lock, USYNC_THREAD, NULL);
383 
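	/*
	 * The entry becomes visible with in_use already set to 1, so
	 * the cleanup thread cannot free it before the caller drops
	 * its reference.
	 */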
384 	if (rddir_head == NULL)
385 		rddir_head = p;
386 	else {
387 		p->next = rddir_head;
388 		rddir_head = p;
389 	}
390 	*rdcpp = p;
391 
392 	return (0);
393 }
394 
395 /*
396  * find 'map' in readdir cache
397  */
398 int
399 autofs_rddir_cache_lookup(char *map, struct autofs_rddir_cache **rdcpp)
400 {
401 	struct autofs_rddir_cache *p;
402 
403 	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
404 	for (p = rddir_head; p != NULL; p = p->next) {
405 		if (strcmp(p->map, map) == 0) {
406 			/*
407 			 * found matching entry
408 			 */
409 			*rdcpp = p;
410 			mutex_lock(&p->lock);
411 			p->in_use++;
412 			mutex_unlock(&p->lock);
413 			return (0);
414 		}
415 	}
416 	/*
417 	 * didn't find entry
418 	 */
419 	return (ENOENT);
420 }
421 
422 /*
423  * free the offset table
424  */
425 static void
426 free_offset_tbl(struct off_tbl *head)
427 {
428 	struct off_tbl *p, *next = NULL;
429 
430 	for (p = head; p != NULL; p = next) {
431 		next = p->next;
432 		free(p);
433 	}
434 }
435 
436 /*
437  * free the directory entries
438  */
439 static void
440 free_dir_list(struct dir_entry *head)
441 {
442 	struct dir_entry *p, *next = NULL;
443 
444 	for (p = head; p != NULL; p = next) {
445 		next = p->next;
446 		assert(p->name);
447 		free(p->name);
448 		free(p);
449 	}
450 }
451 
452 static void
453 autofs_rddir_cache_entry_free(struct autofs_rddir_cache *p)
454 {
455 	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
456 	assert(!p->in_use);
457 	if (p->map)
458 		free(p->map);
459 	if (p->offtp)
460 		free_offset_tbl(p->offtp);
461 	if (p->entp)
462 		free_dir_list(p->entp);
463 	free(p);
464 }
465 
466 /*
467  * Remove entry from the rddircache
468  * the caller must own the autofs_rddir_cache_lock.
469  */
470 static int
471 autofs_rddir_cache_delete(struct autofs_rddir_cache *rdcp)
472 {
473 	struct autofs_rddir_cache *p, *prev;
474 
475 	assert(RW_LOCK_HELD(&autofs_rddir_cache_lock));
476 	/*
477 	 * Search cache for entry
478 	 */
479 	prev = NULL;
480 	for (p = rddir_head; p != NULL; p = p->next) {
481 		if (p == rdcp) {
482 			/*
483 			 * entry found, remove from list if not in use
484 			 */
485 			if (p->in_use)
486 				return (EBUSY);
487 			if (prev)
488 				prev->next = p->next;
489 			else
490 				rddir_head = p->next;
491 			autofs_rddir_cache_entry_free(p);
492 			return (0);
493 		}
494 		prev = p;
495 	}
496 	syslog(LOG_ERR, "Couldn't find entry %p in cache\n", (void *)rdcp);
497 	return (ENOENT);
498 }
499 
500 /*
501  * Return entry that matches name, NULL otherwise.
502  * Assumes the reader's lock for this list has been grabbed.
503  */
504 struct dir_entry *
505 rddir_entry_lookup(char *name, struct dir_entry *list)
506 {
507 	return (btree_lookup(list, name));
508 }
509 
510 static void
511 build_dir_entry_list(struct autofs_rddir_cache *rdcp, struct dir_entry *list)
512 {
513 	struct dir_entry *p;
514 	ulong_t offset = AUTOFS_DAEMONCOOKIE, offset_list = AUTOFS_DAEMONCOOKIE;
515 	struct off_tbl *offtp, *last = NULL;
516 	ino_t inonum = 4;
517 
518 	assert(RW_LOCK_HELD(&rdcp->rwlock));
519 	assert(rdcp->entp == NULL);
520 	rdcp->entp = list;
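	/*
	 * Assign each entry a sequential offset (starting at
	 * AUTOFS_DAEMONCOOKIE) and an even inode number, and add an
	 * off_tbl index entry every 'bucket_size' offsets so that
	 * create_dirents() can locate an offset without walking the
	 * whole list.
	 */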
521 	for (p = list; p != NULL; p = p->next) {
522 		p->nodeid = inonum;
523 		p->offset = offset;
524 		if (offset >= offset_list) {
525 			/*
526 			 * add node to index table
527 			 */
528 			offtp = (struct off_tbl *)
529 				malloc(sizeof (struct off_tbl));
530 			if (offtp != NULL) {
531 				offtp->offset = offset;
532 				offtp->first = p;
533 				offtp->next = NULL;
534 				offset_list += rdcp->bucket_size;
535 			} else {
536 				syslog(LOG_ERR,
537 "WARNING: build_dir_entry_list: could not add offset to index table\n");
538 				continue;
539 			}
540 			/*
541 			 * add to cache
542 			 */
543 			if (rdcp->offtp == NULL)
544 				rdcp->offtp = offtp;
545 			else
546 				last->next = offtp;
547 			last = offtp;
548 		}
549 		offset++;
550 		inonum += 2;		/* use even numbers in daemon */
551 	}
552 	rdcp->full = 1;
553 }
554 
555 mutex_t cleanup_lock;
556 cond_t cleanup_start_cv;
557 cond_t cleanup_done_cv;
558 
559 /*
560  * cache cleanup thread starting point
561  */
562 void
563 cache_cleanup(void)
564 {
565 	timestruc_t reltime;
566 	struct autofs_rddir_cache *p, *next = NULL;
567 	int error;
568 
569 	mutex_init(&cleanup_lock, USYNC_THREAD, NULL);
570 	cond_init(&cleanup_start_cv, USYNC_THREAD, NULL);
571 	cond_init(&cleanup_done_cv, USYNC_THREAD, NULL);
572 
573 	mutex_lock(&cleanup_lock);
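	/*
	 * Loop forever: wake up every RDDIR_CACHE_TIME/2 seconds, or
	 * sooner if another thread signals cleanup_start_cv.  On a
	 * timed wakeup only expired, unreferenced entries are freed;
	 * on an explicit request all unreferenced entries are freed.
	 */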
574 	for (;;) {
575 		reltime.tv_sec = RDDIR_CACHE_TIME/2;
576 		reltime.tv_nsec = 0;
577 
578 		/*
579 		 * delay RDDIR_CACHE_TIME seconds, or until some other thread
580 		 * requests that I cleanup the caches
581 		 */
582 		if (error = cond_reltimedwait(
583 		    &cleanup_start_cv, &cleanup_lock, &reltime)) {
584 			if (error != ETIME) {
585 				if (trace > 1)
586 					trace_prt(1,
587 					"cleanup thread wakeup (%d)\n", error);
588 				continue;
589 			}
590 		}
591 		mutex_unlock(&cleanup_lock);
592 
593 		/*
594 		 * Perform the cache cleanup
595 		 */
596 		rw_wrlock(&autofs_rddir_cache_lock);
597 		for (p = rddir_head; p != NULL; p = next) {
598 			next = p->next;
599 			if (p->in_use > 0) {
600 				/*
601 				 * cache entry busy, skip it
602 				 */
603 				if (trace > 1) {
604 					trace_prt(1,
605 					"%s cache in use\n", p->map);
606 				}
607 				continue;
608 			}
609 			/*
610 			 * Cache entry is not in use, and nobody can grab a
611 			 * new reference since I'm holding the
612 			 * autofs_rddir_cache_lock
613 			 */
614 
615 			/*
616 			 * error will be zero if some thread signaled us asking
617 			 * that the caches be freed. In that case, free caches
618 			 * even if they're still valid and nobody is referencing
619 			 * them at this time. Otherwise, free caches only
620 			 * if their time to live (ttl) has expired.
621 			 */
622 			if (error == ETIME && (p->ttl > time((time_t *)NULL))) {
623 				/*
624 				 * Scheduled cache cleanup, if cache is still
625 				 * valid don't free.
626 				 */
627 				if (trace > 1) {
628 					trace_prt(1,
629 					"%s cache still valid\n", p->map);
630 				}
631 				continue;
632 			}
633 			if (trace > 1)
634 				trace_prt(1, "%s freeing cache\n", p->map);
635 			assert(!p->in_use);
636 			error = autofs_rddir_cache_delete(p);
637 			assert(!error);
638 		}
639 		rw_unlock(&autofs_rddir_cache_lock);
640 
641 		/*
642 		 * wakeup the thread/threads waiting for the
643 		 * cleanup to finish
644 		 */
645 		mutex_lock(&cleanup_lock);
646 		cond_broadcast(&cleanup_done_cv);
647 	}
648 	/* NOTREACHED */
649 }
650