/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 *	autod_readdir.c
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <syslog.h>
#include <sys/types.h>
#include <sys/param.h>
#include <errno.h>
#include <pwd.h>
#include <locale.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <fcntl.h>
#include "automount.h"

static void build_dir_entry_list(struct rddir_cache *rdcp,
				struct dir_entry *list);
static int rddir_cache_enter(char *map, ulong_t bucket_size,
				struct rddir_cache **rdcpp);
int rddir_cache_lookup(char *map, struct rddir_cache **rdcpp);
static int rddir_cache_delete(struct rddir_cache *rdcp);
static int create_dirents(struct rddir_cache *rdcp, ulong_t offset,
				autofs_rddirres *res);
struct dir_entry *rddir_entry_lookup(char *name, struct dir_entry *list);
static void free_offset_tbl(struct off_tbl *head);
static void free_dir_list(struct dir_entry *head);

#define	OFFSET_BUCKET_SIZE	100

rwlock_t rddir_cache_lock;		/* readdir cache lock */
struct rddir_cache *rddir_head;		/* readdir cache head */

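/*
 * Handle a readdir request from the kernel for map 'rda->rda_map':
 * populate the per-map readdir cache if needed, then return a buffer
 * of dirent64 entries starting at offset 'rda->rda_offset'.
 */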
int
do_readdir(struct autofs_rddirargs *rda, struct autofs_rddirres *rd,
    struct authunix_parms *cred)
{
	struct dir_entry *list = NULL, *l;
	struct rddir_cache *rdcp = NULL;
	int error;
	int cache_time = RDDIR_CACHE_TIME;

	if (automountd_nobrowse) {
		/*
		 * Browsability was disabled; return an empty list.
		 */
		rd->rd_status = AUTOFS_OK;
		rd->rd_rddir.rddir_size = 0;
		rd->rd_rddir.rddir_eof = 1;
		rd->rd_rddir.rddir_entries = NULL;

		return (0);
	}

	rw_rdlock(&rddir_cache_lock);
	error = rddir_cache_lookup(rda->rda_map, &rdcp);
	if (error) {
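		/*
		 * Not found under the read lock. Drop it and retry
		 * under the write lock; another thread may have added
		 * the entry in the window between the two locks, so
		 * only create it if the second lookup also fails.
		 */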
		rw_unlock(&rddir_cache_lock);
		rw_wrlock(&rddir_cache_lock);
		error = rddir_cache_lookup(rda->rda_map, &rdcp);
		if (error) {
			if (trace > 2)
				trace_prt(1,
				"map %s not found, adding...\n", rda->rda_map);
			/*
			 * entry doesn't exist, add it.
			 */
			error = rddir_cache_enter(rda->rda_map,
					OFFSET_BUCKET_SIZE, &rdcp);
		}
	}
	rw_unlock(&rddir_cache_lock);

	if (error)
		return (error);

	assert(rdcp != NULL);
	assert(rdcp->in_use);

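	/*
	 * Test the 'full' flag again after taking the entry's write
	 * lock; another thread may have filled the cache while we
	 * waited for the lock.
	 */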
	if (!rdcp->full) {
		rw_wrlock(&rdcp->rwlock);
		if (!rdcp->full) {
			/*
			 * cache entry hasn't been filled up, do it now.
			 */
			char *stack[STACKSIZ];
			char **stkptr;

			/*
			 * Initialize the stack of open files
			 * for this thread
			 */
			stack_op(INIT, NULL, stack, &stkptr);
			(void) getmapkeys(rda->rda_map, &list, &error,
			    &cache_time, stack, &stkptr, cred->aup_uid);
			if (!error)
				build_dir_entry_list(rdcp, list);
			else if (list) {
				free_dir_list(list);
				list = NULL;
			}
		}
	} else
		rw_rdlock(&rdcp->rwlock);

	rd->rd_bufsize = rda->rda_count;
	if (!error) {
		error = create_dirents(rdcp, rda->rda_offset, rd);
		if (error) {
			if (rdcp->offtp) {
				free_offset_tbl(rdcp->offtp);
				rdcp->offtp = NULL;
			}
			if (rdcp->entp) {
				free_dir_list(rdcp->entp);
				rdcp->entp = NULL;
			}
			rdcp->full = 0;
			list = NULL;
		}
	}

	if (trace > 2) {
		/*
		 * print this list only once
		 */
		for (l = list; l != NULL; l = l->next)
			trace_prt(0, "%s\n", l->name);
		trace_prt(0, "\n");
	}

	if (!error) {
		rd->rd_status = AUTOFS_OK;
		if (cache_time) {
			/*
			 * keep list of entries for up to
			 * 'cache_time' seconds
			 */
			rdcp->ttl = time((time_t *)NULL) + cache_time;
		} else {
			/*
			 * the underlying name service indicated not
			 * to cache contents.
			 */
			if (rdcp->offtp) {
				free_offset_tbl(rdcp->offtp);
				rdcp->offtp = NULL;
			}
			if (rdcp->entp) {
				free_dir_list(rdcp->entp);
				rdcp->entp = NULL;
			}
			rdcp->full = 0;
		}
	} else {
		/*
		 * return an empty list
		 */
		rd->rd_rddir.rddir_size = 0;
		rd->rd_rddir.rddir_eof = 1;
		rd->rd_rddir.rddir_entries = NULL;

		/*
		 * Invalidate cache and set error
		 */
		switch (error) {
		case ENOENT:
			rd->rd_status = AUTOFS_NOENT;
			break;
		case ENOMEM:
			rd->rd_status = AUTOFS_NOMEM;
			break;
		default:
			rd->rd_status = AUTOFS_ECOMM;
		}
	}
	rw_unlock(&rdcp->rwlock);

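	/*
	 * Drop our reference; the cleanup thread only frees an entry
	 * once its in_use count has returned to zero.
	 */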
	mutex_lock(&rdcp->lock);
	rdcp->in_use--;
	mutex_unlock(&rdcp->lock);

	assert(rdcp->in_use >= 0);

	return (error);
}

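/*
 * roundtoint() rounds a byte count up to the next multiple of
 * sizeof (int). DIRENT64_RECLEN() computes the record length of a
 * dirent64 holding a name of 'namelen' bytes: the offset of d_name
 * within the structure, plus namelen + 1 for the terminating NUL,
 * rounded up to an 8-byte boundary. For instance, if d_name begins
 * at offset 18, a 5-byte name needs 18 + 1 + 5 = 24 bytes, which is
 * already 8-byte aligned.
 */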
#define	roundtoint(x)	(((x) + sizeof (int) - 1) & ~(sizeof (int) - 1))
#define	DIRENT64_RECLEN(namelen)	\
	(((int)(((dirent64_t *)0)->d_name) + 1 + (namelen) + 7) & ~ 7)

static int
create_dirents(struct rddir_cache *rdcp, ulong_t offset, autofs_rddirres *res)
{
	uint_t total_bytes_wanted;
	int bufsize;
	ushort_t this_reclen;
	int outcount = 0;
	int namelen;
	struct dir_entry *list = NULL, *l, *nl;
	struct dirent64 *dp;
	char *outbuf;
	struct off_tbl *offtp, *next = NULL;
	int this_bucket = 0;
	int error = 0;
	int x = 0, y = 0;

	assert(RW_LOCK_HELD(&rdcp->rwlock));
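	/*
	 * Two-level search for the entry at 'offset': walk the bucket
	 * index until we find the bucket whose range covers the offset,
	 * then scan linearly within that bucket. x and y count the
	 * steps at each level for the trace output below.
	 */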
	for (offtp = rdcp->offtp; offtp != NULL; offtp = next) {
		x++;
		next = offtp->next;
		this_bucket = (next == NULL);
		if (!this_bucket)
			this_bucket = (offset < next->offset);
		if (this_bucket) {
			/*
			 * has to be in this bucket
			 */
			assert(offset >= offtp->offset);
			list = offtp->first;
			break;
		}
		/*
		 * loop to look in next bucket
		 */
	}

	for (l = list; l != NULL && l->offset < offset; l = l->next)
		y++;

	if (l == NULL) {
		/*
		 * reached end of directory
		 */
		error = 0;
		goto empty;
	}

	if (trace > 2)
		trace_prt(1, "%s: offset searches (%d, %d)\n", rdcp->map, x, y);

	total_bytes_wanted = res->rd_bufsize;
	bufsize = total_bytes_wanted + sizeof (struct dirent64);
	outbuf = malloc(bufsize);
	if (outbuf == NULL) {
		syslog(LOG_ERR, "memory allocation error\n");
		error = ENOMEM;
		goto empty;
	}
	memset(outbuf, 0, bufsize);
	/* LINTED pointer alignment */
	dp = (struct dirent64 *)outbuf;

	while (l) {
		nl = l->next;
		namelen = strlen(l->name);
		this_reclen = DIRENT64_RECLEN(namelen);
		if (outcount + this_reclen > total_bytes_wanted) {
			break;
		}
		dp->d_ino = (ino64_t)l->nodeid;
		if (nl) {
			/*
			 * get the next element's offset
			 */
			dp->d_off = (off64_t)nl->offset;
		} else {
			/*
			 * This is the last element;
			 * make the offset one past the current one.
			 */
			dp->d_off = (off64_t)l->offset + 1;
		}
		(void) strcpy(dp->d_name, l->name);
		dp->d_reclen = (ushort_t)this_reclen;
		outcount += dp->d_reclen;
		dp = (struct dirent64 *)((char *)dp + dp->d_reclen);
		assert(outcount <= total_bytes_wanted);
		l = l->next;
	}

	res->rd_rddir.rddir_size = (long)outcount;
	if (outcount > 0) {
		/*
		 * have some entries
		 */
		res->rd_rddir.rddir_eof = (l == NULL);
		/* LINTED pointer alignment */
		res->rd_rddir.rddir_entries = (struct dirent64 *)outbuf;
		error = 0;
	} else {
		/*
		 * total_bytes_wanted is not large enough for one
		 * directory entry
		 */
		res->rd_rddir.rddir_eof = 0;
		res->rd_rddir.rddir_entries = NULL;
		free(outbuf);
		error = EIO;
	}
	return (error);

empty:	res->rd_rddir.rddir_size = (long)0;
	res->rd_rddir.rddir_eof = TRUE;
	res->rd_rddir.rddir_entries = NULL;
	return (error);
}


/*
 * add new entry to cache for 'map'
 */
static int
rddir_cache_enter(char *map, ulong_t bucket_size, struct rddir_cache **rdcpp)
{
	struct rddir_cache *p;
	assert(RW_LOCK_HELD(&rddir_cache_lock));

	/*
	 * Add to front of the list at this time
	 */
	p = (struct rddir_cache *)malloc(sizeof (*p));
	if (p == NULL) {
		syslog(LOG_ERR,
			"rddir_cache_enter: memory allocation failed\n");
		return (ENOMEM);
	}
	memset((char *)p, 0, sizeof (*p));

	p->map = malloc(strlen(map) + 1);
	if (p->map == NULL) {
		syslog(LOG_ERR,
			"rddir_cache_enter: memory allocation failed\n");
		free(p);
		return (ENOMEM);
	}
	strcpy(p->map, map);

	p->bucket_size = bucket_size;
	/*
	 * no need to grab mutex lock since I haven't yet made the
	 * node visible to the list
	 */
	p->in_use = 1;
	(void) rwlock_init(&p->rwlock, USYNC_THREAD, NULL);
	(void) mutex_init(&p->lock, USYNC_THREAD, NULL);

	if (rddir_head == NULL)
		rddir_head = p;
	else {
		p->next = rddir_head;
		rddir_head = p;
	}
	*rdcpp = p;

	return (0);
}

/*
 * find 'map' in readdir cache
 */
int
rddir_cache_lookup(char *map, struct rddir_cache **rdcpp)
{
	struct rddir_cache *p;

	assert(RW_LOCK_HELD(&rddir_cache_lock));
	for (p = rddir_head; p != NULL; p = p->next) {
		if (strcmp(p->map, map) == 0) {
			/*
			 * found matching entry
			 */
			*rdcpp = p;
			mutex_lock(&p->lock);
			p->in_use++;
			mutex_unlock(&p->lock);
			return (0);
		}
	}
	/*
	 * didn't find entry
	 */
	return (ENOENT);
}

/*
 * free the offset table
 */
static void
free_offset_tbl(struct off_tbl *head)
{
	struct off_tbl *p, *next = NULL;

	for (p = head; p != NULL; p = next) {
		next = p->next;
		free(p);
	}
}

/*
 * free the directory entries
 */
static void
free_dir_list(struct dir_entry *head)
{
	struct dir_entry *p, *next = NULL;

	for (p = head; p != NULL; p = next) {
		next = p->next;
		assert(p->name);
		free(p->name);
		free(p);
	}
}

static void
rddir_cache_entry_free(struct rddir_cache *p)
{
	assert(RW_LOCK_HELD(&rddir_cache_lock));
	assert(!p->in_use);
	if (p->map)
		free(p->map);
	if (p->offtp)
		free_offset_tbl(p->offtp);
	if (p->entp)
		free_dir_list(p->entp);
	free(p);
}

/*
 * Remove an entry from the readdir cache;
 * the caller must own the rddir_cache_lock.
 */
static int
rddir_cache_delete(struct rddir_cache *rdcp)
{
	struct rddir_cache *p, *prev;

	assert(RW_LOCK_HELD(&rddir_cache_lock));
	/*
	 * Search cache for entry
	 */
	prev = NULL;
	for (p = rddir_head; p != NULL; p = p->next) {
		if (p == rdcp) {
			/*
			 * entry found, remove from list if not in use
			 */
			if (p->in_use)
				return (EBUSY);
			if (prev)
				prev->next = p->next;
			else
				rddir_head = p->next;
			rddir_cache_entry_free(p);
			return (0);
		}
		prev = p;
	}
	syslog(LOG_ERR, "Couldn't find entry %p in cache\n", (void *)rdcp);
	return (ENOENT);
}

/*
 * Return entry that matches name, NULL otherwise.
 * Assumes the reader's lock for this list has been grabbed.
 */
struct dir_entry *
rddir_entry_lookup(char *name, struct dir_entry *list)
{
	return (btree_lookup(list, name));
}

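/*
 * Fill the cache entry with 'list': give each entry a sequential
 * offset (cookie) starting at AUTOFS_DAEMONCOOKIE and an even inode
 * number, and build the offset index consumed by create_dirents(),
 * one off_tbl node per 'bucket_size' entries.
 */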
static void
build_dir_entry_list(struct rddir_cache *rdcp, struct dir_entry *list)
{
	struct dir_entry *p;
	ulong_t offset = AUTOFS_DAEMONCOOKIE, offset_list = AUTOFS_DAEMONCOOKIE;
	struct off_tbl *offtp, *last = NULL;
	ino_t inonum = 4;

	assert(RW_LOCK_HELD(&rdcp->rwlock));
	assert(rdcp->entp == NULL);
	rdcp->entp = list;
	for (p = list; p != NULL; p = p->next) {
		p->nodeid = inonum;
		p->offset = offset;
		if (offset >= offset_list) {
			/*
			 * add node to index table
			 */
			offtp = (struct off_tbl *)
				malloc(sizeof (struct off_tbl));
			if (offtp != NULL) {
				offtp->offset = offset;
				offtp->first = p;
				offtp->next = NULL;
				offset_list += rdcp->bucket_size;
			} else {
				syslog(LOG_ERR,
"WARNING: build_dir_entry_list: could not add offset to index table\n");
				continue;
			}
			/*
			 * add to cache
			 */
			if (rdcp->offtp == NULL)
				rdcp->offtp = offtp;
			else
				last->next = offtp;
			last = offtp;
		}
		offset++;
		inonum += 2;		/* use even numbers in daemon */
	}
	rdcp->full = 1;
}

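/*
 * Synchronization for the cache cleanup thread below: a thread asking
 * for an immediate cleanup signals cleanup_start_cv under cleanup_lock
 * and may then block on cleanup_done_cv until the pass completes.
 * A sketch of the assumed caller pattern (the actual callers live
 * elsewhere in automountd):
 *
 *	mutex_lock(&cleanup_lock);
 *	cond_signal(&cleanup_start_cv);
 *	cond_wait(&cleanup_done_cv, &cleanup_lock);
 *	mutex_unlock(&cleanup_lock);
 */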
mutex_t cleanup_lock;
cond_t cleanup_start_cv;
cond_t cleanup_done_cv;

/*
 * cache cleanup thread starting point
 */
void
cache_cleanup(void)
{
	timestruc_t reltime;
	struct rddir_cache *p, *next = NULL;
	int error;

	mutex_init(&cleanup_lock, USYNC_THREAD, NULL);
	cond_init(&cleanup_start_cv, USYNC_THREAD, NULL);
	cond_init(&cleanup_done_cv, USYNC_THREAD, NULL);

	mutex_lock(&cleanup_lock);
	for (;;) {
		reltime.tv_sec = RDDIR_CACHE_TIME/2;
		reltime.tv_nsec = 0;

		/*
		 * delay RDDIR_CACHE_TIME/2 seconds, or until some other
		 * thread requests that I clean up the caches
		 */
		if (error = cond_reltimedwait(
		    &cleanup_start_cv, &cleanup_lock, &reltime)) {
			if (error != ETIME) {
				if (trace > 1)
					trace_prt(1,
					"cleanup thread wakeup (%d)\n", error);
				continue;
			}
		}
		mutex_unlock(&cleanup_lock);

		/*
		 * Perform the cache cleanup
		 */
		rw_wrlock(&rddir_cache_lock);
		for (p = rddir_head; p != NULL; p = next) {
			next = p->next;
			if (p->in_use > 0) {
				/*
				 * cache entry busy, skip it
				 */
				if (trace > 1) {
					trace_prt(1,
					"%s cache in use\n", p->map);
				}
				continue;
			}
			/*
			 * Cache entry is not in use, and nobody can grab a
			 * new reference since I'm holding the rddir_cache_lock
			 */

			/*
			 * error will be zero if some thread signaled us asking
			 * that the caches be freed. In such case, free caches
			 * even if they're still valid and nobody is referencing
			 * them at this time. Otherwise, free caches only
			 * if their time to live (ttl) has expired.
			 */
			if (error == ETIME && (p->ttl > time((time_t *)NULL))) {
				/*
				 * Scheduled cache cleanup, if cache is still
				 * valid don't free.
				 */
				if (trace > 1) {
					trace_prt(1,
					"%s cache still valid\n", p->map);
				}
				continue;
			}
			if (trace > 1)
				trace_prt(1, "%s freeing cache\n", p->map);
			assert(!p->in_use);
			error = rddir_cache_delete(p);
			assert(!error);
		}
		rw_unlock(&rddir_cache_lock);

		/*
		 * wakeup the thread/threads waiting for the
		 * cleanup to finish
		 */
		mutex_lock(&cleanup_lock);
		cond_broadcast(&cleanup_done_cv);
	}
	/* NOTREACHED */
}
644