xref: /titanic_41/usr/src/cmd/avs/dscfglockd/dscfglockd.c (revision 80e2ca8596e3435bc3b76f3c597833ea0a87f85e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
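/*
 * dscfglockd - lock daemon for the dscfg configuration database.
 * Cooperating daemons exchange UDP datagrams (ALIVE, WRITE_LOCK,
 * UNLOCK, GRANTED) to arbitrate a single read/write lock; local
 * clients talk to their own daemon over the same socket using the
 * cfglockd_t request codes defined in cfg_lockd.h.
 */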
26 #include <signal.h>
27 #include <sys/types.h>
28 #include <sys/time.h>
29 #include <sys/socket.h>
30 #include <netinet/in.h>
31 #include <netinet/tcp.h>
32 #include <arpa/inet.h>
33 #include <netdb.h>
34 #include <fcntl.h>
35 #include <string.h>
36 #include <memory.h>
37 #include <sys/param.h>
38 #include <sys/pathconf.h>
39 #include <netdir.h>
40 #include <netconfig.h>
41 #include <sys/sockio.h>
42 #include <net/if.h>
43 #include <sys/resource.h>
44 #include <stdio.h>
45 #include <errno.h>
46 #include <assert.h>
47 #include <locale.h>
48 #include <unistd.h>
49 #include <stdlib.h>
50 #include <string.h>
51 #include <strings.h>
52 #include <sys/unistat/spcs_s.h>
53 #include <sys/unistat/spcs_s_u.h>
54 #include <sys/unistat/spcs_errors.h>
55 
56 #include <sys/nsctl/cfg.h>
57 #include <sys/nsctl/cfg_lockd.h>
58 
59 #ifdef DEBUG
60 #define	DPF(m)		if (debug) (void) fprintf m
61 #else
62 #define	DPF(m)
63 #endif
64 
65 #ifdef	TTY_MESSAGES
66 #define	CLOSE_FD	3
67 #else
68 #define	CLOSE_FD	0
69 #endif
70 
71 #define	MAX_LOCKQ	1024
72 #define	MAX_DAEMONS	1024
73 #define	MAX_LOCAL	1024
74 #define	MAX_UNLOCK	32
75 #define	MAX_TIMEOUTS	3
76 #define	TIMEOUT_SECS	5
77 
78 static char program[] = "dscfglockd";
79 static int debug;
80 static int lstate;
81 static int msgtrace;
82 static FILE *debugfile = NULL;
83 
84 struct lock_req {
85 	cfglockd_t	type;	/* read or write */
86 	pid_t	pid;		/* pid of read locker or local writer */
87 	daemonaddr_t	remote;	/* remote machine requesting write lock */
88 	int		state;	/* for write locks */
89 	int32_t		order;	/* who gets priority? */
90 } lock_queue[MAX_LOCKQ];
91 
92 struct unlock_s {
93 	pid_t	pid;		/* pid of locker */
94 	uint8_t seq;		/* seq number of last lock request */
95 } unlock_buf[MAX_UNLOCK];
96 
97 int next_req;
98 int32_t order;
99 
100 #define	lock_wanted	lock_queue[0]
101 long	ticker	= 1L;
102 
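/*
 * message codes used in the lock protocol in addition to the
 * cfglockd_t request codes defined in cfg_lockd.h
 */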
103 #define	ALIVE		0x10
104 #define	READ_LOCK	0x11
105 #define	WRITE_LOCK	0x12
106 #define	UNLOCK		0x13
107 #define	GRANTED		0x14
108 
109 int next_q;
110 
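/* current state of the configuration lock as seen by this daemon */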
111 struct {
112 	cfglockd_t	type;
113 	int		nholders;
114 	int		state;
115 	daemonaddr_t	holder;
116 	struct lockdaemon	*remote_daemon;
117 	pid_t		holding_pid[MAX_LOCAL];
118 } the_lock;
119 
120 daemonaddr_t	thishost;
121 daemonaddr_t	localhost;
122 
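/* write-lock negotiation states for queued requests and remote daemons */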
123 #define	STATE_CLEAR	0
124 #define	STATE_ASKED	1
125 #define	STATE_OKAYED	2
126 #define	STATE_WANTS	3
127 #define	lockdaemon_dead(ldp)	((ticker - (ldp)->timeout) > MAX_TIMEOUTS)
128 #define	CRIT_BEGIN()	(void) sighold(SIGALRM)
129 #define	CRIT_END()	(void) sigrelse(SIGALRM)
130 
131 #define	NORMAL_UNLOCK	0
132 #define	FORCE_UNLOCK	1
133 
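/* state kept for each cooperating lock daemon */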
134 struct lockdaemon {
135 	daemonaddr_t	host;
136 	int	up;
137 	long	timeout;
138 	int	inuse;
139 	int	state;
140 	int32_t	order;
141 } daemon_list[MAX_DAEMONS];
142 
143 unsigned short	lock_port = CFG_SERVER_PORT;
144 int	lock_soc = 0;
145 int	pf_inet = PF_INET;
146 #define	dp_addr(p)	inet_ntoa(((struct sockaddr_in *)p)->sin_addr)
147 
148 #define	MAXIFS 32
149 
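/*
 * printable names for lock types, negotiation states and message codes,
 * used by the tracing and logging code.
 */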
150 static char *
151 lockd_type(cfglockd_t type)
152 {
153 	switch (type) {
154 	case LOCK_NOTLOCKED:	return "NotLocked";
155 	case LOCK_READ:		return "Read";
156 	case LOCK_WRITE:	return "Write";
157 	case LOCK_LOCKED:	return "Locked";
158 	case LOCK_LOCKEDBY:	return "LockedBy";
159 	case LOCK_STAT:		return "Stat";
160 	case LOCK_ACK:		return "Ack";
161 	default:		return "*unknown*";
162 	}
163 }
164 
165 static char *
166 lockd_state(int state)
167 {
168 	switch (state) {
169 	case STATE_CLEAR:	return "Clear";
170 	case STATE_ASKED:	return "Asked";
171 	case STATE_OKAYED:	return "Okayed";
172 	case STATE_WANTS:	return "Wants";
173 	default:		return "*unknown*";
174 	}
175 }
176 
177 static char *
178 lockd_msg(int message)
179 {
180 	switch (message) {
181 	case ALIVE:		return "Alive";
182 	case READ_LOCK:		return "ReadLock";
183 	case WRITE_LOCK:	return "WriteLock";
184 	case UNLOCK:		return "Unlock";
185 	case GRANTED:		return "Granted";
186 	default:		return lockd_type((cfglockd_t)message);
187 	}
188 }
189 
190 /*
191  * The following is stolen from autod_nfs.c
192  */
193 static void
194 getmyaddrs(struct ifconf *ifc)
195 {
196 	int sock;
197 	int numifs;
198 	char *buf;
199 	int family;
200 
201 	ifc->ifc_buf = NULL;
202 	ifc->ifc_len = 0;
203 
204 #ifdef AF_INET6
205 	family = AF_INET6;
206 #else
207 	family = AF_INET;
208 #endif
209 	if ((sock = socket(family, SOCK_DGRAM, 0)) < 0) {
210 #ifdef DEBUG
211 		perror("getmyaddrs(): socket");
212 #endif
213 		return;
214 	}
215 
216 	if (ioctl(sock, SIOCGIFNUM, (char *)&numifs) < 0) {
217 #ifdef DEBUG
218 		perror("getmyaddrs(): SIOCGIFNUM");
219 #endif
220 		numifs = MAXIFS;
221 	}
222 
223 	buf = (char *)malloc(numifs * sizeof (struct ifreq));
224 	if (buf == NULL) {
225 #ifdef DEBUG
226 		(void) fprintf(stderr, "getmyaddrs(): malloc failed\n");
227 #endif
228 		(void) close(sock);
229 		return;
230 	}
231 
232 	ifc->ifc_buf = buf;
233 	ifc->ifc_len = numifs * sizeof (struct ifreq);
234 
235 	if (ioctl(sock, SIOCGIFCONF, (char *)ifc) < 0) {
236 #ifdef DEBUG
237 		perror("getmyaddrs(): SIOCGIFCONF");
238 #endif
239 	}
240 
241 	(void) close(sock);
242 }
243 
244 struct ifconf *ifc;
245 
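/* compare two daemon addresses; only the IP address is significant */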
246 static int
247 cmp_addr(daemonaddr_t *a, daemonaddr_t *b)
248 {
249 	int rc;
250 	rc = memcmp(&(a->sin_addr), &(b->sin_addr), sizeof (a->sin_addr));
251 	DPF((stderr, "compare %s %hu with", dp_addr(a), a->sin_port));
252 	DPF((stderr, " %s %hu = %d\n", dp_addr(b), b->sin_port, rc));
253 	return (rc);
254 }
255 
256 static int
257 addr_is_holder(int32_t order)
258 {
259 	return ((the_lock.nholders > 0) && the_lock.remote_daemon != NULL &&
260 	    (order == the_lock.remote_daemon->order));
261 }
262 
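/* return true if the given address belongs to one of this host's interfaces */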
263 static int
264 islocalhost(daemonaddr_t *host)
265 {
266 	int n;
267 	struct sockaddr_in *s1, *s2;
268 	struct ifreq *ifr;
269 	int retval = 0;
270 
271 	ifr = ifc->ifc_req;
272 	n = ifc->ifc_len / sizeof (struct ifreq);
273 	s1 = host;
274 	s2 = NULL;
275 	for (; n > 0; n--, ifr++) {
276 		if (ifr->ifr_addr.sa_family != AF_INET)
277 			continue;
278 
279 		/* LINTED pointer alignment */
280 		s2 = (struct sockaddr_in *)&ifr->ifr_addr;
281 
282 		if (memcmp((char *)&s2->sin_addr,
283 		    (char *)&s1->sin_addr, sizeof (s1->sin_addr)) == 0) {
284 			retval = 1;
285 			/* it's me */
286 			break;
287 		}
288 	}
289 	return (retval);
290 }
291 
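/*
 * build a lock_msg and send it to the daemon or client at *dp,
 * retrying if sendto() is interrupted by a signal.
 */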
292 static void
293 send_lockmsg(int cmd, pid_t pid, daemonaddr_t *dp, uint8_t seq)
294 {
295 	struct lock_msg message_buf;
296 	int rc;
297 
298 	if (msgtrace && debugfile) {
299 		time_t t = time(0);
300 		(void) fprintf(debugfile, "%19.19s send %-9.9s to   %s\n",
301 		    ctime(&t), lockd_msg(cmd), dp_addr(dp));
302 	}
303 	DPF((stderr, "send %d to %s port %hu\n", cmd,
304 	    dp_addr(dp), dp->sin_port));
305 	message_buf.message = cmd;
306 	message_buf.pid = pid;
307 	message_buf.order = order;
308 	message_buf.seq = seq;
309 	do {
310 		rc = sendto(lock_soc, &message_buf, sizeof (message_buf), 0,
311 		    (struct sockaddr *)dp, sizeof (struct sockaddr));
312 	} while (rc == -1 && errno == EINTR);
313 	if (rc == -1)
314 		spcs_log("cfglockd", NULL, "sendto rc -1 errno %d", errno);
315 }
316 
317 /*
318  * send an alive message to all configured daemons so that they can tell
319  * us if they are holding a write lock.
320  */
321 
322 static void
323 send_aliveall()
324 {
325 	struct lockdaemon *ldp;
326 	int i;
327 	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
328 		if (ldp->inuse == 0)
329 			break;
330 		send_lockmsg(ALIVE, (pid_t)0, &(ldp->host), 0);
331 	}
332 }
333 
334 /* find the lock daemon structure for a given daemon address */
335 
336 static struct lockdaemon *
337 find_lockdaemon(daemonaddr_t *d)
338 {
339 	struct lockdaemon *ldp;
340 	int i;
341 	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
342 		if (ldp->inuse == 0)
343 			break;
344 		if (cmp_addr(&(ldp->host), d) == 0)
345 			return (ldp);
346 	}
347 	return (NULL);
348 }
349 
350 /*
351  * a message has been received from a daemon; note this, and if the
352  * daemon was previously dead and we hold the write lock, tell it so.
353  */
354 
355 static void
356 daemon_alive(daemonaddr_t *daemon, int32_t order)
357 {
358 	struct lockdaemon *ldp;
359 	int i;
360 
361 	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
362 		if (ldp->inuse == 0)
363 			break;
364 		if (cmp_addr(&(ldp->host), daemon) == 0) {
365 			ldp->order = order;
366 			ldp->timeout = ticker;
367 			if (ldp->up == 0) {
368 				spcs_log("cfglockd", NULL,
369 				    "daemon restarted on %s\n",
370 				    dp_addr(daemon));
371 				DPF((stderr, "daemon restarted on %s\n",
372 				    dp_addr(daemon)));
373 				ldp->up = 1;
374 				goto come_up;
375 			}
376 			return;
377 		}
378 	}
379 	/* new daemon has announced itself */
380 	if (i < MAX_DAEMONS) {
381 		DPF((stderr, "new daemon on %s\n", dp_addr(daemon)));
382 		spcs_log("cfglockd", NULL,
383 		    "new daemon on %s\n", dp_addr(daemon));
384 		ldp->host = *daemon;
385 		ldp->inuse = 1;
386 		ldp->timeout = ticker;
387 		ldp->order = order;
388 	} else {
389 		/* problem, more daemons than expected */
390 		i++;
391 	}
392 come_up:
393 	if (the_lock.type == LOCK_WRITE && the_lock.remote_daemon == NULL)
394 		send_lockmsg(WRITE_LOCK, (pid_t)0, daemon, 0);
395 }
396 
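/* remove an entry from the lock queue, shuffling later entries down */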
397 static void
398 delete_queue_entry(struct  lock_req *req)
399 {
400 	int i;
401 
402 	for (i = (req - lock_queue); i++ < next_req; req++)
403 		*req = *(req+1);
404 	next_req--;
405 }
406 
407 static void
408 take_lock(int ackmessage)
409 {
410 	send_lockmsg(ackmessage, (pid_t)0, &lock_wanted.remote, 0);
411 	delete_queue_entry(lock_queue);
412 }
413 
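/*
 * if every live daemon has okayed our pending write-lock request,
 * take the write lock and tell the local requester.
 */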
414 static void
415 check_for_write_lock()
416 {
417 	struct lockdaemon *ldp;
418 	int i;
419 	int	wait = 0;
420 
421 	DPF((stderr, "check for lock\n"));
422 	if (lock_wanted.state != STATE_ASKED)
423 		return;
424 	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
425 		if (ldp->inuse == 0)
426 			break;
427 		if (ldp->up && ldp->state != STATE_OKAYED) {
428 			wait = 1;
429 			break;
430 		}
431 	}
432 	if (wait == 0 && lock_wanted.type == LOCK_WRITE) {
433 		the_lock.type = LOCK_WRITE;
434 		the_lock.holding_pid[0] = lock_wanted.pid;
435 		the_lock.nholders = 1;
436 		the_lock.state = STATE_CLEAR;
437 		take_lock(LOCK_LOCKED);
438 	}
439 }
440 
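/*
 * handle a GRANTED message from a remote daemon: drop it if we already
 * hold the write lock, reply with UNLOCK if the grant is unexpected,
 * otherwise record the okay and see whether the write lock can now be taken.
 */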
441 static void
442 lock_granted(daemonaddr_t *da)
443 {
444 	struct lockdaemon *ldp;
445 
446 	if ((ldp = find_lockdaemon(da)) != NULL) {
447 		/* if we already own the lock, throw the msg away */
448 		if (the_lock.remote_daemon == NULL &&
449 		    the_lock.type == LOCK_WRITE) {
450 			return;
451 		}
452 
453 		/*
454 		 * If the current lock isn't a write lock and we're not
455 		 * asking for one
456 		 * -OR-
457 		 * The current lock is a write lock and it's not owned by us
458 		 * -THEN-
459 		 * send back an unlocked message.
460 		 */
461 		if ((the_lock.type != LOCK_WRITE &&
462 		    the_lock.state != STATE_ASKED) ||
463 		    (the_lock.type == LOCK_WRITE &&
464 		    the_lock.remote_daemon != NULL)) {
465 			send_lockmsg(UNLOCK, (pid_t)0, &(ldp->host), 0);
466 			return;
467 		}
468 		ldp->state = STATE_OKAYED;
469 	}
470 	check_for_write_lock();
471 }
472 
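/*
 * try to satisfy the request at the head of the lock queue against the
 * current lock state; returns non-zero when the next queued request
 * should be examined immediately.
 */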
473 static int
474 try_lock()
475 {
476 	struct lockdaemon *ldp;
477 	int i;
478 
479 	switch (the_lock.type) {
480 	case LOCK_READ:
481 		if (lock_wanted.type == LOCK_READ) {
482 			i = the_lock.nholders++;
483 			the_lock.holding_pid[i] = lock_wanted.pid;
484 			the_lock.state = STATE_CLEAR;
485 			DPF((stderr, "increment read lockers to %d\n",
486 			    the_lock.nholders));
487 			take_lock(LOCK_LOCKED);
488 			break;
489 		}
490 		/* write lock has to wait */
491 		break;
492 	case LOCK_WRITE:
493 		/* lock has to wait until write lock is cleared */
494 		break;
495 	case LOCK_NOTLOCKED:
496 		if (lock_wanted.type == LOCK_READ) {
497 			DPF((stderr, "local locker, 1 lock holder\n"));
498 			the_lock.holding_pid[0] = lock_wanted.pid;
499 			the_lock.nholders = 1;
500 			the_lock.type = LOCK_READ;
501 			the_lock.state = STATE_CLEAR;
502 			the_lock.remote_daemon = NULL;
503 			take_lock(LOCK_LOCKED);
504 			return (1);
505 		}
506 		if (islocalhost(&lock_wanted.remote)) {
507 			DPF((stderr, "local locker, take write lock\n"));
508 			/* tell everyone I'm locking */
509 			if (lock_wanted.state != STATE_ASKED) {
510 				for (i = 0, ldp = daemon_list; i < MAX_DAEMONS;
511 				    i++, ldp++) {
512 					if (ldp->inuse == 0)
513 						break;
514 					ldp->state = STATE_ASKED;
515 					send_lockmsg(WRITE_LOCK, (pid_t)0,
516 					    &(ldp->host), 0);
517 				}
518 			}
519 			lock_wanted.state = STATE_ASKED;
520 			check_for_write_lock();
521 			the_lock.remote_daemon = NULL;
522 			the_lock.state = STATE_ASKED;
523 			return (0);
524 		} else {
525 			DPF((stderr, "remote locker, take write lock\n"));
526 			the_lock.type = LOCK_WRITE;
527 			the_lock.holder = lock_wanted.remote;
528 			the_lock.nholders = 1;
529 			the_lock.remote_daemon =
530 			    find_lockdaemon(&the_lock.holder);
531 			the_lock.state = STATE_CLEAR;
532 			/* okay to remote */
533 			take_lock(GRANTED);
534 		}
535 		break;
536 	default:
537 		DPF((stderr, "weird lock type held - %d\n", the_lock.type));
538 		the_lock.type = LOCK_NOTLOCKED;
539 		break;
540 	}
541 	return (0);
542 }
543 
544 static void
545 process_queue()
546 {
547 	if (next_req < 1)
548 		return;		/* no locks queued */
549 	while (try_lock())
550 		;
551 }
552 
553 static int
554 lock_sort(const void *a, const void *b)
555 {
556 	struct lock_req *left = (struct lock_req *)a;
557 	struct lock_req *right = (struct lock_req *)b;
558 
559 	return (left->order - right->order);
560 }
561 
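/*
 * queue a lock request unless it duplicates the current lock or an
 * existing entry, back off our own pending write-lock request if a
 * higher priority request arrives, keep the queue sorted by order and
 * then try to grant the head of the queue.
 */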
562 static void
563 queue_lock(cfglockd_t type, struct lock_msg *msg, daemonaddr_t *addr)
564 {
565 	int	i;
566 	struct lock_req *lrp;
567 	struct lockdaemon *ldp;
568 
569 	/* first check if new lock matches current lock */
570 	if (the_lock.type == type && addr_is_holder(msg->order)) {
571 		/* remote daemon missed locked message */
572 		send_lockmsg(GRANTED, (pid_t)0, addr, msg->seq);
573 		return;
574 	}
575 
576 	/* next search queue to check for duplicate */
577 	for (i = 0, lrp = lock_queue; i++ < next_req; lrp++) {
578 		if (lrp->type == type && lrp->pid == msg->pid &&
579 		    cmp_addr(addr, &(lrp->remote)) == 0)
580 			return;
581 
582 	}
583 
584 	/*
585 	 * It's a new lock request.  Are we in the middle of
586 	 * obtaining one for ourselves?
587 	 */
588 
589 	if (the_lock.type == LOCK_NOTLOCKED && the_lock.state == STATE_ASKED) {
590 		/* did a higher priority request just come in? */
591 		if (msg->order < order) {
592 			/* requeue our request */
593 			the_lock.state = STATE_CLEAR;
594 			lock_wanted.state = STATE_CLEAR;
595 
596 			/* let the other lockds know */
597 			for (i = 0, ldp = daemon_list; i < MAX_DAEMONS;
598 			    i++, ldp++) {
599 				if (ldp->inuse == 0)
600 					break;
601 				if (ldp->up && ldp->state == STATE_OKAYED) {
602 					send_lockmsg(UNLOCK, (pid_t)0,
603 					    &(ldp->host), 0);
604 				}
605 			}
606 		}
607 	}
608 
609 
610 	lrp = lock_queue;
611 	lrp += (next_req++);
612 	lrp->type = type;
613 	lrp->pid = msg->pid;
614 	lrp->state = STATE_CLEAR;
615 	lrp->order = msg->order;
616 	if (addr) {
617 		lrp->remote = *addr;
618 	}
619 
620 	if (next_req > 1)
621 		qsort(lock_queue, next_req, sizeof (lock_queue[0]), lock_sort);
622 
623 	if (the_lock.type != LOCK_WRITE)
624 		process_queue();
625 }
626 
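/* log the current lock state, request queue and daemon list to spcs_log */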
627 static void
628 lock_stat()
629 {
630 	char *lt = "Unknown";
631 	struct lockdaemon *ldp;
632 	int i;
633 
634 	spcs_log("cfglockd", NULL,
635 	    "%s, Lock daemon built %s **********", program, __DATE__);
636 	switch (the_lock.type) {
637 	case LOCK_NOTLOCKED:
638 		lt = "not locked";
639 		break;
640 	case LOCK_READ:
641 		lt = "read locked";
642 		break;
643 	case LOCK_WRITE:
644 		lt = "write locked";
645 		break;
646 	}
647 	spcs_log("cfglockd", NULL, "Lock is %s (%d)", lt, the_lock.type);
648 	spcs_log("cfglockd", NULL, "There are %d holders of the lock",
649 	    the_lock.nholders);
650 	if (the_lock.nholders > 0) {
651 		for (i = 0; i < the_lock.nholders; i++)
652 			spcs_log("cfglockd", NULL, "holding_pid[%d] = %6d", i,
653 			    the_lock.holding_pid[i]);
654 	}
655 	spcs_log("cfglockd", NULL, "holder daemon was %s port %hu, remote %p",
656 	    dp_addr(&the_lock.holder), the_lock.holder.sin_port,
657 	    (void *)the_lock.remote_daemon);
658 	spcs_log("cfglockd", NULL, "Lock queue, %d requests", next_req);
659 	for (i = 0; i < next_req; i++) {
660 		spcs_log("cfglockd", NULL, "request %d type %d order %d", i,
661 		    lock_queue[i].type, lock_queue[i].order);
662 		spcs_log("cfglockd", NULL, "  client %s port %hu, pid %d",
663 		    dp_addr(&lock_queue[i].remote),
664 		    lock_queue[i].remote.sin_port, lock_queue[i].pid);
665 	}
666 	spcs_log("cfglockd", NULL, "Daemon list");
667 
668 	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
669 		if (ldp->inuse == 0)
670 			break;
671 		spcs_log("cfglockd", NULL, "daemon %d, %s port %hu", i,
672 		    dp_addr(&ldp->host), ldp->host.sin_port);
673 		spcs_log("cfglockd", NULL,
674 		    "  up %d timeout %ld missed %d state %d\n", ldp->up,
675 		    ldp->timeout, ticker - ldp->timeout, ldp->state);
676 	}
677 }
678 
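/*
 * return true if this (pid, seq) pair has already been seen; otherwise
 * remember it in unlock_buf.  used to discard retransmitted client requests.
 */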
679 static int
680 is_duplicate(cfglockd_t type, pid_t pid, uint8_t seq)
681 {
682 	struct unlock_s *bufp;
683 	int i;
684 
685 	if (!pid) {
686 		return (0);
687 	}
688 
689 	for (i = 0, bufp = unlock_buf; i < MAX_UNLOCK && bufp->pid;
690 	    i++, bufp++) {
691 		if (bufp->pid == pid && bufp->seq == seq) {
692 			/* throw message away */
693 #ifdef DEBUG
694 			spcs_log("cfglockd", NULL,
695 			    "duplicate '%d' request received from %d",
696 			    type, pid);
697 #endif
698 			return (1);
699 		}
700 	}
701 
702 	/* add it to the list */
703 	bcopy(unlock_buf, &unlock_buf[ 1 ],
704 	    sizeof (unlock_buf) - sizeof (struct unlock_s));
705 	(*unlock_buf).pid = pid;
706 	(*unlock_buf).seq = seq;
707 
708 	return (0);
709 }
710 
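/*
 * lock request from a local client: if it repeats a request that has
 * already been granted just re-send LOCK_LOCKED, otherwise queue it.
 */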
711 static void
712 local_lock(cfglockd_t type, struct lock_msg *msg, daemonaddr_t *client)
713 {
714 	if (is_duplicate(type, msg->pid, msg->seq)) {
715 		if (the_lock.remote_daemon == NULL &&
716 		    (the_lock.type == LOCK_WRITE ||
717 		    the_lock.type == LOCK_READ) &&
718 		    the_lock.holding_pid[0] == msg->pid) {
719 			send_lockmsg(LOCK_LOCKED, (pid_t)0, client, msg->seq);
720 		}
721 	} else {
722 		queue_lock(type, msg, client);
723 	}
724 }
725 
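/* write-lock request from a remote daemon: confirm we are alive and queue it */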
726 static void
727 remote_lock(struct sockaddr_in *remote, struct lock_msg *msg)
728 {
729 	/* make sure remote knows we are alive */
730 	send_lockmsg(ALIVE, (pid_t)0, remote, 0);
731 
732 	/* clear out pid as it is meaningless on this node */
733 	msg->pid = (pid_t)0;
734 
735 	queue_lock(LOCK_WRITE, msg, (daemonaddr_t *)remote);
736 }
737 
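/* delete ungranted queue entries matching the given daemon address and/or pid */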
738 static void
739 unqueue_lock(daemonaddr_t *d, pid_t pid)
740 {
741 	int	i;
742 	struct lock_req *lrp, *xrp;
743 	int diff;
744 
745 	/* search queue to delete ungranted locks */
746 	for (i = 0, xrp = lrp = lock_queue; i++ < next_req; lrp++) {
747 		*xrp = *lrp;
748 		diff = 0;
749 		if (pid != (pid_t)0 && lrp->pid != pid)
750 			diff = 1;
751 		if (d != NULL && cmp_addr(d, &(lrp->remote)) != 0)
752 			diff = 1;
753 		if (!diff)
754 			continue;
755 
756 		xrp++;
757 	}
758 	next_req = xrp - lock_queue;
759 }
760 
761 static void
762 xxunlock()
763 {
764 	DPF((stderr, "** UNLOCK **\n"));
765 	the_lock.remote_daemon = NULL;
766 	the_lock.type = LOCK_NOTLOCKED;
767 	the_lock.nholders = 0;
768 	the_lock.state = STATE_CLEAR;
769 	process_queue();
770 }
771 
772 
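/*
 * unlock on behalf of a local process: drop its reference to a read
 * lock, or, for a write lock held by that process, tell the other
 * daemons before releasing the lock.
 */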
773 static void
774 local_unlock(pid_t pid, uint8_t seq, int method)
775 {
776 	struct lockdaemon *ldp;
777 	int i;
778 
779 	if (method == NORMAL_UNLOCK && is_duplicate(LOCK_NOTLOCKED, pid, seq)) {
780 		return;
781 	}
782 
783 	if (the_lock.type == LOCK_READ) {
784 		/* delete reference to pid of reading process */
785 		for (i = 0; i < the_lock.nholders; i++) {
786 			if (the_lock.holding_pid[i] == pid) {
787 				DPF((stderr, "decrement lockers from %d\n",
788 				    the_lock.nholders));
789 				--the_lock.nholders;
790 				break;
791 			}
792 		}
793 		for (; i < the_lock.nholders; i++) {
794 			the_lock.holding_pid[i] = the_lock.holding_pid[i+1];
795 		}
796 		if (the_lock.nholders > 0)
797 			return;
798 	} else {
799 		/* LOCK_WRITE */
800 		if (pid != the_lock.holding_pid[0])
801 			return;
802 		the_lock.holding_pid[0] = (pid_t)0;
803 		for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
804 			if (ldp->inuse == 0)
805 				break;
806 			if (ldp->up)
807 				send_lockmsg(UNLOCK, (pid_t)0, &(ldp->host), 0);
808 		}
809 	}
810 	xxunlock();
811 }
812 
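/*
 * UNLOCK from a remote daemon: remove any ungranted request it has
 * queued, or release the lock if that daemon is the current holder.
 */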
813 static void
814 remote_unlock(int32_t order, daemonaddr_t *d)
815 {
816 	int	i;
817 	struct lock_req *lrp;
818 
819 	DPF((stderr, "remote unlock from %s ", dp_addr(d)));
820 	DPF((stderr, "when %s holds lock\n", dp_addr(&the_lock.holder)));
821 
822 	/* search queue to check for ungranted lock */
823 	for (i = 0, lrp = lock_queue; i++ < next_req; lrp++) {
824 		if (lrp->type == LOCK_WRITE &&
825 		    cmp_addr(d, &(lrp->remote)) == 0) {
826 			delete_queue_entry(lrp);
827 			return;
828 		}
829 
830 	}
831 	if (addr_is_holder(order)) {
832 		xxunlock();
833 	}
834 }
835 
836 static void
837 lockedby(daemonaddr_t *d, uint8_t seq)
838 {
839 	DPF((stderr, "lockby enquiry from %s ", dp_addr(d)));
840 	switch (the_lock.type) {
841 	case LOCK_NOTLOCKED:
842 		send_lockmsg(LOCK_NOTLOCKED, (pid_t)0, d, seq);
843 		break;
844 	case LOCK_READ:
845 		send_lockmsg(LOCK_READ, the_lock.holding_pid[0], d, seq);
846 		break;
847 	case LOCK_WRITE:
848 		send_lockmsg(LOCK_WRITE, the_lock.holding_pid[0], d, seq);
849 		break;
850 	}
851 }
852 
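/*
 * SIGALRM handler, run every TIMEOUT_SECS: advance the tick counter,
 * remind remote requesters that we are alive, reclaim the lock if its
 * remote holder has died, retry pending write-lock requests and mark
 * unresponsive daemons as down.
 */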
853 /* ARGSUSED */
854 static void
855 keepalive(int signo)
856 {
857 	int i;
858 	struct lock_req *locker;
859 	struct lockdaemon *ldp;
860 
861 	DPF((stderr, "keepalive...\n"));
862 	ticker++;
863 
864 	/*
865 	 * tell any other daemon that has a lock request in our queue that
866 	 * this daemon is still alive.
867 	 */
868 
869 	for (i = 0, locker = lock_queue; i < next_req; i++, locker++) {
870 		if (locker->pid == 0)	/* remote lock request */
871 			send_lockmsg(ALIVE, (pid_t)0, &(locker->remote), 0);
872 	}
873 
874 	/*
875 	 * if a remote daemon holds the lock, check that it is still alive;
876 	 * if it is, send it another grant message in case the remote
877 	 * daemon missed our original grant.
878 	 */
879 
880 	if (the_lock.remote_daemon) {
881 		if (lockdaemon_dead(the_lock.remote_daemon)) {
882 			DPF((stderr, "lock owner died\n"));
883 			the_lock.remote_daemon->up = 0;
884 			xxunlock();
885 		} else {
886 			send_lockmsg(GRANTED, (pid_t)0, &the_lock.holder, 0);
887 		}
888 	}
889 
890 	/*
891 	 * check for daemons that are holding up this daemon's write-lock
892 	 * request by not sending a grant message.  if such a daemon is
893 	 * still alive, resend the lock request; otherwise mark it as dead.
894 	 * also send an alive message to every live remote daemon if this
895 	 * daemon holds the write lock.
897 	 */
898 	if (lstate) {
899 		(void) printf("\nlock: %s\n", lockd_type(the_lock.type));
900 		(void) printf("    no. holders: %d\n", the_lock.nholders);
901 		(void) printf("    hold addr  : %s\n", the_lock.remote_daemon ?
902 		    dp_addr(&the_lock.remote_daemon->host) : "0.0.0.0");
903 		(void) printf("    holding pid:");
904 		for (i = 0; i < the_lock.nholders; i++) {
905 			(void) printf(" %ld", the_lock.holding_pid[ i ]);
906 		}
907 		(void) printf("\n");
908 	}
909 	for (i = 0, ldp = daemon_list; i < MAX_DAEMONS; i++, ldp++) {
910 		if (ldp->inuse == 0)
911 			break;
912 
913 		if (lstate) {
914 			(void) printf("%-15.15s ", dp_addr(&ldp->host));
915 			(void) printf("%-4.4s ", ldp->up? "up" : "down");
916 			(void) printf("%5ld ", ldp->timeout);
917 			(void) printf("%-10.10s ", lockd_state(ldp->state));
918 			(void) printf("%6d\n", ldp->order);
919 		}
920 
921 		if (ldp->state == STATE_ASKED) {
922 			if (lockdaemon_dead(ldp)) {
923 				ldp->up = 0;
924 				ldp->state = STATE_CLEAR;
925 				continue;
926 			}
927 			send_lockmsg(WRITE_LOCK, (pid_t)0, &(ldp->host), 0);
928 			continue;
929 		}
930 		if (the_lock.type == LOCK_WRITE &&
931 		    the_lock.remote_daemon == NULL)
932 			send_lockmsg(ALIVE, (pid_t)0, &(ldp->host), 0);
933 	}
934 }
935 
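/*
 * decode a received message, note that the sending daemon is alive,
 * and hand the message to the appropriate handler.
 */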
936 static void
937 dispatch(struct lock_msg *mp, daemonaddr_t *host)
938 {
939 	int message = mp->message;
940 	int localhost;
941 
942 	localhost = islocalhost(host);
943 	if (msgtrace && debugfile) {
944 		time_t t = time(0);
945 		if (localhost) {
946 			(void) fprintf(debugfile,
947 			    "%19.19s recv %-9.9s from %s (%ld)\n", ctime(&t),
948 			    lockd_msg(message), dp_addr(host), mp->pid);
949 		} else {
950 			(void) fprintf(debugfile,
951 			    "%19.19s recv %-9.9s from %s order %d (%ld)\n",
952 			    ctime(&t), lockd_msg(message), dp_addr(host),
953 			    mp->order, mp->pid);
954 		}
955 	}
956 	DPF((stderr, "received message %d\n", message));
957 	DPF((stderr, "from %s port %hu\n", dp_addr(host), host->sin_port));
958 	if (!localhost)
959 		daemon_alive(host, mp->order);
960 	else
961 		mp->order = order;
962 	switch (message) {
963 	case ALIVE:
964 		DPF((stderr, "received ALIVE %s\n", dp_addr(host)));
965 		/* do nothing, general "not localhost" code above does this */
966 		break;
967 	case UNLOCK:
968 		DPF((stderr, "received UNLOCK\n"));
969 		remote_unlock(mp->order, host);
970 		break;
971 	case GRANTED:
972 		DPF((stderr, "received GRANTED\n"));
973 		lock_granted(host);
974 		break;
975 	case WRITE_LOCK:
976 		DPF((stderr, "received WRITE_LOCK\n"));
977 		assert(!localhost);
978 		remote_lock(host, mp);
979 		break;
980 	case READ_LOCK:
981 	case LOCK_READ:
982 		DPF((stderr, "received READ_LOCK\n"));
983 		assert(localhost);
984 		local_lock(LOCK_READ, mp, host);
985 		break;
986 	case LOCK_WRITE:
987 		DPF((stderr, "received LOCK_WRITE\n"));
988 		assert(localhost);
989 		local_lock(LOCK_WRITE, mp, host);
990 		break;
991 	case LOCK_NOTLOCKED:
992 		DPF((stderr, "received LOCK_NOTLOCKED\n"));
993 		send_lockmsg(LOCK_ACK, (pid_t)0, host, mp->seq);
994 		if (the_lock.type != LOCK_NOTLOCKED) {
995 			local_unlock(mp->pid, mp->seq, NORMAL_UNLOCK);
996 		}
997 		break;
998 	case LOCK_LOCKEDBY:
999 		lockedby(host, mp->seq);
1000 		break;
1001 	case LOCK_STAT:
1002 		lock_stat();
1003 		break;
1004 	case LOCK_ACK:
1005 		/* throw message away -- this is an error to receive */
1006 		break;
1007 	}
1008 }
1009 
1010 /*
1011  * unqueue any locks asked for by pid and unlock any locks held by pid.
1012  */
1013 
1014 static void
1015 purge_pid(pid_t pid)
1016 {
1017 	DPF((stderr, "purge locks for %ld\n", pid));
1018 	unqueue_lock(NULL, pid);
1019 	if (the_lock.type != LOCK_NOTLOCKED)
1020 		local_unlock(pid, 0, FORCE_UNLOCK);
1021 }
1022 
1023 /*
1024  * Check for exit or exec of client processes.
1025  * The lock protecting the process's pid in the lockfile will
1026  * be removed by the kernel when a client exits or execs.
1027  */
1028 
1029 static void
1030 check_for_dead()
1031 {
1032 	int i, x;
1033 	pid_t pid;
1034 
1035 	for (i = 0; (x = cfg_filelock(i, 0)) != CFG_LF_EOF; i++) {
1036 		if (x == CFG_LF_AGAIN)
1037 			continue; /* can't take lock, must be still alive */
1038 		cfg_readpid(i, &pid);
1039 		cfg_writepid(i, (pid_t)0);
1040 		(void) cfg_fileunlock(i);
1041 		if (pid != (pid_t)0)
1042 			purge_pid(pid);
1043 	}
1044 }
1045 
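/*
 * read the list of cooperating daemons from cf_file (or from the output
 * of the program named by cf_file when exe is set), resolve each host
 * name and fill in daemon_list, skipping aliases of this host.
 */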
1046 static void
1047 build_daemon_list(char *cf_file, int exe)
1048 {
1049 	FILE *fp;
1050 	char	host[1024];
1051 	int	port;
1052 	int	i;
1053 	struct	hostent *hp;
1054 	struct lockdaemon *ldp;
1055 
1056 	if ((hp = gethostbyname("localhost")) == NULL) {
1057 		(void) fprintf(stderr, "%s: Can't find hostent for %s\n",
1058 		    program, "localhost");
1059 		spcs_log("cfglockd", NULL, "couldn't find localhost");
1060 		exit(1);
1061 	}
1062 
1063 	(void) memcpy(&(localhost.sin_addr.s_addr), *(hp->h_addr_list),
1064 	    sizeof (localhost.sin_addr));
1065 	if (cf_file == NULL) {
1066 		(void) endhostent();
1067 		return;
1068 	}
1069 	if (exe) {
1070 		if ((fp = popen(cf_file, "r")) == NULL) {
1071 			perror(cf_file);
1072 			(void) fprintf(stderr,
1073 			    "%s: Can't open config program\n", program);
1074 			spcs_log("cfglockd", NULL, "couldn't read config");
1075 			exit(1);
1076 		}
1077 	} else {
1078 		if ((fp = fopen(cf_file, "r")) == NULL) {
1079 			perror(cf_file);
1080 			(void) fprintf(stderr, "%s: Can't open config file\n",
1081 			    program);
1082 			spcs_log("cfglockd", NULL, "couldn't read config");
1083 			exit(1);
1084 		}
1085 	}
1086 	ldp = daemon_list;
1087 	while ((i = fscanf(fp, "%s %d\n", host, &port)) != EOF) {
1088 		if (host[0] == '#')	/* lines starting with '#' are comments */
1089 			continue;
1090 		if (i == 1) {
1091 			port = lock_port;
1092 		} else {
1093 			if (strcmp(host, "localhost") == 0) {
1094 				lock_port = port;
1095 				continue;
1096 			}
1097 		}
1098 
1099 		if ((hp = gethostbyname(host)) == NULL) {
1100 			(void) fprintf(stderr,
1101 			    "%s: Can't find hostent for %s\n", program, host);
1102 			continue;
1103 		}
1104 
1105 		(void) memcpy(&(ldp->host.sin_addr.s_addr), *(hp->h_addr_list),
1106 		    sizeof (ldp->host.sin_addr));
1107 		DPF((stderr, "daemon: %s\t%s\n",
1108 		    inet_ntoa(ldp->host.sin_addr), hp->h_name));
1109 		if (islocalhost(&(ldp->host))) {
1110 			DPF((stderr, "is an alias for this host, skipping\n"));
1111 			continue;
1112 		}
1113 		ldp->host.sin_port = htons((short)port);
1114 		ldp->host.sin_family = hp->h_addrtype;
1115 		ldp->inuse = 1;
1116 		ldp->up = 1;
1117 		ldp++;
1118 	}
1119 	if (exe)
1120 		(void) pclose(fp);
1121 	else
1122 		(void) fclose(fp);
1123 	(void) endhostent();
1124 }
1125 
1126 static void
1127 usage()
1128 {
1129 	(void) fprintf(stderr,
1130 	    gettext("usage: %s [-d] [-f file]|[-e program]\n"), program);
1131 	exit(1);
1132 }
1133 
1134 static void
1135 unexpected(int sig)
1136 {
1137 	spcs_log("cfglockd", NULL, "pid %d unexpected signal %d, ignoring",
1138 	    getpid(), sig);
1139 }
1140 
1141 static void
1142 term(int sig)
1143 {
1144 	(void) unlink(CFG_PIDFILE);
1145 	spcs_log("cfglockd", NULL, "pid %d terminate on signal %d", getpid(),
1146 	    sig);
1147 	exit(0);
1148 }
1149 
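/*
 * start-up: daemonize, parse options, write the pid file, clear stale
 * client file locks, build the daemon list, bind the UDP socket and
 * start the keepalive timer.
 */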
1150 static void
1151 init(int argc, char *argv[])
1152 {
1153 #if defined(_SunOS_5_6) || defined(_SunOS_5_7) || defined(_SunOS_5_8)
1154 	struct rlimit rl;
1155 #endif
1156 	int	c, i, x;
1157 	int	rc;
1158 	char	*cp = NULL;
1159 	struct	itimerval	tv;
1160 	struct	timeval		tp;
1161 	socklen_t len = sizeof (thishost);
1162 	int	exe = 0;
1163 	pid_t	pid;
1164 	FILE	*fp;
1165 
1166 	lstate = (getenv("LOCKD_STATE") != NULL);
1167 	msgtrace = (getenv("LOCKD_MSG") != NULL);
1168 
1169 	/*
1170 	 * Fork off a child that becomes the daemon.
1171 	 */
1172 
1173 #ifndef TTY_MESSAGES
1174 	if ((rc = fork()) > 0)
1175 		exit(0);
1176 	else if (rc < 0) {
1177 		spcs_log("cfglockd", NULL, "can't fork %d", errno);
1178 		(void) fprintf(stderr, gettext("dscfglockd: cannot fork: %s\n"),
1179 		    strerror(errno));
1180 		exit(1);
1181 	}
1182 #endif
1183 
1184 	/*
1185 	 * In child - become daemon.
1186 	 */
1187 
1188 #if !defined(_SunOS_5_6) && !defined(_SunOS_5_7) && !defined(_SunOS_5_8)
1189 	/* use closefrom(3C) from PSARC/2000/193 when possible */
1190 	closefrom(CLOSE_FD);
1191 #else
1192 	(void) getrlimit(RLIMIT_NOFILE, &rl);
1193 	for (i = CLOSE_FD; i < rl.rlim_max; i++)
1194 		(void) close(i);
1195 #endif
1196 
1197 #ifdef DEBUG
1198 #ifndef	TTY_MESSAGES
1199 	(void) open("/dev/console", O_WRONLY|O_APPEND);
1200 	(void) dup(0);
1201 	(void) dup(0);
1202 #endif
1203 #endif
1204 	(void) close(0);
1205 
1206 	if (msgtrace || lstate) {
1207 		debugfile = fopen("/var/tmp/dscfglockd.out", "a");
1208 		if (debugfile) {
1209 			time_t t = time(0);
1210 			setbuf(debugfile, (char *)0);
1211 			(void) fprintf(debugfile, "%19.19s dscfglockd start\n",
1212 			    ctime(&t));
1213 		}
1214 	}
1215 
1216 	(void) setpgrp();
1217 	spcs_log("cfglockd", NULL, "new lock daemon, pid %d", getpid());
1218 
1219 	/*
1220 	 * Catch as unexpected all signals apart from SIGTERM.
1221 	 */
1222 
1223 	for (i = 1; i < _sys_nsig; i++)
1224 		(void) sigset(i, unexpected);
1225 	(void) sigset(SIGTERM, term);
1226 
1227 	for (i = 0; (c = getopt(argc, argv, "df:e:")) != EOF; i++) {
1228 		switch (c) {
1229 		case 'd':
1230 			debug = 1;
1231 			break;
1232 		case 'e':
1233 			exe = 1;
1234 			if (cp) {
1235 				usage();
1236 			}
1237 			cp = optarg;
1238 			break;
1239 		case 'f':
1240 			if (cp) {
1241 				usage();
1242 			}
1243 			cp = optarg;
1244 			break;
1245 		default:
1246 			usage();
1247 			break;
1248 		}
1249 	}
1250 
1251 	ifc = (struct ifconf *)malloc(sizeof (struct ifconf));
1252 	if (ifc == NULL) {
1253 		perror("malloc");
1254 		DPF((stderr, "Can't allocate ifconf structure\n"));
1255 		exit(1);
1256 	}
1257 	(void) memset((char *)ifc, 0, sizeof (struct ifconf));
1258 	getmyaddrs(ifc);
1259 
1260 	/*
1261 	 * if (lockdaemonalive()) {
1262 	 *	(void) fprintf(stderr, "%s: %s\n", program,
1263 	 *		gettext("There is already a live lockdaemon"));
1264 	 *	exit(1);
1265 	 * }
1266 	 */
1267 	if ((fp = fopen(CFG_PIDFILE, "w")) == NULL) {
1268 		perror(CFG_PIDFILE);
1269 		DPF((stderr, "Can't open pid file\n"));
1270 		exit(1);
1271 	}
1272 	(void) fprintf(fp, "%ld\n", getpid());
1273 	(void) fclose(fp);
1274 
1275 	/* order should be set to node number within cluster */
1276 	order = cfg_iscluster();
1277 	cfg_lfinit();
1278 
1279 	if (!order) {
1280 		(void) gettimeofday(&tp, NULL);
1281 		srand48(tp.tv_usec);
1282 		order = lrand48();
1283 		if (debugfile) {
1284 			(void) fprintf(debugfile, "WARNING: order number "
1285 			    "is 0 -- changing randomly to %d\n", order);
1286 		}
1287 	}
1288 	c = 0;
1289 	for (i = 0; (x = cfg_filelock(i, 0)) != CFG_LF_EOF; i++) {
1290 		if (x == CFG_LF_AGAIN) {
1291 			cfg_readpid(i, &pid);
1292 			if (c++ == 0)
1293 				spcs_log("cfglockd", NULL,
1294 				    "init .dscfg.lck slot %d pid %d locked",
1295 				    i, pid);
1296 			DPF((stderr, "client process %ld still alive\n", pid));
1297 			continue; /* can't take lock, must be still alive */
1298 		}
1299 		cfg_writepid(i, 0);
1300 		(void) cfg_fileunlock(i);
1301 	}
1302 
1303 	tv.it_interval.tv_sec = TIMEOUT_SECS;
1304 	tv.it_interval.tv_usec = 0;
1305 	tv.it_value = tv.it_interval;
1306 
1307 	bzero(unlock_buf, sizeof (unlock_buf));
1308 	next_q = 0;
1309 	build_daemon_list(cp, exe);
1310 	if ((lock_soc = socket(pf_inet, SOCK_DGRAM, 0)) < 0) {
1311 		(void) fprintf(stderr, "%s: %s\n", program,
1312 		    gettext("failed to create socket"));
1313 		perror("socket");
1314 		spcs_log("cfglockd", NULL, "couldn't create socket");
1315 		exit(1);
1316 	}
1317 	thishost.sin_family = AF_INET;
1318 	thishost.sin_addr.s_addr = INADDR_ANY;
1319 	thishost.sin_port = htons(lock_port);
1320 	rc = bind(lock_soc, (struct sockaddr *)&thishost, sizeof (thishost));
1321 	if (rc < 0) {
1322 		perror("bind");
1323 		spcs_log("cfglockd", NULL, "couldn't bind");
1324 		exit(1);
1325 	}
1326 	if (getsockname(lock_soc, (struct sockaddr *)&thishost, &len) < 0)
1327 		perror("getsockname");
1328 	send_aliveall();
1329 	(void) sigset(SIGALRM, keepalive);
1330 	(void) setitimer(ITIMER_REAL, &tv, NULL);
1331 	/*
1332 	 * wait two timeouts before allowing a lock, to find out whether
1333 	 * someone else currently holds the lock.
1334 	 */
1335 }
1336 
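/*
 * main loop: receive lock protocol messages and dispatch them; between
 * messages check whether local lock holders have exited.
 */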
1337 #ifdef lint
1338 int
1339 lintmain(int argc, char *argv[])
1340 #else
1341 int
1342 main(int argc, char *argv[])
1343 #endif
1344 {
1345 	struct lock_msg message_buf;
1346 	daemonaddr_t from;
1347 	socklen_t addrlen;
1348 	int rc;
1349 	int x = 1;		/* kludge to stop warnings from compiler */
1350 
1351 	init(argc, argv);
1352 	CRIT_BEGIN();
1353 	while (x) {
1354 		CRIT_END();
1355 		addrlen = sizeof (from);
1356 		DPF((stderr, "begin recvfrom\n"));
1357 		rc = recvfrom(lock_soc, &message_buf, sizeof (message_buf),
1358 		    0, (struct sockaddr *)&from, &addrlen);
1359 		DPF((stderr, "end recvfrom rc = %d\n", rc));
1360 		CRIT_BEGIN();
1361 		if (rc == sizeof (message_buf))
1362 			dispatch(&message_buf, &from);
1363 		else
1364 			check_for_write_lock();
1365 
1366 		/* if we own the lock, check to see if the process died */
1367 		if (the_lock.type != LOCK_NOTLOCKED &&
1368 		    the_lock.remote_daemon == NULL)
1369 			check_for_dead();
1370 	}
1371 	CRIT_END();
1372 	return (0);
1373 }
1374