xref: /freebsd/usr.sbin/rpc.lockd/lockd_lock.c (revision 54ab3ed82b639b593fb0fe6db845e38a9d18970e)
1 /*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
2 /*	$FreeBSD$ */
3 
4 /*
5  * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
6  * Copyright (c) 2000 Manuel Bouyer.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  */
37 
38 #define LOCKD_DEBUG
39 
40 #include <stdio.h>
41 #ifdef LOCKD_DEBUG
42 #include <stdarg.h>
43 #endif
44 #include <stdlib.h>
45 #include <unistd.h>
46 #include <fcntl.h>
47 #include <syslog.h>
48 #include <errno.h>
49 #include <string.h>
50 #include <signal.h>
51 #include <rpc/rpc.h>
52 #include <sys/types.h>
53 #include <sys/stat.h>
54 #include <sys/socket.h>
55 #include <sys/param.h>
56 #include <sys/mount.h>
57 #include <sys/wait.h>
58 #include <rpcsvc/sm_inter.h>
59 #include <rpcsvc/nlm_prot.h>
60 #include "lockd_lock.h"
61 #include "lockd.h"
62 
63 #define MAXOBJECTSIZE 64
64 #define MAXBUFFERSIZE 1024
65 
66 /*
67  * SM_MAXSTRLEN is usually 1024.  This means that lock requests and
68  * host name monitoring entries are *MUCH* larger than they should be
69  */
70 
71 /*
72  * A set of utilities for managing file locking
73  *
74  * XXX: All locks are in a linked list, a better structure should be used
 * to improve search/access efficiency.
76  */
77 
/*
 * struct describing a lock: one entry on either the active NFS lock list
 * (nfslocklist_head) or the blocked-lock list (blockedlocklist_head).
 */
struct file_lock {
	LIST_ENTRY(file_lock) nfslocklist;	/* list linkage (active or blocked list) */
	fhandle_t filehandle; /* NFS filehandle */
	struct sockaddr *addr;	/* client address; pointer stored as-is, not copied */
	struct nlm4_holder client; /* lock holder */
	/* XXX: client_cookie used *only* in send_granted */
	netobj client_cookie; /* cookie sent by the client */
	char client_name[SM_MAXSTRLEN];	/* caller (host) name from the NLM request */
	int nsm_status; /* status from the remote lock manager */
	int status; /* lock status, see below */
	int flags; /* lock flags, see lockd_lock.h */
	int blocking; /* blocking lock or not */
	pid_t locker; /* pid of the child process trying to get the lock */
	int fd;	/* file descriptor for this lock */
};
94 
95 LIST_HEAD(nfslocklist_head, file_lock);
96 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
97 
98 LIST_HEAD(blockedlocklist_head, file_lock);
99 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
100 
/* lock status (values for struct file_lock.status) */
#define LKST_LOCKED	1 /* lock is locked */
/* XXX: Is this flag file specific or lock specific? */
#define LKST_WAITING	2 /* file is already locked by another host */
#define LKST_PROCESSING	3 /* child is trying to acquire the lock */
#define LKST_DYING	4 /* must die when we get news from the child */
107 
/* struct describing a monitored host */
struct host {
	LIST_ENTRY(host) hostlst;	/* linkage on hostlst_head */
	char name[SM_MAXSTRLEN];	/* monitored host's name */
	int refcnt;	/* reference count; presumably one per lock held by this host — verify in (un)monitor_lock_host */
};
/* list of hosts we monitor */
LIST_HEAD(hostlst_head, host);
struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);
117 
118 /*
119  * File monitoring handlers
120  * XXX: These might be able to be removed when kevent support
121  * is placed into the hardware lock/unlock routines.  (ie.
122  * let the kernel do all the file monitoring)
123  */
124 
/* Struct describing a monitored file */
struct monfile {
	LIST_ENTRY(monfile) monfilelist;	/* linkage on monfilelist_head */
	fhandle_t filehandle; /* Local access filehandle */
	int fd; /* file descriptor: remains open until unlock! */
	int refcount;	/* presumably number of active locks sharing this open file — confirm in lock/unlock_hwlock */
	int exclusive;	/* NOTE(review): looks like "opened for exclusive access" — confirm in lock_hwlock */
};

/* List of files we monitor */
LIST_HEAD(monfilelist_head, monfile);
struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
137 
/* Seconds slept before each debug message (see debuglog()); 0 disables the delay. */
static int debugdelay = 0;

/* Result of an operation on the daemon's internal NFS lock list. */
enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
		      NFS_DENIED, NFS_DENIED_NOLOCK,
		      NFS_RESERR };

/* Result of an operation on the underlying ("hardware") file lock. */
enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
		     HW_DENIED, HW_DENIED_NOLOCK,
		     HW_STALEFH, HW_READONLY, HW_RESERR };

/* Combined result of the NFS-list and hardware halves of a lock operation. */
enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};

/* Position of the unlock region's left/right edge relative to an established lock. */
enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
/* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
/* Used as a bitmask: region_compare() may OR SPL_LOCK1 and SPL_LOCK2 together. */
enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
156 
157 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
158 
159 void send_granted(struct file_lock *fl, int opcode);
160 void siglock(void);
161 void sigunlock(void);
162 void monitor_lock_host(const char *hostname);
163 void unmonitor_lock_host(const char *hostname);
164 
165 void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
166     const bool_t exclusive, struct nlm4_holder *dest);
167 struct file_lock *	allocate_file_lock(const netobj *lockowner,
168     const netobj *matchcookie);
169 void	deallocate_file_lock(struct file_lock *fl);
170 void	fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
171     struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
172     const u_int64_t offset, const u_int64_t len, const char *caller_name,
173     const int state, const int status, const int flags, const int blocking);
int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
    const u_int64_t start2, const u_int64_t len2);
176 enum split_status  region_compare(const u_int64_t starte, const u_int64_t lene,
177     const u_int64_t startu, const u_int64_t lenu,
178     u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
179 int	same_netobj(const netobj *n0, const netobj *n1);
180 int	same_filelock_identity(const struct file_lock *fl0,
181     const struct file_lock *fl2);
182 
183 static void debuglog(char const *fmt, ...);
184 void dump_static_object(const unsigned char* object, const int sizeof_object,
185                         unsigned char* hbuff, const int sizeof_hbuff,
186                         unsigned char* cbuff, const int sizeof_cbuff);
187 void dump_netobj(const struct netobj *nobj);
188 void dump_filelock(const struct file_lock *fl);
189 struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
190 enum nfslock_status	test_nfslock(const struct file_lock *fl,
191     struct file_lock **conflicting_fl);
192 enum nfslock_status	lock_nfslock(struct file_lock *fl);
193 enum nfslock_status	delete_nfslock(struct file_lock *fl);
194 enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
195     struct file_lock **released_lock, struct file_lock **left_lock,
196     struct file_lock **right_lock);
197 enum hwlock_status lock_hwlock(struct file_lock *fl);
198 enum split_status split_nfslock(const struct file_lock *exist_lock,
199     const struct file_lock *unlock_lock, struct file_lock **left_lock,
200     struct file_lock **right_lock);
201 void	add_blockingfilelock(struct file_lock *fl);
202 enum hwlock_status	unlock_hwlock(const struct file_lock *fl);
203 enum hwlock_status	test_hwlock(const struct file_lock *fl,
204     struct file_lock **conflicting_fl);
205 void	remove_blockingfilelock(struct file_lock *fl);
206 void	clear_blockingfilelock(const char *hostname);
207 void	retry_blockingfilelocklist(void);
208 enum partialfilelock_status	unlock_partialfilelock(
209     const struct file_lock *fl);
210 void	clear_partialfilelock(const char *hostname);
211 enum partialfilelock_status	test_partialfilelock(
212     const struct file_lock *fl, struct file_lock **conflicting_fl);
213 enum nlm_stats	do_test(struct file_lock *fl,
214     struct file_lock **conflicting_fl);
215 enum nlm_stats	do_unlock(struct file_lock *fl);
216 enum nlm_stats	do_lock(struct file_lock *fl);
217 void	do_clear(const char *hostname);
218 
219 
220 void
221 debuglog(char const *fmt, ...)
222 {
223 	va_list ap;
224 
225 	if (debug_level < 1) {
226 		return;
227 	}
228 
229 	sleep(debugdelay);
230 
231 	va_start(ap, fmt);
232 	vsyslog(LOG_DEBUG, fmt, ap);
233 	va_end(ap);
234 }
235 
236 void
237 dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
238 	const unsigned char *object;
239 	const int size_object;
240 	unsigned char *hbuff;
241 	const int size_hbuff;
242 	unsigned char *cbuff;
243 	const int size_cbuff;
244 {
245 	int i, objectsize;
246 
247 	if (debug_level < 2) {
248 		return;
249 	}
250 
251 	objectsize = size_object;
252 
253 	if (objectsize == 0) {
254 		debuglog("object is size 0\n");
255 	} else {
256 		if (objectsize > MAXOBJECTSIZE) {
257 			debuglog("Object of size %d being clamped"
258 			    "to size %d\n", objectsize, MAXOBJECTSIZE);
259 			objectsize = MAXOBJECTSIZE;
260 		}
261 
262 		if (hbuff != NULL) {
263 			if (size_hbuff < objectsize*2+1) {
264 				debuglog("Hbuff not large enough."
265 				    "  Increase size\n");
266 			} else {
267 				for(i=0;i<objectsize;i++) {
268 					sprintf(hbuff+i*2,"%02x",*(object+i));
269 				}
270 				*(hbuff+i*2) = '\0';
271 			}
272 		}
273 
274 		if (cbuff != NULL) {
275 			if (size_cbuff < objectsize+1) {
276 				debuglog("Cbuff not large enough."
277 				    "  Increase Size\n");
278 			}
279 
280 			for(i=0;i<objectsize;i++) {
281 				if (*(object+i) >= 32 && *(object+i) <= 127) {
282 					*(cbuff+i) = *(object+i);
283 				} else {
284 					*(cbuff+i) = '.';
285 				}
286 			}
287 			*(cbuff+i) = '\0';
288 		}
289 	}
290 }
291 
292 void
293 dump_netobj(const struct netobj *nobj)
294 {
295 	char hbuff[MAXBUFFERSIZE*2];
296 	char cbuff[MAXBUFFERSIZE];
297 
298 	if (debug_level < 2) {
299 		return;
300 	}
301 
302 	if (nobj == NULL) {
303 		debuglog("Null netobj pointer\n");
304 	}
305 	else if (nobj->n_len == 0) {
306 		debuglog("Size zero netobj\n");
307 	} else {
308 		dump_static_object(nobj->n_bytes, nobj->n_len,
309 		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
310 		debuglog("netobj: len: %d  data: %s :::  %s\n",
311 		    nobj->n_len, hbuff, cbuff);
312 	}
313 }
314 
315 void
316 dump_filelock(const struct file_lock *fl)
317 {
318 	char hbuff[MAXBUFFERSIZE*2];
319 	char cbuff[MAXBUFFERSIZE];
320 
321 	if (debug_level < 2) {
322 		return;
323 	}
324 
325 	if (fl != NULL) {
326 		debuglog("Dumping file lock structure @ %p\n", fl);
327 
328 		/*
329 		dump_static_object((unsigned char *)&fl->filehandle,
330 		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
331 		    cbuff, sizeof(cbuff));
332 		debuglog("Filehandle: %8s  :::  %8s\n", hbuff, cbuff);
333 		*/
334 
335 		debuglog("Dumping nlm4_holder:\n"
336 		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
337 		    fl->client.exclusive, fl->client.svid,
338 		    fl->client.l_offset, fl->client.l_len);
339 
340 		/*
341 		debuglog("Dumping client identity:\n");
342 		dump_netobj(&fl->client.oh);
343 
344 		debuglog("Dumping client cookie:\n");
345 		dump_netobj(&fl->client_cookie);
346 
347 		debuglog("nsm: %d  status: %d  flags: %d  locker: %d"
348 		    "  fd:  %d\n", fl->nsm_status, fl->status,
349 		    fl->flags, fl->locker, fl->fd);
350 		*/
351 	} else {
352 		debuglog("NULL file lock structure\n");
353 	}
354 }
355 
356 void
357 copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
358 	const struct nlm4_lock *src;
359 	const bool_t exclusive;
360 	struct nlm4_holder *dest;
361 {
362 
363 	dest->exclusive = exclusive;
364 	dest->oh.n_len = src->oh.n_len;
365 	dest->oh.n_bytes = src->oh.n_bytes;
366 	dest->svid = src->svid;
367 	dest->l_offset = src->l_offset;
368 	dest->l_len = src->l_len;
369 }
370 
371 
372 /*
373  * allocate_file_lock: Create a lock with the given parameters
374  */
375 
376 struct file_lock *
377 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie)
378 {
379 	struct file_lock *newfl;
380 
381 	newfl = malloc(sizeof(struct file_lock));
382 	if (newfl == NULL) {
383 		return NULL;
384 	}
385 	bzero(newfl, sizeof(newfl));
386 
387 	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
388 	if (newfl->client.oh.n_bytes == NULL) {
389 		free(newfl);
390 		return NULL;
391 	}
392 	newfl->client.oh.n_len = lockowner->n_len;
393 	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
394 
395 	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
396 	if (newfl->client_cookie.n_bytes == NULL) {
397 		free(newfl->client.oh.n_bytes);
398 		free(newfl);
399 		return NULL;
400 	}
401 	newfl->client_cookie.n_len = matchcookie->n_len;
402 	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);
403 
404 	return newfl;
405 }
406 
/*
 * fill_file_lock: Force creation of a valid file lock
 */
410 void
411 fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
412     struct sockaddr *addr, const bool_t exclusive, const int32_t svid,
413     const u_int64_t offset, const u_int64_t len, const char *caller_name,
414     const int state, const int status, const int flags, const int blocking)
415 {
416 	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
417 	fl->addr = addr;
418 
419 	fl->client.exclusive = exclusive;
420 	fl->client.svid = svid;
421 	fl->client.l_offset = offset;
422 	fl->client.l_len = len;
423 
424 	strncpy(fl->client_name, caller_name, SM_MAXSTRLEN);
425 
426 	fl->nsm_status = state;
427 	fl->status = status;
428 	fl->flags = flags;
429 	fl->blocking = blocking;
430 }
431 
432 /*
433  * deallocate_file_lock: Free all storage associated with a file lock
434  */
435 void
436 deallocate_file_lock(struct file_lock *fl)
437 {
438 	free(fl->client.oh.n_bytes);
439 	free(fl->client_cookie.n_bytes);
440 	free(fl);
441 }
442 
443 /*
444  * regions_overlap(): This function examines the two provided regions for
445  * overlap.
446  */
447 int
448 regions_overlap(start1, len1, start2, len2)
449 	const u_int64_t start1, len1, start2, len2;
450 {
451 	u_int64_t d1,d2,d3,d4;
452 	enum split_status result;
453 
454 	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
455 		 start1, len1, start2, len2);
456 
457 	result = region_compare(start1, len1, start2, len2,
458 	    &d1, &d2, &d3, &d4);
459 
460 	debuglog("Exiting region overlap with val: %d\n",result);
461 
462 	if (result == SPL_DISJOINT) {
463 		return 0;
464 	} else {
465 		return 1;
466 	}
467 
468 	return (result);
469 }
470 
/*
 * region_compare(): Examine lock regions and split appropriately
 *
 * Compares the established lock region (starte/lene) against the
 * unlock region (startu/lenu).  A length of 0 is treated as "extends to
 * infinity" (see the infinite-lock branches below).  On return,
 * start1/len1 describe the remnant of the established lock LEFT of the
 * unlock region (SPL_LOCK1 set in the result) and start2/len2 the
 * remnant to its RIGHT (SPL_LOCK2 set).  SPL_DISJOINT means no overlap;
 * SPL_CONTAINED means the unlock region covers the entire established
 * lock.  SPL_LOCK1 and SPL_LOCK2 may both be set.
 *
 * XXX: Fix 64 bit overflow problems
 * XXX: Check to make sure I got *ALL* the cases.
 * XXX: This DESPERATELY needs a regression test.
 */
enum split_status
region_compare(starte, lene, startu, lenu,
    start1, len1, start2, len2)
	const u_int64_t starte, lene, startu, lenu;
	u_int64_t *start1, *len1, *start2, *len2;
{
	/*
	 * Please pay attention to the sequential exclusions
	 * of the if statements!!!
	 */
	enum LFLAGS lflags;
	enum RFLAGS rflags;
	enum split_status retval;	/* accumulated as a bitmask (SPL_LOCK1|SPL_LOCK2) */

	retval = SPL_DISJOINT;

	if (lene == 0 && lenu == 0) {
		/* Both established lock and unlocker are infinite */
		/* Examine left edge of locker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else {
			lflags = LEDGE_INSIDE;
		}

		rflags = REDGE_RBOUNDARY; /* Both are infinite */

		if (lflags == LEDGE_INSIDE) {
			/* Left remnant: [starte, startu) survives */
			*start1 = starte;
			*len1 = startu - starte;
		}

		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
			retval = SPL_CONTAINED;
		} else {
			retval = SPL_LOCK1;
		}
	} else if (lene == 0 && lenu != 0) {
		/* Established lock is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if (startu > starte) {
			lflags = LEDGE_INSIDE;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else { /* Infinity is right of finity */
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		}

		if (lflags == LEDGE_INSIDE) {
			/* Left remnant: [starte, startu) survives */
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock: from end of unlocker to infinity */
			*start2 = startu+lenu;
			*len2 = 0;
			retval |= SPL_LOCK2;
		}
	} else if (lene != 0 && lenu == 0) {
		/* Unlocker is infinite */
		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
			retval = SPL_CONTAINED;
			return retval;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
			retval = SPL_CONTAINED;
			return retval;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		rflags = REDGE_RIGHT; /* Infinity is right of finity */

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Left remnant: [starte, startu) survives */
			*start1 = starte;
			*len1 = startu - starte;
			retval |= SPL_LOCK1;
			return retval;
		}

	} else {
		/* Both locks are finite */

		/* Examine left edge of unlocker */
		if (startu < starte) {
			lflags = LEDGE_LEFT;
		} else if (startu == starte) {
			lflags = LEDGE_LBOUNDARY;
		} else if ((startu > starte) && (startu < starte + lene - 1)) {
			lflags = LEDGE_INSIDE;
		} else if (startu == starte + lene - 1) {
			lflags = LEDGE_RBOUNDARY;
		} else { /* startu > starte + lene -1 */
			lflags = LEDGE_RIGHT;
			return SPL_DISJOINT;
		}

		/* Examine right edge of unlocker */
		if (startu + lenu < starte) {
			/* Right edge of unlocker left of established lock */
			rflags = REDGE_LEFT;
			return SPL_DISJOINT;
		} else if (startu + lenu == starte) {
			/* Right edge of unlocker on start of established lock */
			rflags = REDGE_LBOUNDARY;
			return SPL_DISJOINT;
		} else if (startu + lenu < starte + lene) {
			/* Right edge of unlocker inside established lock */
			rflags = REDGE_INSIDE;
		} else if (startu + lenu == starte + lene) {
			/* Right edge of unlocker on right edge of established lock */
			rflags = REDGE_RBOUNDARY;
		} else { /* startu + lenu > starte + lene */
			/* Right edge of unlocker is right of established lock */
			rflags = REDGE_RIGHT;
		}

		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
			/* Create left lock */
			*start1 = starte;
			*len1 = (startu - starte);
			retval |= SPL_LOCK1;
		}

		if (rflags == REDGE_INSIDE) {
			/* Create right lock */
			*start2 = startu+lenu;
			*len2 = starte+lene-(startu+lenu);
			retval |= SPL_LOCK2;
		}

		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
			/* Unlocker covers the whole established lock */
			retval = SPL_CONTAINED;
		}
	}

	return retval;
}
641 
642 /*
 * same_netobj: Compares the appropriate bits of a netobj for identity
644  */
645 int
646 same_netobj(const netobj *n0, const netobj *n1)
647 {
648 	int retval;
649 
650 	retval = 0;
651 
652 	debuglog("Entering netobj identity check\n");
653 
654 	if (n0->n_len == n1->n_len) {
655 		debuglog("Preliminary length check passed\n");
656 		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
657 		debuglog("netobj %smatch\n", retval ? "" : "mis");
658 	}
659 
660 	return (retval);
661 }
662 
663 /*
664  * same_filelock_identity: Compares the appropriate bits of a file_lock
665  */
666 int
667 same_filelock_identity(fl0, fl1)
668 	const struct file_lock *fl0, *fl1;
669 {
670 	int retval;
671 
672 	retval = 0;
673 
674 	debuglog("Checking filelock identity\n");
675 
676 	/*
677 	 * Check process ids and host information.
678 	 */
679 	retval = (fl0->client.svid == fl1->client.svid &&
680 	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
681 
682 	debuglog("Exiting checking filelock identity: retval: %d\n",retval);
683 
684 	return (retval);
685 }
686 
687 /*
688  * Below here are routines associated with manipulating the NFS
689  * lock list.
690  */
691 
692 /*
693  * get_lock_matching_unlock: Return a lock which matches the given unlock lock
 *                           or NULL otherwise
695  * XXX: It is a shame that this duplicates so much code from test_nfslock.
696  */
697 struct file_lock *
698 get_lock_matching_unlock(const struct file_lock *fl)
699 {
700 	struct file_lock *ifl; /* Iterator */
701 
702 	debuglog("Entering lock_matching_unlock\n");
703 	debuglog("********Dump of fl*****************\n");
704 	dump_filelock(fl);
705 
706 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
707 		debuglog("Pointer to file lock: %p\n",ifl);
708 
709 		debuglog("****Dump of ifl****\n");
710 		dump_filelock(ifl);
711 		debuglog("*******************\n");
712 
713 		/*
714 		 * XXX: It is conceivable that someone could use the NLM RPC
715 		 * system to directly access filehandles.  This may be a
716 		 * security hazard as the filehandle code may bypass normal
717 		 * file access controls
718 		 */
719 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
720 			continue;
721 
722 		debuglog("matching_unlock: Filehandles match, "
723 		    "checking regions\n");
724 
725 		/* Filehandles match, check for region overlap */
726 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
727 			ifl->client.l_offset, ifl->client.l_len))
728 			continue;
729 
730 		debuglog("matching_unlock: Region overlap"
731 		    " found %llu : %llu -- %llu : %llu\n",
732 		    fl->client.l_offset,fl->client.l_len,
733 		    ifl->client.l_offset,ifl->client.l_len);
734 
735 		/* Regions overlap, check the identity */
736 		if (!same_filelock_identity(fl,ifl))
737 			continue;
738 
739 		debuglog("matching_unlock: Duplicate lock id.  Granting\n");
740 		return (ifl);
741 	}
742 
743 	debuglog("Exiting lock_matching_unlock\n");
744 
745 	return (NULL);
746 }
747 
748 /*
749  * test_nfslock: check for NFS lock in lock list
750  *
751  * This routine makes the following assumptions:
752  *    1) Nothing will adjust the lock list during a lookup
753  *
 * This routine has an interesting quirk which bit me hard.
 * The conflicting_fl is the pointer to the conflicting lock.
 * However, to modify the "*pointer* to the conflicting lock" rather
 * than the "conflicting lock itself" one must pass in a "pointer to
 * the pointer of the conflicting lock".  Gross.
759  */
760 
/*
 * test_nfslock: Scan the active NFS lock list for a lock that conflicts
 * with fl (same file, overlapping region, at least one side exclusive).
 * Returns NFS_GRANTED (no conflict), NFS_GRANTED_DUPLICATE (overlap held
 * by the same owner), or NFS_DENIED (true conflict); *conflicting_fl
 * points at the overlapping lock in the latter two cases, NULL otherwise.
 */
enum nfslock_status
test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{
	struct file_lock *ifl; /* Iterator */
	enum nfslock_status retval;

	debuglog("Entering test_nfslock\n");

	/* Assume success until a conflicting lock is found. */
	retval = NFS_GRANTED;
	(*conflicting_fl) = NULL;

	debuglog("Entering lock search loop\n");

	debuglog("***********************************\n");
	debuglog("Dumping match filelock\n");
	debuglog("***********************************\n");
	dump_filelock(fl);
	debuglog("***********************************\n");

	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
		/* A denial is final; stop scanning. */
		if (retval == NFS_DENIED)
			break;

		debuglog("Top of lock loop\n");
		debuglog("Pointer to file lock: %p\n",ifl);

		debuglog("***********************************\n");
		debuglog("Dumping test filelock\n");
		debuglog("***********************************\n");
		dump_filelock(ifl);
		debuglog("***********************************\n");

		/*
		 * XXX: It is conceivable that someone could use the NLM RPC
		 * system to directly access filehandles.  This may be a
		 * security hazard as the filehandle code may bypass normal
		 * file access controls
		 */
		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
			continue;

		debuglog("test_nfslock: filehandle match found\n");

		/* Filehandles match, check for region overlap */
		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
			ifl->client.l_offset, ifl->client.l_len))
			continue;

		debuglog("test_nfslock: Region overlap found"
		    " %llu : %llu -- %llu : %llu\n",
		    fl->client.l_offset,fl->client.l_len,
		    ifl->client.l_offset,ifl->client.l_len);

		/* Regions overlap, check the exclusivity */
		if (!(fl->client.exclusive || ifl->client.exclusive))
			continue;

		/*
		 * NOTE: despite the wording, this is logged whenever at
		 * least one of the two locks is exclusive, i.e. whenever a
		 * potential conflict exists.
		 */
		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
		    fl->client.exclusive,
		    ifl->client.exclusive);

		if (same_filelock_identity(fl,ifl)) {
			/* Same owner: duplicate request, not a conflict. */
			debuglog("test_nfslock: Duplicate id.  Granting\n");
			(*conflicting_fl) = ifl;
			retval = NFS_GRANTED_DUPLICATE;
		} else {
			/* locking attempt fails */
			debuglog("test_nfslock: Lock attempt failed\n");
			debuglog("Desired lock\n");
			dump_filelock(fl);
			debuglog("Conflicting lock\n");
			dump_filelock(ifl);
			(*conflicting_fl) = ifl;
			retval = NFS_DENIED;
		}
	}

	debuglog("Dumping file locks\n");
	debuglog("Exiting test_nfslock\n");

	return (retval);
}
843 
844 /*
845  * lock_nfslock: attempt to create a lock in the NFS lock list
846  *
847  * This routine tests whether the lock will be granted and then adds
848  * the entry to the lock list if so.
849  *
850  * Argument fl gets modified as its list housekeeping entries get modified
851  * upon insertion into the NFS lock list
852  *
853  * This routine makes several assumptions:
854  *    1) It is perfectly happy to grant a duplicate lock from the same pid.
855  *       While this seems to be intuitively wrong, it is required for proper
856  *       Posix semantics during unlock.  It is absolutely imperative to not
857  *       unlock the main lock before the two child locks are established. Thus,
 *       one has to be able to create duplicate locks over an existing lock
859  *    2) It currently accepts duplicate locks from the same id,pid
860  */
861 
862 enum nfslock_status
863 lock_nfslock(struct file_lock *fl)
864 {
865 	enum nfslock_status retval;
866 	struct file_lock *dummy_fl;
867 
868 	dummy_fl = NULL;
869 
870 	debuglog("Entering lock_nfslock...\n");
871 
872 	retval = test_nfslock(fl,&dummy_fl);
873 
874 	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
875 		debuglog("Inserting lock...\n");
876 		dump_filelock(fl);
877 		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
878 	}
879 
880 	debuglog("Exiting lock_nfslock...\n");
881 
882 	return (retval);
883 }
884 
885 /*
886  * delete_nfslock: delete an NFS lock list entry
887  *
888  * This routine is used to delete a lock out of the NFS lock list
889  * without regard to status, underlying locks, regions or anything else
890  *
891  * Note that this routine *does not deallocate memory* of the lock.
892  * It just disconnects it from the list.  The lock can then be used
893  * by other routines without fear of trashing the list.
894  */
895 
896 enum nfslock_status
897 delete_nfslock(struct file_lock *fl)
898 {
899 
900 	LIST_REMOVE(fl, nfslocklist);
901 
902 	return (NFS_GRANTED);
903 }
904 
905 enum split_status
906 split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
907 	const struct file_lock *exist_lock, *unlock_lock;
908 	struct file_lock **left_lock, **right_lock;
909 {
910 	u_int64_t start1, len1, start2, len2;
911 	enum split_status spstatus;
912 
913 	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
914 	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
915 	    &start1, &len1, &start2, &len2);
916 
917 	if ((spstatus & SPL_LOCK1) != 0) {
918 		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie);
919 		if (*left_lock == NULL) {
920 			debuglog("Unable to allocate resource for split 1\n");
921 			return SPL_RESERR;
922 		}
923 
924 		fill_file_lock(*left_lock, &exist_lock->filehandle,
925 		    exist_lock->addr,
926 		    exist_lock->client.exclusive, exist_lock->client.svid,
927 		    start1, len1,
928 		    exist_lock->client_name, exist_lock->nsm_status,
929 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
930 	}
931 
932 	if ((spstatus & SPL_LOCK2) != 0) {
933 		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie);
934 		if (*right_lock == NULL) {
935 			debuglog("Unable to allocate resource for split 1\n");
936 			if (*left_lock != NULL) {
937 				deallocate_file_lock(*left_lock);
938 			}
939 			return SPL_RESERR;
940 		}
941 
942 		fill_file_lock(*right_lock, &exist_lock->filehandle,
943 		    exist_lock->addr,
944 		    exist_lock->client.exclusive, exist_lock->client.svid,
945 		    start2, len2,
946 		    exist_lock->client_name, exist_lock->nsm_status,
947 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
948 	}
949 
950 	return spstatus;
951 }
952 
953 enum nfslock_status
954 unlock_nfslock(fl, released_lock, left_lock, right_lock)
955 	const struct file_lock *fl;
956 	struct file_lock **released_lock;
957 	struct file_lock **left_lock;
958 	struct file_lock **right_lock;
959 {
960 	struct file_lock *mfl; /* Matching file lock */
961 	enum nfslock_status retval;
962 	enum split_status spstatus;
963 
964 	debuglog("Entering unlock_nfslock\n");
965 
966 	*released_lock = NULL;
967 	*left_lock = NULL;
968 	*right_lock = NULL;
969 
970 	retval = NFS_DENIED_NOLOCK;
971 
972 	printf("Attempting to match lock...\n");
973 	mfl = get_lock_matching_unlock(fl);
974 
975 	if (mfl != NULL) {
976 		debuglog("Unlock matched.  Querying for split\n");
977 
978 		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
979 
980 		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
981 		debuglog("********Split dumps********");
982 		dump_filelock(mfl);
983 		dump_filelock(fl);
984 		dump_filelock(*left_lock);
985 		dump_filelock(*right_lock);
986 		debuglog("********End Split dumps********");
987 
988 		if (spstatus == SPL_RESERR) {
989 			if (*left_lock != NULL) {
990 				deallocate_file_lock(*left_lock);
991 				*left_lock = NULL;
992 			}
993 
994 			if (*right_lock != NULL) {
995 				deallocate_file_lock(*right_lock);
996 				*right_lock = NULL;
997 			}
998 
999 			return NFS_RESERR;
1000 		}
1001 
1002 		/* Insert new locks from split if required */
1003 		if (*left_lock != NULL) {
1004 			debuglog("Split left activated\n");
1005 			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1006 		}
1007 
1008 		if (*right_lock != NULL) {
1009 			debuglog("Split right activated\n");
1010 			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1011 		}
1012 
1013 		/* Unlock the lock since it matches identity */
1014 		LIST_REMOVE(mfl, nfslocklist);
1015 		*released_lock = mfl;
1016 		retval = NFS_GRANTED;
1017 	}
1018 
1019 	debuglog("Exiting unlock_nfslock\n");
1020 
1021 	return retval;
1022 }
1023 
1024 /*
1025  * Below here are the routines for manipulating the file lock directly
1026  * on the disk hardware itself
1027  */
1028 enum hwlock_status
1029 lock_hwlock(struct file_lock *fl)
1030 {
1031 	struct monfile *imf,*nmf;
1032 	int lflags, flerror;
1033 
1034 	/* Scan to see if filehandle already present */
1035 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1036 		if (bcmp(&fl->filehandle, &imf->filehandle,
1037 			sizeof(fl->filehandle)) == 0) {
1038 			/* imf is the correct filehandle */
1039 			break;
1040 		}
1041 	}
1042 
1043 	/*
1044 	 * Filehandle already exists (we control the file)
1045 	 * *AND* NFS has already cleared the lock for availability
1046 	 * Grant it and bump the refcount.
1047 	 */
1048 	if (imf != NULL) {
1049 		++(imf->refcount);
1050 		return (HW_GRANTED);
1051 	}
1052 
1053 	/* No filehandle found, create and go */
1054 	nmf = malloc(sizeof(struct monfile));
1055 	if (nmf == NULL) {
1056 		debuglog("hwlock resource allocation failure\n");
1057 		return (HW_RESERR);
1058 	}
1059 
1060 	/* XXX: Is O_RDWR always the correct mode? */
1061 	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
1062 	if (nmf->fd < 0) {
1063 		debuglog("fhopen failed (from %16s): %32s\n",
1064 		    fl->client_name, strerror(errno));
1065 		free(nmf);
1066 		switch (errno) {
1067 		case ESTALE:
1068 			return (HW_STALEFH);
1069 		case EROFS:
1070 			return (HW_READONLY);
1071 		default:
1072 			return (HW_RESERR);
1073 		}
1074 	}
1075 
1076 	/* File opened correctly, fill the monitor struct */
1077 	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
1078 	nmf->refcount = 1;
1079 	nmf->exclusive = fl->client.exclusive;
1080 
1081 	lflags = (nmf->exclusive == 1) ?
1082 	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1083 
1084 	flerror = flock(nmf->fd, lflags);
1085 
1086 	if (flerror != 0) {
1087 		debuglog("flock failed (from %16s): %32s\n",
1088 		    fl->client_name, strerror(errno));
1089 		close(nmf->fd);
1090 		free(nmf);
1091 		switch (errno) {
1092 		case EAGAIN:
1093 			return (HW_DENIED);
1094 		case ESTALE:
1095 			return (HW_STALEFH);
1096 		case EROFS:
1097 			return (HW_READONLY);
1098 		default:
1099 			return (HW_RESERR);
1100 			break;
1101 		}
1102 	}
1103 
1104 	/* File opened and locked */
1105 	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1106 
1107 	debuglog("flock succeeded (from %16s)\n", fl->client_name);
1108 	return (HW_GRANTED);
1109 }
1110 
1111 enum hwlock_status
1112 unlock_hwlock(const struct file_lock *fl)
1113 {
1114 	struct monfile *imf;
1115 
1116 	debuglog("Entering unlock_hwlock\n");
1117 	debuglog("Entering loop interation\n");
1118 
1119 	/* Scan to see if filehandle already present */
1120 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1121 		if (bcmp(&fl->filehandle, &imf->filehandle,
1122 			sizeof(fl->filehandle)) == 0) {
1123 			/* imf is the correct filehandle */
1124 			break;
1125 		}
1126 	}
1127 
1128 	debuglog("Completed iteration.  Proceeding\n");
1129 
1130 	if (imf == NULL) {
1131 		/* No lock found */
1132 		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1133 		return (HW_DENIED_NOLOCK);
1134 	}
1135 
1136 	/* Lock found */
1137 	--imf->refcount;
1138 
1139 	if (imf->refcount < 0) {
1140 		debuglog("Negative hardware reference count\n");
1141 	}
1142 
1143 	if (imf->refcount <= 0) {
1144 		close(imf->fd);
1145 		LIST_REMOVE(imf, monfilelist);
1146 		free(imf);
1147 	}
1148 	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1149 	return (HW_GRANTED);
1150 }
1151 
/*
 * test_hwlock: test for a conflicting lock on the underlying file.
 *
 * Deliberately unimplemented: both parameters are ignored and the
 * routine always reports HW_RESERR.  Callers (see test_partialfilelock)
 * currently rely on the NFS-level test alone.
 */
enum hwlock_status
test_hwlock(const struct file_lock *fl, struct file_lock **conflicting_fl)
{

	/*
	 * XXX: lock tests on hardware are not required until
	 * true partial file testing is done on the underlying file
	 */
	return (HW_RESERR);
}
1162 
1163 
1164 
1165 /*
1166  * Below here are routines for manipulating blocked lock requests
1167  * They should only be called from the XXX_partialfilelock routines
1168  * if at all possible
1169  */
1170 
1171 void
1172 add_blockingfilelock(struct file_lock *fl)
1173 {
1174 
1175 	debuglog("Entering add_blockingfilelock\n");
1176 
1177 	/*
1178 	 * Clear the blocking flag so that it can be reused without
1179 	 * adding it to the blocking queue a second time
1180 	 */
1181 
1182 	fl->blocking = 0;
1183 	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1184 
1185 	debuglog("Exiting add_blockingfilelock\n");
1186 }
1187 
1188 void
1189 remove_blockingfilelock(struct file_lock *fl)
1190 {
1191 
1192 	debuglog("Entering remove_blockingfilelock\n");
1193 
1194 	LIST_REMOVE(fl, nfslocklist);
1195 
1196 	debuglog("Exiting remove_blockingfilelock\n");
1197 }
1198 
1199 void
1200 clear_blockingfilelock(const char *hostname)
1201 {
1202 	struct file_lock *ifl,*nfl;
1203 
1204 	/*
1205 	 * Normally, LIST_FOREACH is called for, but since
1206 	 * the current element *is* the iterator, deleting it
1207 	 * would mess up the iteration.  Thus, a next element
1208 	 * must be used explicitly
1209 	 */
1210 
1211 	ifl = LIST_FIRST(&blockedlocklist_head);
1212 
1213 	while (ifl != NULL) {
1214 		nfl = LIST_NEXT(ifl, nfslocklist);
1215 
1216 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1217 			remove_blockingfilelock(ifl);
1218 			deallocate_file_lock(ifl);
1219 		}
1220 
1221 		ifl = nfl;
1222 	}
1223 }
1224 
1225 void
1226 retry_blockingfilelocklist(void)
1227 {
1228 	/* Retry all locks in the blocked list */
1229 	struct file_lock *ifl, *nfl; /* Iterator */
1230 	enum partialfilelock_status pflstatus;
1231 
1232 	debuglog("Entering retry_blockingfilelocklist\n");
1233 
1234 	ifl = LIST_FIRST(&blockedlocklist_head);
1235 	debuglog("Iterator choice %p\n",ifl);
1236 
1237 	while (ifl != NULL) {
1238 		/*
1239 		 * SUBTLE BUG: The next element must be worked out before the
1240 		 * current element has been moved
1241 		 */
1242 		nfl = LIST_NEXT(ifl, nfslocklist);
1243 		debuglog("Iterator choice %p\n",ifl);
1244 		debuglog("Next iterator choice %p\n",nfl);
1245 
1246 		/*
1247 		 * SUBTLE BUG: The file_lock must be removed from the
1248 		 * old list so that it's list pointers get disconnected
1249 		 * before being allowed to participate in the new list
1250 		 * which will automatically add it in if necessary.
1251 		 */
1252 
1253 		LIST_REMOVE(ifl, nfslocklist);
1254 		pflstatus = lock_partialfilelock(ifl);
1255 
1256 		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1257 			debuglog("Granted blocked lock\n");
1258 			/* lock granted and is now being used */
1259 			send_granted(ifl,0);
1260 		} else {
1261 			/* Reinsert lock back into same place in blocked list */
1262 			debuglog("Replacing blocked lock\n");
1263 			LIST_INSERT_BEFORE(nfl, ifl, nfslocklist);
1264 		}
1265 
1266 		/* Valid increment behavior regardless of state of ifl */
1267 		ifl = nfl;
1268 	}
1269 
1270 	debuglog("Exiting retry_blockingfilelocklist\n");
1271 }
1272 
1273 /*
1274  * Below here are routines associated with manipulating all
1275  * aspects of the partial file locking system (list, hardware, etc.)
1276  */
1277 
1278 /*
1279  * Please note that lock monitoring must be done at this level which
1280  * keeps track of *individual* lock requests on lock and unlock
1281  *
1282  * XXX: Split unlocking is going to make the unlock code miserable
1283  */
1284 
1285 /*
1286  * lock_partialfilelock:
1287  *
1288  * Argument fl gets modified as its list housekeeping entries get modified
1289  * upon insertion into the NFS lock list
1290  *
1291  * This routine makes several assumptions:
1292  * 1) It (will) pass locks through to flock to lock the entire underlying file
1293  *     and then parcel out NFS locks if it gets control of the file.
1294  *         This matches the old rpc.lockd file semantics (except where it
1295  *         is now more correct).  It is the safe solution, but will cause
1296  *         overly restrictive blocking if someone is trying to use the
1297  *         underlying files without using NFS.  This appears to be an
1298  *         acceptable tradeoff since most people use standalone NFS servers.
1299  * XXX: The right solution is probably kevent combined with fcntl
1300  *
1301  *    2) Nothing modifies the lock lists between testing and granting
1302  *           I have no idea whether this is a useful assumption or not
1303  */
1304 
1305 enum partialfilelock_status
1306 lock_partialfilelock(struct file_lock *fl)
1307 {
1308 	enum partialfilelock_status retval;
1309 	enum nfslock_status lnlstatus;
1310 	enum hwlock_status hwstatus;
1311 
1312 	debuglog("Entering lock_partialfilelock\n");
1313 
1314 	retval = PFL_DENIED;
1315 
1316 	/*
1317 	 * Execute the NFS lock first, if possible, as it is significantly
1318 	 * easier and less expensive to undo than the filesystem lock
1319 	 */
1320 
1321 	lnlstatus = lock_nfslock(fl);
1322 
1323 	switch (lnlstatus) {
1324 	case NFS_GRANTED:
1325 	case NFS_GRANTED_DUPLICATE:
1326 		/*
1327 		 * At this point, the NFS lock is allocated and active.
1328 		 * Remember to clean it up if the hardware lock fails
1329 		 */
1330 		hwstatus = lock_hwlock(fl);
1331 
1332 		switch (hwstatus) {
1333 		case HW_GRANTED:
1334 		case HW_GRANTED_DUPLICATE:
1335 			debuglog("HW GRANTED\n");
1336 			/*
1337 			 * XXX: Fixme: Check hwstatus for duplicate when
1338 			 * true partial file locking and accounting is
1339 			 * done on the hardware
1340 			 */
1341 			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1342 				retval = PFL_GRANTED_DUPLICATE;
1343 			} else {
1344 				retval = PFL_GRANTED;
1345 			}
1346 			monitor_lock_host(fl->client_name);
1347 			break;
1348 		case HW_RESERR:
1349 			debuglog("HW RESERR\n");
1350 			retval = PFL_HWRESERR;
1351 			break;
1352 		case HW_DENIED:
1353 			debuglog("HW DENIED\n");
1354 			retval = PFL_HWDENIED;
1355 			break;
1356 		default:
1357 			debuglog("Unmatched hwstatus %d\n",hwstatus);
1358 			break;
1359 		}
1360 
1361 		if (retval != PFL_GRANTED &&
1362 		    retval != PFL_GRANTED_DUPLICATE) {
1363 			/* Clean up the NFS lock */
1364 			debuglog("Deleting trial NFS lock\n");
1365 			delete_nfslock(fl);
1366 		}
1367 		break;
1368 	case NFS_DENIED:
1369 		retval = PFL_NFSDENIED;
1370 		break;
1371 	case NFS_RESERR:
1372 		retval = PFL_NFSRESERR;
1373 	default:
1374 		debuglog("Unmatched lnlstatus %d\n");
1375 		retval = PFL_NFSDENIED_NOLOCK;
1376 		break;
1377 	}
1378 
1379 	/*
1380 	 * By the time fl reaches here, it is completely free again on
1381 	 * failure.  The NFS lock done before attempting the
1382 	 * hardware lock has been backed out
1383 	 */
1384 
1385 	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1386 		/* Once last chance to check the lock */
1387 		if (fl->blocking == 1) {
1388 			/* Queue the lock */
1389 			debuglog("BLOCKING LOCK RECEIVED\n");
1390 			retval = (retval == PFL_NFSDENIED ?
1391 			    PFL_NFSBLOCKED : PFL_HWBLOCKED);
1392 			add_blockingfilelock(fl);
1393 			dump_filelock(fl);
1394 		} else {
1395 			/* Leave retval alone, it's already correct */
1396 			debuglog("Lock denied.  Non-blocking failure\n");
1397 			dump_filelock(fl);
1398 		}
1399 	}
1400 
1401 	debuglog("Exiting lock_partialfilelock\n");
1402 
1403 	return retval;
1404 }
1405 
1406 /*
1407  * unlock_partialfilelock:
1408  *
1409  * Given a file_lock, unlock all locks which match.
1410  *
1411  * Note that a given lock might have to unlock ITSELF!  See
1412  * clear_partialfilelock for example.
1413  */
1414 
enum partialfilelock_status
unlock_partialfilelock(const struct file_lock *fl)
{
	/* lfl/rfl: split remnants; releasedfl: lock freed by the NFS layer;
	 * selffl: set when the released lock turns out to be fl itself */
	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
	enum partialfilelock_status retval;
	enum nfslock_status unlstatus;
	enum hwlock_status unlhwstatus, lhwstatus;

	debuglog("Entering unlock_partialfilelock\n");

	selffl = NULL;
	lfl = NULL;
	rfl = NULL;
	releasedfl = NULL;
	retval = PFL_DENIED;

	/*
	 * There are significant overlap and atomicity issues
	 * with partially releasing a lock.  For example, releasing
	 * part of an NFS shared lock does *not* always release the
	 * corresponding part of the file since there is only one
	 * rpc.lockd UID but multiple users could be requesting it
	 * from NFS.  Also, an unlock request should never allow
	 * another process to gain a lock on the remaining parts.
	 * ie. Always apply the new locks before releasing the
	 * old one
	 */

	/*
	 * Loop is required since multiple little locks
	 * can be allocated and then deallocated with one
	 * big unlock.
	 *
	 * The loop is required to be here so that the nfs &
	 * hw subsystems do not need to communicate with one
	 * one another
	 */

	do {
		debuglog("Value of releasedfl: %p\n",releasedfl);
		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
		debuglog("Value of releasedfl: %p\n",releasedfl);


		/* XXX: This is grungy.  It should be refactored to be cleaner */
		/* Acquire hardware locks for the split remnants BEFORE the
		 * old hardware lock is released (see atomicity note above) */
		if (lfl != NULL) {
			lhwstatus = lock_hwlock(lfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for left split\n");
			}
			monitor_lock_host(lfl->client_name);
		}

		if (rfl != NULL) {
			lhwstatus = lock_hwlock(rfl);
			if (lhwstatus != HW_GRANTED &&
			    lhwstatus != HW_GRANTED_DUPLICATE) {
				debuglog("HW duplicate lock failure for right split\n");
			}
			monitor_lock_host(rfl->client_name);
		}

		switch (unlstatus) {
		case NFS_GRANTED:
			/* Attempt to unlock on the hardware */
			debuglog("NFS unlock granted.  Attempting hardware unlock\n");

			/* This call *MUST NOT* unlock the two newly allocated locks */
			unlhwstatus = unlock_hwlock(fl);
			debuglog("HW unlock returned with code %d\n",unlhwstatus);

			switch (unlhwstatus) {
			case HW_GRANTED:
				debuglog("HW unlock granted\n");
				unmonitor_lock_host(releasedfl->client_name);
				retval = PFL_GRANTED;
				break;
			case HW_DENIED_NOLOCK:
				/* Huh?!?!  This shouldn't happen */
				debuglog("HW unlock denied no lock\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			default:
				debuglog("HW unlock failed\n");
				retval = PFL_HWRESERR;
				/* Break out of do-while */
				unlstatus = NFS_RESERR;
				break;
			}

			debuglog("Exiting with status retval: %d\n",retval);

			/* Freed lock space may satisfy queued blocking requests */
			retry_blockingfilelocklist();
			break;
		case NFS_DENIED_NOLOCK:
			/* Nothing left to unlock: the overall operation succeeded */
			retval = PFL_GRANTED;
			debuglog("All locks cleaned out\n");
			break;
		default:
			retval = PFL_NFSRESERR;
			debuglog("NFS unlock failure\n");
			dump_filelock(fl);
			break;
		}

		if (releasedfl != NULL) {
			if (fl == releasedfl) {
				/*
				 * XXX: YECHHH!!! Attempt to unlock self succeeded
				 * but we can't deallocate the space yet.  This is what
				 * happens when you don't write malloc and free together
				 */
				debuglog("Attempt to unlock self\n");
				selffl = releasedfl;
			} else {
				/*
				 * XXX: this deallocation *still* needs to migrate closer
				 * to the allocation code way up in get_lock or the allocation
				 * code needs to migrate down (violation of "When you write
				 * malloc you must write free")
				 */

				deallocate_file_lock(releasedfl);
			}
		}

	} while (unlstatus == NFS_GRANTED);

	if (selffl != NULL) {
		/*
		 * This statement wipes out the incoming file lock (fl)
		 * in spite of the fact that it is declared const
		 */
		debuglog("WARNING!  Destroying incoming lock pointer\n");
		deallocate_file_lock(selffl);
	}

	debuglog("Exiting unlock_partialfilelock\n");

	return retval;
}
1560 
1561 /*
1562  * clear_partialfilelock
1563  *
1564  * Normally called in response to statd state number change.
1565  * Wipe out all locks held by a host.  As a bonus, the act of
1566  * doing so should automatically clear their statd entries and
1567  * unmonitor the host.
1568  */
1569 
1570 void
1571 clear_partialfilelock(const char *hostname)
1572 {
1573 	struct file_lock *ifl, *nfl;
1574 
1575 	/* Clear blocking file lock list */
1576 	clear_blockingfilelock(hostname);
1577 
1578 	/* do all required unlocks */
1579 	/* Note that unlock can smash the current pointer to a lock */
1580 
1581 	/*
1582 	 * Normally, LIST_FOREACH is called for, but since
1583 	 * the current element *is* the iterator, deleting it
1584 	 * would mess up the iteration.  Thus, a next element
1585 	 * must be used explicitly
1586 	 */
1587 
1588 	ifl = LIST_FIRST(&nfslocklist_head);
1589 
1590 	while (ifl != NULL) {
1591 		nfl = LIST_NEXT(ifl, nfslocklist);
1592 
1593 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1594 			/* Unlock destroys ifl out from underneath */
1595 			unlock_partialfilelock(ifl);
1596 			/* ifl is NO LONGER VALID AT THIS POINT */
1597 		}
1598 		ifl = nfl;
1599 	}
1600 }
1601 
1602 /*
1603  * test_partialfilelock:
1604  */
1605 enum partialfilelock_status
1606 test_partialfilelock(const struct file_lock *fl,
1607     struct file_lock **conflicting_fl)
1608 {
1609 	enum partialfilelock_status retval;
1610 	enum nfslock_status teststatus;
1611 
1612 	debuglog("Entering testpartialfilelock...\n");
1613 
1614 	retval = PFL_DENIED;
1615 
1616 	teststatus = test_nfslock(fl, conflicting_fl);
1617 	debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1618 
1619 	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1620 		/* XXX: Add the underlying filesystem locking code */
1621 		retval = (teststatus == NFS_GRANTED) ?
1622 		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1623 		debuglog("Dumping locks...\n");
1624 		dump_filelock(fl);
1625 		dump_filelock(*conflicting_fl);
1626 		debuglog("Done dumping locks...\n");
1627 	} else {
1628 		retval = PFL_NFSDENIED;
1629 		debuglog("NFS test denied.\n");
1630 		dump_filelock(fl);
1631 		debuglog("Conflicting.\n");
1632 		dump_filelock(*conflicting_fl);
1633 	}
1634 
1635 	debuglog("Exiting testpartialfilelock...\n");
1636 
1637 	return retval;
1638 }
1639 
1640 /*
1641  * Below here are routines associated with translating the partial file locking
1642  * codes into useful codes to send back to the NFS RPC messaging system
1643  */
1644 
1645 /*
1646  * These routines translate the (relatively) useful return codes back onto
1647  * the few return codes which the nlm subsystems wishes to trasmit
1648  */
1649 
1650 enum nlm_stats
1651 do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1652 {
1653 	enum partialfilelock_status pfsret;
1654 	enum nlm_stats retval;
1655 
1656 	debuglog("Entering do_test...\n");
1657 
1658 	pfsret = test_partialfilelock(fl,conflicting_fl);
1659 
1660 	switch (pfsret) {
1661 	case PFL_GRANTED:
1662 		debuglog("PFL test lock granted\n");
1663 		dump_filelock(fl);
1664 		dump_filelock(*conflicting_fl);
1665 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1666 		break;
1667 	case PFL_GRANTED_DUPLICATE:
1668 		debuglog("PFL test lock granted--duplicate id detected\n");
1669 		dump_filelock(fl);
1670 		dump_filelock(*conflicting_fl);
1671 		debuglog("Clearing conflicting_fl for call semantics\n");
1672 		*conflicting_fl = NULL;
1673 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1674 		break;
1675 	case PFL_NFSDENIED:
1676 	case PFL_HWDENIED:
1677 		debuglog("PFL test lock denied\n");
1678 		dump_filelock(fl);
1679 		dump_filelock(*conflicting_fl);
1680 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1681 		break;
1682 	case PFL_NFSRESERR:
1683 	case PFL_HWRESERR:
1684 		debuglog("PFL test lock resource fail\n");
1685 		dump_filelock(fl);
1686 		dump_filelock(*conflicting_fl);
1687 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1688 		break;
1689 	default:
1690 		debuglog("PFL test lock *FAILED*\n");
1691 		dump_filelock(fl);
1692 		dump_filelock(*conflicting_fl);
1693 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1694 		break;
1695 	}
1696 
1697 	debuglog("Exiting do_test...\n");
1698 
1699 	return retval;
1700 }
1701 
1702 /*
1703  * do_lock: Try to acquire a lock
1704  *
1705  * This routine makes a distinction between NLM versions.  I am pretty
1706  * convinced that this should be abstracted out and bounced up a level
1707  */
1708 
1709 enum nlm_stats
1710 do_lock(struct file_lock *fl)
1711 {
1712 	enum partialfilelock_status pfsret;
1713 	enum nlm_stats retval;
1714 
1715 	debuglog("Entering do_lock...\n");
1716 
1717 	pfsret = lock_partialfilelock(fl);
1718 
1719 	switch (pfsret) {
1720 	case PFL_GRANTED:
1721 		debuglog("PFL lock granted");
1722 		dump_filelock(fl);
1723 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1724 		break;
1725 	case PFL_GRANTED_DUPLICATE:
1726 		debuglog("PFL lock granted--duplicate id detected");
1727 		dump_filelock(fl);
1728 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1729 		break;
1730 	case PFL_NFSDENIED:
1731 	case PFL_HWDENIED:
1732 		debuglog("PFL_NFS lock denied");
1733 		dump_filelock(fl);
1734 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1735 		break;
1736 	case PFL_NFSBLOCKED:
1737 	case PFL_HWBLOCKED:
1738 		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
1739 		dump_filelock(fl);
1740 		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1741 		break;
1742 	case PFL_NFSRESERR:
1743 	case PFL_HWRESERR:
1744 		debuglog("PFL lock resource alocation fail\n");
1745 		dump_filelock(fl);
1746 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1747 		break;
1748 	default:
1749 		debuglog("PFL lock *FAILED*");
1750 		dump_filelock(fl);
1751 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1752 		break;
1753 	}
1754 
1755 	debuglog("Exiting do_lock...\n");
1756 
1757 	return retval;
1758 }
1759 
1760 enum nlm_stats
1761 do_unlock(struct file_lock *fl)
1762 {
1763 	enum partialfilelock_status pfsret;
1764 	enum nlm_stats retval;
1765 
1766 	debuglog("Entering do_unlock...\n");
1767 	pfsret = unlock_partialfilelock(fl);
1768 
1769 	switch (pfsret) {
1770 	case PFL_GRANTED:
1771 		debuglog("PFL unlock granted");
1772 		dump_filelock(fl);
1773 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1774 		break;
1775 	case PFL_NFSDENIED:
1776 	case PFL_HWDENIED:
1777 		debuglog("PFL_NFS unlock denied");
1778 		dump_filelock(fl);
1779 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1780 		break;
1781 	case PFL_NFSDENIED_NOLOCK:
1782 	case PFL_HWDENIED_NOLOCK:
1783 		debuglog("PFL_NFS no lock found\n");
1784 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1785 		break;
1786 	case PFL_NFSRESERR:
1787 	case PFL_HWRESERR:
1788 		debuglog("PFL unlock resource failure");
1789 		dump_filelock(fl);
1790 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1791 		break;
1792 	default:
1793 		debuglog("PFL unlock *FAILED*");
1794 		dump_filelock(fl);
1795 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1796 		break;
1797 	}
1798 
1799 	debuglog("Exiting do_unlock...\n");
1800 
1801 	return retval;
1802 }
1803 
1804 /*
1805  * do_clear
1806  *
1807  * This routine is non-existent because it doesn't have a return code.
1808  * It is here for completeness in case someone *does* need to do return
1809  * codes later.  A decent compiler should optimize this away.
1810  */
1811 
/*
 * do_clear: thin wrapper kept for symmetry with do_lock/do_unlock;
 * simply forwards to clear_partialfilelock.
 */
void
do_clear(const char *hostname)
{

	clear_partialfilelock(hostname);
}
1818 
1819 /*
1820  * The following routines are all called from the code which the
1821  * RPC layer invokes
1822  */
1823 
1824 /*
1825  * testlock(): inform the caller if the requested lock would be granted
1826  *
1827  * returns NULL if lock would granted
1828  * returns pointer to a conflicting nlm4_holder if not
1829  */
1830 
1831 struct nlm4_holder *
1832 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags)
1833 {
1834 	struct file_lock test_fl, *conflicting_fl;
1835 
1836 	bzero(&test_fl, sizeof(test_fl));
1837 
1838 	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
1839 	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1840 
1841 	siglock();
1842 	do_test(&test_fl, &conflicting_fl);
1843 
1844 	if (conflicting_fl == NULL) {
1845 		debuglog("No conflicting lock found\n");
1846 		sigunlock();
1847 		return NULL;
1848 	} else {
1849 		debuglog("Found conflicting lock\n");
1850 		dump_filelock(conflicting_fl);
1851 		sigunlock();
1852 		return (&conflicting_fl->client);
1853 	}
1854 }
1855 
1856 /*
1857  * getlock: try to aquire the lock.
1858  * If file is already locked and we can sleep, put the lock in the list with
1859  * status LKST_WAITING; it'll be processed later.
1860  * Otherwise try to lock. If we're allowed to block, fork a child which
1861  * will do the blocking lock.
1862  */
1863 
1864 enum nlm_stats
1865 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1866 {
1867 	struct file_lock *newfl;
1868 	enum nlm_stats retval;
1869 
1870 	debuglog("Entering getlock...\n");
1871 
1872 	if (grace_expired == 0 && lckarg->reclaim == 0)
1873 		return (flags & LOCK_V4) ?
1874 		    nlm4_denied_grace_period : nlm_denied_grace_period;
1875 
1876 	/* allocate new file_lock for this request */
1877 	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie);
1878 	if (newfl == NULL) {
1879 		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1880 		/* failed */
1881 		return (flags & LOCK_V4) ?
1882 		    nlm4_denied_nolocks : nlm_denied_nolocks;
1883 	}
1884 
1885 	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1886 		debuglog("recieved fhandle size %d, local size %d",
1887 		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1888 	}
1889 
1890 	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1891 	    (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf,
1892 	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
1893 	    lckarg->alock.l_len,
1894 	    lckarg->alock.caller_name, lckarg->state, 0, flags, lckarg->block);
1895 
1896 	/*
1897 	 * newfl is now fully constructed and deallocate_file_lock
1898 	 * can now be used to delete it
1899 	 */
1900 
1901 	siglock();
1902 	debuglog("Pointer to new lock is %p\n",newfl);
1903 
1904 	retval = do_lock(newfl);
1905 
1906 	debuglog("Pointer to new lock is %p\n",newfl);
1907 	sigunlock();
1908 
1909 	switch (retval)
1910 		{
1911 		case nlm4_granted:
1912 			/* case nlm_granted: is the same as nlm4_granted */
1913 			/* do_mon(lckarg->alock.caller_name); */
1914 			break;
1915 		case nlm4_blocked:
1916 			/* case nlm_blocked: is the same as nlm4_blocked */
1917 			/* do_mon(lckarg->alock.caller_name); */
1918 			break;
1919 		default:
1920 			deallocate_file_lock(newfl);
1921 			break;
1922 		}
1923 
1924 	debuglog("Exiting getlock...\n");
1925 
1926 	return retval;
1927 }
1928 
1929 
1930 /* unlock a filehandle */
1931 enum nlm_stats
1932 unlock(nlm4_lock *lock, const int flags)
1933 {
1934 	struct file_lock fl;
1935 	enum nlm_stats err;
1936 
1937 	siglock();
1938 
1939 	debuglog("Entering unlock...\n");
1940 
1941 	bzero(&fl,sizeof(struct file_lock));
1942 	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
1943 
1944 	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
1945 
1946 	err = do_unlock(&fl);
1947 
1948 	sigunlock();
1949 
1950 	debuglog("Exiting unlock...\n");
1951 
1952 	return err;
1953 }
1954 
1955 /*
1956  * XXX: The following monitor/unmonitor routines
1957  * have not been extensively tested (ie. no regression
1958  * script exists like for the locking sections
1959  */
1960 
1961 /*
1962  * monitor_lock_host: monitor lock hosts locally with a ref count and
1963  * inform statd
1964  */
1965 void
1966 monitor_lock_host(const char *hostname)
1967 {
1968 	struct host *ihp, *nhp;
1969 	struct mon smon;
1970 	struct sm_stat_res sres;
1971 	int rpcret, statflag;
1972 
1973 	rpcret = 0;
1974 	statflag = 0;
1975 
1976 	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
1977 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
1978 			/* Host is already monitored, bump refcount */
1979 			++ihp->refcnt;
1980 			/* Host should only be in the monitor list once */
1981 			return;
1982 		}
1983 	}
1984 
1985 	/* Host is not yet monitored, add it */
1986 	nhp = malloc(sizeof(struct host));
1987 
1988 	if (nhp == NULL) {
1989 		debuglog("Unable to allocate entry for statd mon\n");
1990 		return;
1991 	}
1992 
1993 	/* Allocated new host entry, now fill the fields */
1994 	strncpy(nhp->name, hostname, SM_MAXSTRLEN);
1995 	nhp->refcnt = 1;
1996 	debuglog("Locally Monitoring host %16s\n",hostname);
1997 
1998 	debuglog("Attempting to tell statd\n");
1999 
2000 	bzero(&smon,sizeof(smon));
2001 
2002 	smon.mon_id.mon_name = nhp->name;
2003 	smon.mon_id.my_id.my_name = "localhost\0";
2004 
2005 	smon.mon_id.my_id.my_prog = NLM_PROG;
2006 	smon.mon_id.my_id.my_vers = NLM_SM;
2007 	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2008 
2009 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON, xdr_mon,
2010 	    &smon, xdr_sm_stat_res, &sres);
2011 
2012 	if (rpcret == 0) {
2013 		if (sres.res_stat == stat_fail) {
2014 			debuglog("Statd call failed\n");
2015 			statflag = 0;
2016 		} else {
2017 			statflag = 1;
2018 		}
2019 	} else {
2020 		debuglog("Rpc call to statd failed with return value: %d\n",
2021 		    rpcret);
2022 		statflag = 0;
2023 	}
2024 
2025 	if (statflag == 1) {
2026 		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2027 	} else {
2028 		free(nhp);
2029 	}
2030 
2031 }
2032 
2033 /*
2034  * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2035  */
2036 void
2037 unmonitor_lock_host(const char *hostname)
2038 {
2039 	struct host *ihp;
2040 	struct mon_id smon_id;
2041 	struct sm_stat smstat;
2042 	int rpcret;
2043 
2044 	rpcret = 0;
2045 
2046 	for( ihp=LIST_FIRST(&hostlst_head); ihp != NULL;
2047 	     ihp=LIST_NEXT(ihp, hostlst)) {
2048 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2049 			/* Host is monitored, bump refcount */
2050 			--ihp->refcnt;
2051 			/* Host should only be in the monitor list once */
2052 			break;
2053 		}
2054 	}
2055 
2056 	if (ihp == NULL) {
2057 		debuglog("Could not find host %16s in mon list\n", hostname);
2058 		return;
2059 	}
2060 
2061 	if (ihp->refcnt > 0)
2062 		return;
2063 
2064 	if (ihp->refcnt < 0) {
2065 		debuglog("Negative refcount!: %d\n",
2066 		    ihp->refcnt);
2067 	}
2068 
2069 	debuglog("Attempting to unmonitor host %16s\n", hostname);
2070 
2071 	bzero(&smon_id,sizeof(smon_id));
2072 
2073 	smon_id.mon_name = (char *)hostname;
2074 	smon_id.my_id.my_name = "localhost";
2075 	smon_id.my_id.my_prog = NLM_PROG;
2076 	smon_id.my_id.my_vers = NLM_SM;
2077 	smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2078 
2079 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON, xdr_mon,
2080 	    &smon_id, xdr_sm_stat_res, &smstat);
2081 
2082 	if (rpcret != 0) {
2083 		debuglog("Rpc call to unmonitor statd failed with "
2084 		   " return value: %d\n", rpcret);
2085 	}
2086 
2087 	LIST_REMOVE(ihp, hostlst);
2088 	free(ihp);
2089 }
2090 
2091 /*
2092  * notify: Clear all locks from a host if statd complains
2093  *
2094  * XXX: This routine has not been thoroughly tested.  However, neither
2095  * had the old one been.  It used to compare the statd crash state counter
2096  * to the current lock state.  The upshot of this was that it basically
2097  * cleared all locks from the specified host 99% of the time (with the
2098  * other 1% being a bug).  Consequently, the assumption is that clearing
2099  * all locks from a host when notified by statd is acceptable.
2100  *
2101  * Please note that this routine skips the usual level of redirection
2102  * through a do_* type routine.  This introduces a possible level of
 * error and might better be written as do_notify and take this one out.
 */
2106 
/*
 * notify: statd has told us a host rebooted/crashed; discard every
 * lock that host held.  Runs with SIGCHLD blocked so the lock lists
 * cannot be modified underneath do_clear().
 */
void
notify(const char *hostname, const int state)
{
	/* Trailing \n added for consistency with every other debuglog call */
	debuglog("notify from %s, new state %d\n", hostname, state);

	siglock();
	do_clear(hostname);
	sigunlock();

	debuglog("Leaving notify\n");
}
2118 
2119 void
2120 send_granted(fl, opcode)
2121 	struct file_lock *fl;
2122 	int opcode;
2123 {
2124 	CLIENT *cli;
2125 	static char dummy;
2126 	struct timeval timeo;
2127 	int success;
2128 	static struct nlm_res retval;
2129 	static struct nlm4_res retval4;
2130 
2131 	debuglog("About to send granted on blocked lock\n");
2132 	sleep(1);
2133 	debuglog("Blowing off return send\n");
2134 
2135 	cli = get_client(fl->addr,
2136 	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2137 	if (cli == NULL) {
2138 		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2139 		    fl->client_name);
2140 		/*
2141 		 * We fail to notify remote that the lock has been granted.
2142 		 * The client will timeout and retry, the lock will be
2143 		 * granted at this time.
2144 		 */
2145 		return;
2146 	}
2147 	timeo.tv_sec = 0;
2148 	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2149 
2150 	if (fl->flags & LOCK_V4) {
2151 		static nlm4_testargs res;
2152 		res.cookie = fl->client_cookie;
2153 		res.exclusive = fl->client.exclusive;
2154 		res.alock.caller_name = fl->client_name;
2155 		res.alock.fh.n_len = sizeof(fhandle_t);
2156 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2157 		res.alock.oh = fl->client.oh;
2158 		res.alock.svid = fl->client.svid;
2159 		res.alock.l_offset = fl->client.l_offset;
2160 		res.alock.l_len = fl->client.l_len;
2161 		debuglog("sending v4 reply%s",
2162 			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2163 		if (fl->flags & LOCK_ASYNC) {
2164 			success = clnt_call(cli, NLM4_GRANTED_MSG,
2165 			    xdr_nlm4_testargs, &res, xdr_void, &dummy, timeo);
2166 		} else {
2167 			success = clnt_call(cli, NLM4_GRANTED,
2168 			    xdr_nlm4_testargs, &res, xdr_nlm4_res,
2169 			    &retval4, timeo);
2170 		}
2171 	} else {
2172 		static nlm_testargs res;
2173 
2174 		res.cookie = fl->client_cookie;
2175 		res.exclusive = fl->client.exclusive;
2176 		res.alock.caller_name = fl->client_name;
2177 		res.alock.fh.n_len = sizeof(fhandle_t);
2178 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2179 		res.alock.oh = fl->client.oh;
2180 		res.alock.svid = fl->client.svid;
2181 		res.alock.l_offset = fl->client.l_offset;
2182 		res.alock.l_len = fl->client.l_len;
2183 		debuglog("sending v1 reply%s",
2184 			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2185 		if (fl->flags & LOCK_ASYNC) {
2186 			success = clnt_call(cli, NLM_GRANTED_MSG,
2187 			    xdr_nlm_testargs, &res, xdr_void, &dummy, timeo);
2188 		} else {
2189 			success = clnt_call(cli, NLM_GRANTED,
2190 			    xdr_nlm_testargs, &res, xdr_nlm_res,
2191 			    &retval, timeo);
2192 		}
2193 	}
2194 	if (debug_level > 2)
2195 		debuglog("clnt_call returns %d(%s) for granted",
2196 			 success, clnt_sperrno(success));
2197 
2198 }
2199 
2200 /*
2201  * Routines below here have not been modified in the overhaul
2202  */
2203 
2204 /*
2205  * Are these two routines still required since lockd is not spawning off
2206  * children to service locks anymore?  Presumably they were originally
2207  * put in place to prevent a one child from changing the lock list out
2208  * from under another one.
2209  */
2210 
2211 void
2212 siglock(void)
2213 {
2214   sigset_t block;
2215 
2216   sigemptyset(&block);
2217   sigaddset(&block, SIGCHLD);
2218 
2219   if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2220     syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2221   }
2222 }
2223 
2224 void
2225 sigunlock(void)
2226 {
2227   sigset_t block;
2228 
2229   sigemptyset(&block);
2230   sigaddset(&block, SIGCHLD);
2231 
2232   if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2233     syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2234   }
2235 }
2236 
2237 
2238