1 /*	$NetBSD: lockd_lock.c,v 1.5 2000/11/21 03:47:41 enami Exp $	*/
2 
3 /*-
4  * SPDX-License-Identifier: BSD-4-Clause
5  *
6  * Copyright (c) 2001 Andrew P. Lentvorski, Jr.
7  * Copyright (c) 2000 Manuel Bouyer.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed by the University of
20  *	California, Berkeley and its contributors.
21  * 4. Neither the name of the University nor the names of its contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  *
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #define LOCKD_DEBUG
43 
44 #include <stdio.h>
45 #ifdef LOCKD_DEBUG
46 #include <stdarg.h>
47 #endif
48 #include <stdlib.h>
49 #include <unistd.h>
50 #include <fcntl.h>
51 #include <syslog.h>
52 #include <errno.h>
53 #include <string.h>
54 #include <signal.h>
55 #include <rpc/rpc.h>
56 #include <sys/types.h>
57 #include <sys/stat.h>
58 #include <sys/socket.h>
59 #include <sys/param.h>
60 #include <sys/mount.h>
61 #include <sys/wait.h>
62 #include <rpcsvc/sm_inter.h>
63 #include <rpcsvc/nlm_prot.h>
64 #include "lockd_lock.h"
65 #include "lockd.h"
66 
67 #define MAXOBJECTSIZE 64
68 #define MAXBUFFERSIZE 1024
69 
70 /*
71  * A set of utilities for managing file locking
72  *
73  * XXX: All locks are in a linked list; a better structure should be used
74  * to improve search/access efficiency.
75  */
76 
77 /* struct describing a lock */
78 struct file_lock {
79 	LIST_ENTRY(file_lock) nfslocklist;
80 	fhandle_t filehandle; /* NFS filehandle */
81 	struct sockaddr *addr;
82 	struct nlm4_holder client; /* lock holder */
83 	/* XXX: client_cookie used *only* in send_granted */
84 	netobj client_cookie; /* cookie sent by the client */
85 	int nsm_status; /* status from the remote lock manager */
86 	int status; /* lock status, see below */
87 	int flags; /* lock flags, see lockd_lock.h */
88 	int blocking; /* blocking lock or not */
89 	char client_name[SM_MAXSTRLEN];	/* client_name is really variable
90 					   length and must be last! */
91 };
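/*
 * Note: because client_name must remain the last member, allocate_file_lock()
 * below sizes each allocation as
 *	sizeof(struct file_lock) - sizeof(client_name) + strlen(caller_name) + 1
 * so only the bytes actually needed for the client's name are reserved.
 */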
92 
93 LIST_HEAD(nfslocklist_head, file_lock);
94 struct nfslocklist_head nfslocklist_head = LIST_HEAD_INITIALIZER(nfslocklist_head);
95 
96 LIST_HEAD(blockedlocklist_head, file_lock);
97 struct blockedlocklist_head blockedlocklist_head = LIST_HEAD_INITIALIZER(blockedlocklist_head);
98 
99 /* lock status */
100 #define LKST_LOCKED	1 /* lock is locked */
101 /* XXX: Is this flag file specific or lock specific? */
102 #define LKST_WAITING	2 /* file is already locked by another host */
103 #define LKST_PROCESSING	3 /* child is trying to acquire the lock */
104 #define LKST_DYING	4 /* must die when we get news from the child */
105 
106 /* struct describing a monitored host */
107 struct host {
108 	LIST_ENTRY(host) hostlst;
109 	int refcnt;
110 	char name[SM_MAXSTRLEN]; /* name is really variable length and
111                                     must be last! */
112 };
113 /* list of hosts we monitor */
114 LIST_HEAD(hostlst_head, host);
115 struct hostlst_head hostlst_head = LIST_HEAD_INITIALIZER(hostlst_head);
116 
117 /*
118  * File monitoring handlers
119  * XXX: These might be able to be removed when kevent support
120  * is placed into the hardware lock/unlock routines.  (i.e.,
121  * let the kernel do all the file monitoring)
122  */
123 
124 /* Struct describing a monitored file */
125 struct monfile {
126 	LIST_ENTRY(monfile) monfilelist;
127 	fhandle_t filehandle; /* Local access filehandle */
128 	int fd; /* file descriptor: remains open until unlock! */
129 	int refcount;
130 	int exclusive;
131 };
132 
133 /* List of files we monitor */
134 LIST_HEAD(monfilelist_head, monfile);
135 struct monfilelist_head monfilelist_head = LIST_HEAD_INITIALIZER(monfilelist_head);
136 
137 static int debugdelay = 0;
138 
139 enum nfslock_status { NFS_GRANTED = 0, NFS_GRANTED_DUPLICATE,
140 		      NFS_DENIED, NFS_DENIED_NOLOCK,
141 		      NFS_RESERR };
142 
143 enum hwlock_status { HW_GRANTED = 0, HW_GRANTED_DUPLICATE,
144 		     HW_DENIED, HW_DENIED_NOLOCK,
145 		     HW_STALEFH, HW_READONLY, HW_RESERR };
146 
147 enum partialfilelock_status { PFL_GRANTED=0, PFL_GRANTED_DUPLICATE, PFL_DENIED,
148 			      PFL_NFSDENIED, PFL_NFSBLOCKED, PFL_NFSDENIED_NOLOCK, PFL_NFSRESERR,
149 			      PFL_HWDENIED,  PFL_HWBLOCKED,  PFL_HWDENIED_NOLOCK, PFL_HWRESERR};
150 
151 enum LFLAGS {LEDGE_LEFT, LEDGE_LBOUNDARY, LEDGE_INSIDE, LEDGE_RBOUNDARY, LEDGE_RIGHT};
152 enum RFLAGS {REDGE_LEFT, REDGE_LBOUNDARY, REDGE_INSIDE, REDGE_RBOUNDARY, REDGE_RIGHT};
153 /* XXX: WARNING! I HAVE OVERLOADED THIS STATUS ENUM!  SPLIT IT APART INTO TWO */
154 enum split_status {SPL_DISJOINT=0, SPL_LOCK1=1, SPL_LOCK2=2, SPL_CONTAINED=4, SPL_RESERR=8};
155 
156 enum partialfilelock_status lock_partialfilelock(struct file_lock *fl);
157 
158 void send_granted(struct file_lock *fl, int opcode);
159 void siglock(void);
160 void sigunlock(void);
161 void monitor_lock_host(const char *hostname);
162 void unmonitor_lock_host(char *hostname);
163 
164 void	copy_nlm4_lock_to_nlm4_holder(const struct nlm4_lock *src,
165     const bool_t exclusive, struct nlm4_holder *dest);
166 struct file_lock *	allocate_file_lock(const netobj *lockowner,
167 					   const netobj *matchcookie,
168 					   const struct sockaddr *addr,
169 					   const char *caller_name);
170 void	deallocate_file_lock(struct file_lock *fl);
171 void	fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
172 		       const bool_t exclusive, const int32_t svid,
173     const u_int64_t offset, const u_int64_t len,
174     const int state, const int status, const int flags, const int blocking);
175 int	regions_overlap(const u_int64_t start1, const u_int64_t len1,
176     const u_int64_t start2, const u_int64_t len2);
177 enum split_status  region_compare(const u_int64_t starte, const u_int64_t lene,
178     const u_int64_t startu, const u_int64_t lenu,
179     u_int64_t *start1, u_int64_t *len1, u_int64_t *start2, u_int64_t *len2);
180 int	same_netobj(const netobj *n0, const netobj *n1);
181 int	same_filelock_identity(const struct file_lock *fl0,
182     const struct file_lock *fl2);
183 
184 static void debuglog(char const *fmt, ...);
185 void dump_static_object(const unsigned char* object, const int sizeof_object,
186                         unsigned char* hbuff, const int sizeof_hbuff,
187                         unsigned char* cbuff, const int sizeof_cbuff);
188 void dump_netobj(const struct netobj *nobj);
189 void dump_filelock(const struct file_lock *fl);
190 struct file_lock *	get_lock_matching_unlock(const struct file_lock *fl);
191 enum nfslock_status	test_nfslock(const struct file_lock *fl,
192     struct file_lock **conflicting_fl);
193 enum nfslock_status	lock_nfslock(struct file_lock *fl);
194 enum nfslock_status	delete_nfslock(struct file_lock *fl);
195 enum nfslock_status	unlock_nfslock(const struct file_lock *fl,
196     struct file_lock **released_lock, struct file_lock **left_lock,
197     struct file_lock **right_lock);
198 enum hwlock_status lock_hwlock(struct file_lock *fl);
199 enum split_status split_nfslock(const struct file_lock *exist_lock,
200     const struct file_lock *unlock_lock, struct file_lock **left_lock,
201     struct file_lock **right_lock);
202 int	duplicate_block(struct file_lock *fl);
203 void	add_blockingfilelock(struct file_lock *fl);
204 enum hwlock_status	unlock_hwlock(const struct file_lock *fl);
205 enum hwlock_status	test_hwlock(const struct file_lock *fl,
206     struct file_lock **conflicting_fl);
207 void	remove_blockingfilelock(struct file_lock *fl);
208 void	clear_blockingfilelock(const char *hostname);
209 void	retry_blockingfilelocklist(void);
210 enum partialfilelock_status	unlock_partialfilelock(
211     const struct file_lock *fl);
212 void	clear_partialfilelock(const char *hostname);
213 enum partialfilelock_status	test_partialfilelock(
214     const struct file_lock *fl, struct file_lock **conflicting_fl);
215 enum nlm_stats	do_test(struct file_lock *fl,
216     struct file_lock **conflicting_fl);
217 enum nlm_stats	do_unlock(struct file_lock *fl);
218 enum nlm_stats	do_lock(struct file_lock *fl);
219 void	do_clear(const char *hostname);
220 
221 static void
222 debuglog(char const *fmt, ...)
223 {
224 	va_list ap;
225 
226 	if (debug_level < 1) {
227 		return;
228 	}
229 
230 	sleep(debugdelay);
231 
232 	va_start(ap, fmt);
233 	vsyslog(LOG_DEBUG, fmt, ap);
234 	va_end(ap);
235 }
236 
237 void
238 dump_static_object(object, size_object, hbuff, size_hbuff, cbuff, size_cbuff)
239 	const unsigned char *object;
240 	const int size_object;
241 	unsigned char *hbuff;
242 	const int size_hbuff;
243 	unsigned char *cbuff;
244 	const int size_cbuff;
245 {
246 	int i, objectsize;
247 
248 	if (debug_level < 2) {
249 		return;
250 	}
251 
252 	objectsize = size_object;
253 
254 	if (objectsize == 0) {
255 		debuglog("object is size 0\n");
256 	} else {
257 		if (objectsize > MAXOBJECTSIZE) {
258 			debuglog("Object of size %d being clamped"
259 			    " to size %d\n", objectsize, MAXOBJECTSIZE);
260 			objectsize = MAXOBJECTSIZE;
261 		}
262 
263 		if (hbuff != NULL) {
264 			if (size_hbuff < objectsize*2+1) {
265 				debuglog("Hbuff not large enough."
266 				    "  Increase size\n");
267 			} else {
268 				for(i=0;i<objectsize;i++) {
269 					sprintf(hbuff+i*2,"%02x",*(object+i));
270 				}
271 				*(hbuff+i*2) = '\0';
272 			}
273 		}
274 
275 		if (cbuff != NULL) {
276 			if (size_cbuff < objectsize+1) {
277 				debuglog("Cbuff not large enough."
278 				    "  Increase Size\n");
279 			} else {
280 				for(i=0;i<objectsize;i++) {
281 					if (*(object+i) >= 32 && *(object+i) <= 127) {
282 						*(cbuff+i) = *(object+i);
283 					} else {
284 						*(cbuff+i) = '.';
285 					}
286 				}
287 				*(cbuff+i) = '\0';
288 			}
289 		}
290 	}
291 }
292 
293 void
294 dump_netobj(const struct netobj *nobj)
295 {
296 	char hbuff[MAXBUFFERSIZE*2];
297 	char cbuff[MAXBUFFERSIZE];
298 
299 	if (debug_level < 2) {
300 		return;
301 	}
302 
303 	if (nobj == NULL) {
304 		debuglog("Null netobj pointer\n");
305 	}
306 	else if (nobj->n_len == 0) {
307 		debuglog("Size zero netobj\n");
308 	} else {
309 		dump_static_object(nobj->n_bytes, nobj->n_len,
310 		    hbuff, sizeof(hbuff), cbuff, sizeof(cbuff));
311 		debuglog("netobj: len: %d  data: %s :::  %s\n",
312 		    nobj->n_len, hbuff, cbuff);
313 	}
314 }
315 
316 /* #define DUMP_FILELOCK_VERBOSE */
317 void
318 dump_filelock(const struct file_lock *fl)
319 {
320 #ifdef DUMP_FILELOCK_VERBOSE
321 	char hbuff[MAXBUFFERSIZE*2];
322 	char cbuff[MAXBUFFERSIZE];
323 #endif
324 
325 	if (debug_level < 2) {
326 		return;
327 	}
328 
329 	if (fl != NULL) {
330 		debuglog("Dumping file lock structure @ %p\n", fl);
331 
332 #ifdef DUMP_FILELOCK_VERBOSE
333 		dump_static_object((unsigned char *)&fl->filehandle,
334 		    sizeof(fl->filehandle), hbuff, sizeof(hbuff),
335 		    cbuff, sizeof(cbuff));
336 		debuglog("Filehandle: %8s  :::  %8s\n", hbuff, cbuff);
337 #endif
338 
339 		debuglog("Dumping nlm4_holder:\n"
340 		    "exc: %x  svid: %x  offset:len %llx:%llx\n",
341 		    fl->client.exclusive, fl->client.svid,
342 		    fl->client.l_offset, fl->client.l_len);
343 
344 #ifdef DUMP_FILELOCK_VERBOSE
345 		debuglog("Dumping client identity:\n");
346 		dump_netobj(&fl->client.oh);
347 
348 		debuglog("Dumping client cookie:\n");
349 		dump_netobj(&fl->client_cookie);
350 
351 		debuglog("nsm: %d  status: %d  flags: %d  svid: %x"
352 		    "  client_name: %s\n", fl->nsm_status, fl->status,
353 		    fl->flags, fl->client.svid, fl->client_name);
354 #endif
355 	} else {
356 		debuglog("NULL file lock structure\n");
357 	}
358 }
359 
360 void
361 copy_nlm4_lock_to_nlm4_holder(src, exclusive, dest)
362 	const struct nlm4_lock *src;
363 	const bool_t exclusive;
364 	struct nlm4_holder *dest;
365 {
366 
367 	dest->exclusive = exclusive;
368 	dest->oh.n_len = src->oh.n_len;
369 	dest->oh.n_bytes = src->oh.n_bytes;
370 	dest->svid = src->svid;
371 	dest->l_offset = src->l_offset;
372 	dest->l_len = src->l_len;
373 }
374 
375 /*
376  * allocate_file_lock: Create a lock with the given parameters
377  */
378 
379 struct file_lock *
380 allocate_file_lock(const netobj *lockowner, const netobj *matchcookie,
381 		   const struct sockaddr *addr, const char *caller_name)
382 {
383 	struct file_lock *newfl;
384 	size_t n;
385 
386 	/* Beware of rubbish input! */
387 	n = strnlen(caller_name, SM_MAXSTRLEN);
388 	if (n == SM_MAXSTRLEN) {
389 		return NULL;
390 	}
391 
392 	newfl = malloc(sizeof(*newfl) - sizeof(newfl->client_name) + n + 1);
393 	if (newfl == NULL) {
394 		return NULL;
395 	}
396 	bzero(newfl, sizeof(*newfl) - sizeof(newfl->client_name));
397 	memcpy(newfl->client_name, caller_name, n);
398 	newfl->client_name[n] = 0;
399 
400 	newfl->client.oh.n_bytes = malloc(lockowner->n_len);
401 	if (newfl->client.oh.n_bytes == NULL) {
402 		free(newfl);
403 		return NULL;
404 	}
405 	newfl->client.oh.n_len = lockowner->n_len;
406 	bcopy(lockowner->n_bytes, newfl->client.oh.n_bytes, lockowner->n_len);
407 
408 	newfl->client_cookie.n_bytes = malloc(matchcookie->n_len);
409 	if (newfl->client_cookie.n_bytes == NULL) {
410 		free(newfl->client.oh.n_bytes);
411 		free(newfl);
412 		return NULL;
413 	}
414 	newfl->client_cookie.n_len = matchcookie->n_len;
415 	bcopy(matchcookie->n_bytes, newfl->client_cookie.n_bytes, matchcookie->n_len);
416 
417 	newfl->addr = malloc(addr->sa_len);
418 	if (newfl->addr == NULL) {
419 		free(newfl->client_cookie.n_bytes);
420 		free(newfl->client.oh.n_bytes);
421 		free(newfl);
422 		return NULL;
423 	}
424 	memcpy(newfl->addr, addr, addr->sa_len);
425 
426 	return newfl;
427 }
428 
429 /*
430  * fill_file_lock: Force creation of a valid file lock
431  */
432 void
433 fill_file_lock(struct file_lock *fl, const fhandle_t *fh,
434     const bool_t exclusive, const int32_t svid,
435     const u_int64_t offset, const u_int64_t len,
436     const int state, const int status, const int flags, const int blocking)
437 {
438 	bcopy(fh, &fl->filehandle, sizeof(fhandle_t));
439 
440 	fl->client.exclusive = exclusive;
441 	fl->client.svid = svid;
442 	fl->client.l_offset = offset;
443 	fl->client.l_len = len;
444 
445 	fl->nsm_status = state;
446 	fl->status = status;
447 	fl->flags = flags;
448 	fl->blocking = blocking;
449 }
450 
451 /*
452  * deallocate_file_lock: Free all storage associated with a file lock
453  */
454 void
455 deallocate_file_lock(struct file_lock *fl)
456 {
457 	free(fl->addr);
458 	free(fl->client.oh.n_bytes);
459 	free(fl->client_cookie.n_bytes);
460 	free(fl);
461 }
462 
463 /*
464  * regions_overlap(): This function examines the two provided regions for
465  * overlap.
466  */
467 int
468 regions_overlap(start1, len1, start2, len2)
469 	const u_int64_t start1, len1, start2, len2;
470 {
471 	u_int64_t d1,d2,d3,d4;
472 	enum split_status result;
473 
474 	debuglog("Entering region overlap with vals: %llu:%llu--%llu:%llu\n",
475 		 start1, len1, start2, len2);
476 
477 	result = region_compare(start1, len1, start2, len2,
478 	    &d1, &d2, &d3, &d4);
479 
480 	debuglog("Exiting region overlap with val: %d\n",result);
481 
482 	if (result == SPL_DISJOINT) {
483 		return 0;
484 	} else {
485 		return 1;
486 	}
487 }
488 
489 /*
490  * region_compare(): Examine lock regions and split appropriately
491  *
492  * XXX: Fix 64 bit overflow problems
493  * XXX: Check to make sure I got *ALL* the cases.
494  * XXX: This DESPERATELY needs a regression test.
495  */
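/*
 * Worked example: with an established lock of starte=0, lene=100 and an
 * unlock request of startu=20, lenu=30, the left remainder is returned as
 * start1=0, len1=20 and the right remainder as start2=50, len2=50, with a
 * return value of SPL_LOCK1|SPL_LOCK2 (the split results are a bit mask).
 * SPL_CONTAINED is returned when the unlock covers the established lock
 * entirely and SPL_DISJOINT when the two regions do not touch.
 */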
496 enum split_status
497 region_compare(starte, lene, startu, lenu,
498     start1, len1, start2, len2)
499 	const u_int64_t starte, lene, startu, lenu;
500 	u_int64_t *start1, *len1, *start2, *len2;
501 {
502 	/*
503 	 * Please pay attention to the sequential exclusions
504 	 * of the if statements!!!
505 	 */
506 	enum LFLAGS lflags;
507 	enum RFLAGS rflags;
508 	enum split_status retval;
509 
510 	retval = SPL_DISJOINT;
511 
512 	if (lene == 0 && lenu == 0) {
513 		/* Examine left edge of locker */
514 		lflags = LEDGE_INSIDE;
515 		if (startu < starte) {
516 			lflags = LEDGE_LEFT;
517 		} else if (startu == starte) {
518 			lflags = LEDGE_LBOUNDARY;
519 		}
520 
521 		rflags = REDGE_RBOUNDARY; /* Both are infinite */
522 
523 		if (lflags == LEDGE_INSIDE) {
524 			*start1 = starte;
525 			*len1 = startu - starte;
526 		}
527 
528 		if (lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) {
529 			retval = SPL_CONTAINED;
530 		} else {
531 			retval = SPL_LOCK1;
532 		}
533 	} else if (lene == 0 && lenu != 0) {
534 		/* Established lock is infinite */
535 		/* Examine left edge of unlocker */
536 		lflags = LEDGE_INSIDE;
537 		if (startu < starte) {
538 			lflags = LEDGE_LEFT;
539 		} else if (startu == starte) {
540 			lflags = LEDGE_LBOUNDARY;
541 		}
542 
543 		/* Examine right edge of unlocker */
544 		if (startu + lenu < starte) {
545 			/* Right edge of unlocker left of established lock */
546 			rflags = REDGE_LEFT;
547 			return SPL_DISJOINT;
548 		} else if (startu + lenu == starte) {
549 			/* Right edge of unlocker on start of established lock */
550 			rflags = REDGE_LBOUNDARY;
551 			return SPL_DISJOINT;
552 		} else { /* Infinity is right of finity */
553 			/* Right edge of unlocker inside established lock */
554 			rflags = REDGE_INSIDE;
555 		}
556 
557 		if (lflags == LEDGE_INSIDE) {
558 			*start1 = starte;
559 			*len1 = startu - starte;
560 			retval |= SPL_LOCK1;
561 		}
562 
563 		if (rflags == REDGE_INSIDE) {
564 			/* Create right lock */
565 			*start2 = startu+lenu;
566 			*len2 = 0;
567 			retval |= SPL_LOCK2;
568 		}
569 	} else if (lene != 0 && lenu == 0) {
570 		/* Unlocker is infinite */
571 		/* Examine left edge of unlocker */
572 		lflags = LEDGE_RIGHT;
573 		if (startu < starte) {
574 			lflags = LEDGE_LEFT;
575 			retval = SPL_CONTAINED;
576 			return retval;
577 		} else if (startu == starte) {
578 			lflags = LEDGE_LBOUNDARY;
579 			retval = SPL_CONTAINED;
580 			return retval;
581 		} else if ((startu > starte) && (startu < starte + lene - 1)) {
582 			lflags = LEDGE_INSIDE;
583 		} else if (startu == starte + lene - 1) {
584 			lflags = LEDGE_RBOUNDARY;
585 		} else { /* startu > starte + lene -1 */
586 			lflags = LEDGE_RIGHT;
587 			return SPL_DISJOINT;
588 		}
589 
590 		rflags = REDGE_RIGHT; /* Infinity is right of finity */
591 
592 		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
593 			*start1 = starte;
594 			*len1 = startu - starte;
595 			retval |= SPL_LOCK1;
596 			return retval;
597 		}
598 	} else {
599 		/* Both locks are finite */
600 
601 		/* Examine left edge of unlocker */
602 		lflags = LEDGE_RIGHT;
603 		if (startu < starte) {
604 			lflags = LEDGE_LEFT;
605 		} else if (startu == starte) {
606 			lflags = LEDGE_LBOUNDARY;
607 		} else if ((startu > starte) && (startu < starte + lene - 1)) {
608 			lflags = LEDGE_INSIDE;
609 		} else if (startu == starte + lene - 1) {
610 			lflags = LEDGE_RBOUNDARY;
611 		} else { /* startu > starte + lene -1 */
612 			lflags = LEDGE_RIGHT;
613 			return SPL_DISJOINT;
614 		}
615 
616 		/* Examine right edge of unlocker */
617 		if (startu + lenu < starte) {
618 			/* Right edge of unlocker left of established lock */
619 			rflags = REDGE_LEFT;
620 			return SPL_DISJOINT;
621 		} else if (startu + lenu == starte) {
622 			/* Right edge of unlocker on start of established lock */
623 			rflags = REDGE_LBOUNDARY;
624 			return SPL_DISJOINT;
625 		} else if (startu + lenu < starte + lene) {
626 			/* Right edge of unlocker inside established lock */
627 			rflags = REDGE_INSIDE;
628 		} else if (startu + lenu == starte + lene) {
629 			/* Right edge of unlocker on right edge of established lock */
630 			rflags = REDGE_RBOUNDARY;
631 		} else { /* startu + lenu > starte + lene */
632 			/* Right edge of unlocker is right of established lock */
633 			rflags = REDGE_RIGHT;
634 		}
635 
636 		if (lflags == LEDGE_INSIDE || lflags == LEDGE_RBOUNDARY) {
637 			/* Create left lock */
638 			*start1 = starte;
639 			*len1 = (startu - starte);
640 			retval |= SPL_LOCK1;
641 		}
642 
643 		if (rflags == REDGE_INSIDE) {
644 			/* Create right lock */
645 			*start2 = startu+lenu;
646 			*len2 = starte+lene-(startu+lenu);
647 			retval |= SPL_LOCK2;
648 		}
649 
650 		if ((lflags == LEDGE_LEFT || lflags == LEDGE_LBOUNDARY) &&
651 		    (rflags == REDGE_RBOUNDARY || rflags == REDGE_RIGHT)) {
652 			retval = SPL_CONTAINED;
653 		}
654 	}
655 	return retval;
656 }
657 
658 /*
659  * same_netobj: Compares the appropriate bits of a netobj for identity
660  */
661 int
662 same_netobj(const netobj *n0, const netobj *n1)
663 {
664 	int retval;
665 
666 	retval = 0;
667 
668 	debuglog("Entering netobj identity check\n");
669 
670 	if (n0->n_len == n1->n_len) {
671 		debuglog("Preliminary length check passed\n");
672 		retval = !bcmp(n0->n_bytes, n1->n_bytes, n0->n_len);
673 		debuglog("netobj %smatch\n", retval ? "" : "mis");
674 	}
675 
676 	return (retval);
677 }
678 
679 /*
680  * same_filelock_identity: Compares the appropriate bits of a file_lock
681  */
682 int
683 same_filelock_identity(fl0, fl1)
684 	const struct file_lock *fl0, *fl1;
685 {
686 	int retval;
687 
688 	retval = 0;
689 
690 	debuglog("Checking filelock identity\n");
691 
692 	/*
693 	 * Check process ids and host information.
694 	 */
695 	retval = (fl0->client.svid == fl1->client.svid &&
696 	    same_netobj(&(fl0->client.oh), &(fl1->client.oh)));
697 
698 	debuglog("Exiting checking filelock identity: retval: %d\n",retval);
699 
700 	return (retval);
701 }
702 
703 /*
704  * Below here are routines associated with manipulating the NFS
705  * lock list.
706  */
707 
708 /*
709  * get_lock_matching_unlock: Return a lock which matches the given unlock lock
710  *                           or NULL otherwise
711  * XXX: It is a shame that this duplicates so much code from test_nfslock.
712  */
713 struct file_lock *
714 get_lock_matching_unlock(const struct file_lock *fl)
715 {
716 	struct file_lock *ifl; /* Iterator */
717 
718 	debuglog("Entering get_lock_matching_unlock\n");
719 	debuglog("********Dump of fl*****************\n");
720 	dump_filelock(fl);
721 
722 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
723 		debuglog("Pointer to file lock: %p\n",ifl);
724 
725 		debuglog("****Dump of ifl****\n");
726 		dump_filelock(ifl);
727 		debuglog("*******************\n");
728 
729 		/*
730 		 * XXX: It is conceivable that someone could use the NLM RPC
731 		 * system to directly access filehandles.  This may be a
732 		 * security hazard as the filehandle code may bypass normal
733 		 * file access controls
734 		 */
735 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
736 			continue;
737 
738 		debuglog("get_lock_matching_unlock: Filehandles match, "
739 		    "checking regions\n");
740 
741 		/* Filehandles match, check for region overlap */
742 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
743 			ifl->client.l_offset, ifl->client.l_len))
744 			continue;
745 
746 		debuglog("get_lock_matching_unlock: Region overlap"
747 		    " found %llu : %llu -- %llu : %llu\n",
748 		    fl->client.l_offset,fl->client.l_len,
749 		    ifl->client.l_offset,ifl->client.l_len);
750 
751 		/* Regions overlap, check the identity */
752 		if (!same_filelock_identity(fl,ifl))
753 			continue;
754 
755 		debuglog("get_lock_matching_unlock: Duplicate lock id.  Granting\n");
756 		return (ifl);
757 	}
758 
759 	debuglog("Exiting get_lock_matching_unlock\n");
760 
761 	return (NULL);
762 }
763 
764 /*
765  * test_nfslock: check for NFS lock in lock list
766  *
767  * This routine makes the following assumptions:
768  *    1) Nothing will adjust the lock list during a lookup
769  *
770  * This routine has an interesting quirk which bit me hard.
771  * The conflicting_fl is the pointer to the conflicting lock.
772  * However, to modify the "*pointer* to the conflicting lock" rather
773  * than the "conflicting lock itself" one must pass in a "pointer to
774  * the pointer of the conflicting lock".  Gross.
775  */
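/*
 * A minimal usage sketch:
 *
 *	struct file_lock *conflict = NULL;
 *
 *	if (test_nfslock(fl, &conflict) == NFS_DENIED)
 *		dump_filelock(conflict);
 *
 * i.e. the callee stores the conflicting lock's address through the
 * supplied pointer-to-pointer.
 */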
776 
777 enum nfslock_status
778 test_nfslock(const struct file_lock *fl, struct file_lock **conflicting_fl)
779 {
780 	struct file_lock *ifl; /* Iterator */
781 	enum nfslock_status retval;
782 
783 	debuglog("Entering test_nfslock\n");
784 
785 	retval = NFS_GRANTED;
786 	(*conflicting_fl) = NULL;
787 
788 	debuglog("Entering lock search loop\n");
789 
790 	debuglog("***********************************\n");
791 	debuglog("Dumping match filelock\n");
792 	debuglog("***********************************\n");
793 	dump_filelock(fl);
794 	debuglog("***********************************\n");
795 
796 	LIST_FOREACH(ifl, &nfslocklist_head, nfslocklist) {
797 		if (retval == NFS_DENIED)
798 			break;
799 
800 		debuglog("Top of lock loop\n");
801 		debuglog("Pointer to file lock: %p\n",ifl);
802 
803 		debuglog("***********************************\n");
804 		debuglog("Dumping test filelock\n");
805 		debuglog("***********************************\n");
806 		dump_filelock(ifl);
807 		debuglog("***********************************\n");
808 
809 		/*
810 		 * XXX: It is conceivable that someone could use the NLM RPC
811 		 * system to directly access filehandles.  This may be a
812 		 * security hazard as the filehandle code may bypass normal
813 		 * file access controls
814 		 */
815 		if (bcmp(&fl->filehandle, &ifl->filehandle, sizeof(fhandle_t)))
816 			continue;
817 
818 		debuglog("test_nfslock: filehandle match found\n");
819 
820 		/* Filehandles match, check for region overlap */
821 		if (!regions_overlap(fl->client.l_offset, fl->client.l_len,
822 			ifl->client.l_offset, ifl->client.l_len))
823 			continue;
824 
825 		debuglog("test_nfslock: Region overlap found"
826 		    " %llu : %llu -- %llu : %llu\n",
827 		    fl->client.l_offset,fl->client.l_len,
828 		    ifl->client.l_offset,ifl->client.l_len);
829 
830 		/* Regions overlap, check the exclusivity */
831 		if (!(fl->client.exclusive || ifl->client.exclusive))
832 			continue;
833 
834 		debuglog("test_nfslock: Exclusivity failure: %d %d\n",
835 		    fl->client.exclusive,
836 		    ifl->client.exclusive);
837 
838 		if (same_filelock_identity(fl,ifl)) {
839 			debuglog("test_nfslock: Duplicate id.  Granting\n");
840 			(*conflicting_fl) = ifl;
841 			retval = NFS_GRANTED_DUPLICATE;
842 		} else {
843 			/* locking attempt fails */
844 			debuglog("test_nfslock: Lock attempt failed\n");
845 			debuglog("Desired lock\n");
846 			dump_filelock(fl);
847 			debuglog("Conflicting lock\n");
848 			dump_filelock(ifl);
849 			(*conflicting_fl) = ifl;
850 			retval = NFS_DENIED;
851 		}
852 	}
853 
854 	debuglog("Dumping file locks\n");
855 	debuglog("Exiting test_nfslock\n");
856 
857 	return (retval);
858 }
859 
860 /*
861  * lock_nfslock: attempt to create a lock in the NFS lock list
862  *
863  * This routine tests whether the lock will be granted and then adds
864  * the entry to the lock list if so.
865  *
866  * Argument fl gets modified as its list housekeeping entries get modified
867  * upon insertion into the NFS lock list
868  *
869  * This routine makes several assumptions:
870  *    1) It is perfectly happy to grant a duplicate lock from the same pid.
871  *       While this seems to be intuitively wrong, it is required for proper
872  *       Posix semantics during unlock.  It is absolutely imperative to not
873  *       unlock the main lock before the two child locks are established. Thus,
874  *       one has to be able to create duplicate locks over an existing lock
875  *    2) It currently accepts duplicate locks from the same id,pid
876  */
877 
878 enum nfslock_status
879 lock_nfslock(struct file_lock *fl)
880 {
881 	enum nfslock_status retval;
882 	struct file_lock *dummy_fl;
883 
884 	dummy_fl = NULL;
885 
886 	debuglog("Entering lock_nfslock...\n");
887 
888 	retval = test_nfslock(fl,&dummy_fl);
889 
890 	if (retval == NFS_GRANTED || retval == NFS_GRANTED_DUPLICATE) {
891 		debuglog("Inserting lock...\n");
892 		dump_filelock(fl);
893 		LIST_INSERT_HEAD(&nfslocklist_head, fl, nfslocklist);
894 	}
895 
896 	debuglog("Exiting lock_nfslock...\n");
897 
898 	return (retval);
899 }
900 
901 /*
902  * delete_nfslock: delete an NFS lock list entry
903  *
904  * This routine is used to delete a lock out of the NFS lock list
905  * without regard to status, underlying locks, regions or anything else
906  *
907  * Note that this routine *does not deallocate memory* of the lock.
908  * It just disconnects it from the list.  The lock can then be used
909  * by other routines without fear of trashing the list.
910  */
911 
912 enum nfslock_status
913 delete_nfslock(struct file_lock *fl)
914 {
915 
916 	LIST_REMOVE(fl, nfslocklist);
917 
918 	return (NFS_GRANTED);
919 }
920 
921 enum split_status
922 split_nfslock(exist_lock, unlock_lock, left_lock, right_lock)
923 	const struct file_lock *exist_lock, *unlock_lock;
924 	struct file_lock **left_lock, **right_lock;
925 {
926 	u_int64_t start1, len1, start2, len2;
927 	enum split_status spstatus;
928 
929 	spstatus = region_compare(exist_lock->client.l_offset, exist_lock->client.l_len,
930 	    unlock_lock->client.l_offset, unlock_lock->client.l_len,
931 	    &start1, &len1, &start2, &len2);
932 
933 	if ((spstatus & SPL_LOCK1) != 0) {
934 		*left_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
935 		if (*left_lock == NULL) {
936 			debuglog("Unable to allocate resource for split 1\n");
937 			return SPL_RESERR;
938 		}
939 
940 		fill_file_lock(*left_lock, &exist_lock->filehandle,
941 		    exist_lock->client.exclusive, exist_lock->client.svid,
942 		    start1, len1,
943 		    exist_lock->nsm_status,
944 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
945 	}
946 
947 	if ((spstatus & SPL_LOCK2) != 0) {
948 		*right_lock = allocate_file_lock(&exist_lock->client.oh, &exist_lock->client_cookie, exist_lock->addr, exist_lock->client_name);
949 		if (*right_lock == NULL) {
950 			debuglog("Unable to allocate resource for split 2\n");
951 			if (*left_lock != NULL) {
952 				deallocate_file_lock(*left_lock);
953 			}
954 			return SPL_RESERR;
955 		}
956 
957 		fill_file_lock(*right_lock, &exist_lock->filehandle,
958 		    exist_lock->client.exclusive, exist_lock->client.svid,
959 		    start2, len2,
960 		    exist_lock->nsm_status,
961 		    exist_lock->status, exist_lock->flags, exist_lock->blocking);
962 	}
963 
964 	return spstatus;
965 }
966 
967 enum nfslock_status
968 unlock_nfslock(fl, released_lock, left_lock, right_lock)
969 	const struct file_lock *fl;
970 	struct file_lock **released_lock;
971 	struct file_lock **left_lock;
972 	struct file_lock **right_lock;
973 {
974 	struct file_lock *mfl; /* Matching file lock */
975 	enum nfslock_status retval;
976 	enum split_status spstatus;
977 
978 	debuglog("Entering unlock_nfslock\n");
979 
980 	*released_lock = NULL;
981 	*left_lock = NULL;
982 	*right_lock = NULL;
983 
984 	retval = NFS_DENIED_NOLOCK;
985 
986 	debuglog("Attempting to match lock...\n");
987 	mfl = get_lock_matching_unlock(fl);
988 
989 	if (mfl != NULL) {
990 		debuglog("Unlock matched.  Querying for split\n");
991 
992 		spstatus = split_nfslock(mfl, fl, left_lock, right_lock);
993 
994 		debuglog("Split returned %d %p %p %p %p\n",spstatus,mfl,fl,*left_lock,*right_lock);
995 		debuglog("********Split dumps********");
996 		dump_filelock(mfl);
997 		dump_filelock(fl);
998 		dump_filelock(*left_lock);
999 		dump_filelock(*right_lock);
1000 		debuglog("********End Split dumps********");
1001 
1002 		if (spstatus == SPL_RESERR) {
1003 			if (*left_lock != NULL) {
1004 				deallocate_file_lock(*left_lock);
1005 				*left_lock = NULL;
1006 			}
1007 
1008 			if (*right_lock != NULL) {
1009 				deallocate_file_lock(*right_lock);
1010 				*right_lock = NULL;
1011 			}
1012 
1013 			return NFS_RESERR;
1014 		}
1015 
1016 		/* Insert new locks from split if required */
1017 		if (*left_lock != NULL) {
1018 			debuglog("Split left activated\n");
1019 			LIST_INSERT_HEAD(&nfslocklist_head, *left_lock, nfslocklist);
1020 		}
1021 
1022 		if (*right_lock != NULL) {
1023 			debuglog("Split right activated\n");
1024 			LIST_INSERT_HEAD(&nfslocklist_head, *right_lock, nfslocklist);
1025 		}
1026 
1027 		/* Unlock the lock since it matches identity */
1028 		LIST_REMOVE(mfl, nfslocklist);
1029 		*released_lock = mfl;
1030 		retval = NFS_GRANTED;
1031 	}
1032 
1033 	debuglog("Exiting unlock_nfslock\n");
1034 
1035 	return retval;
1036 }
1037 
1038 /*
1039  * Below here are the routines for manipulating the file lock directly
1040  * on the disk hardware itself
1041  */
1042 enum hwlock_status
1043 lock_hwlock(struct file_lock *fl)
1044 {
1045 	struct monfile *imf,*nmf;
1046 	int lflags, flerror;
1047 
1048 	/* Scan to see if filehandle already present */
1049 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1050 		if (bcmp(&fl->filehandle, &imf->filehandle,
1051 			sizeof(fl->filehandle)) == 0) {
1052 			/* imf is the correct filehandle */
1053 			break;
1054 		}
1055 	}
1056 
1057 	/*
1058 	 * Filehandle already exists (we control the file)
1059 	 * *AND* NFS has already cleared the lock for availability
1060 	 * Grant it and bump the refcount.
1061 	 */
1062 	if (imf != NULL) {
1063 		++(imf->refcount);
1064 		return (HW_GRANTED);
1065 	}
1066 
1067 	/* No filehandle found, create and go */
1068 	nmf = malloc(sizeof(struct monfile));
1069 	if (nmf == NULL) {
1070 		debuglog("hwlock resource allocation failure\n");
1071 		return (HW_RESERR);
1072 	}
1073 
1074 	/* XXX: Is O_RDWR always the correct mode? */
1075 	nmf->fd = fhopen(&fl->filehandle, O_RDWR);
1076 	if (nmf->fd < 0) {
1077 		debuglog("fhopen failed (from %16s): %32s\n",
1078 		    fl->client_name, strerror(errno));
1079 		free(nmf);
1080 		switch (errno) {
1081 		case ESTALE:
1082 			return (HW_STALEFH);
1083 		case EROFS:
1084 			return (HW_READONLY);
1085 		default:
1086 			return (HW_RESERR);
1087 		}
1088 	}
1089 
1090 	/* File opened correctly, fill the monitor struct */
1091 	bcopy(&fl->filehandle, &nmf->filehandle, sizeof(fl->filehandle));
1092 	nmf->refcount = 1;
1093 	nmf->exclusive = fl->client.exclusive;
1094 
1095 	lflags = (nmf->exclusive == 1) ?
1096 	    (LOCK_EX | LOCK_NB) : (LOCK_SH | LOCK_NB);
1097 
1098 	flerror = flock(nmf->fd, lflags);
1099 
1100 	if (flerror != 0) {
1101 		debuglog("flock failed (from %16s): %32s\n",
1102 		    fl->client_name, strerror(errno));
1103 		close(nmf->fd);
1104 		free(nmf);
1105 		switch (errno) {
1106 		case EAGAIN:
1107 			return (HW_DENIED);
1108 		case ESTALE:
1109 			return (HW_STALEFH);
1110 		case EROFS:
1111 			return (HW_READONLY);
1112 		default:
1113 			return (HW_RESERR);
1114 			break;
1115 		}
1116 	}
1117 
1118 	/* File opened and locked */
1119 	LIST_INSERT_HEAD(&monfilelist_head, nmf, monfilelist);
1120 
1121 	debuglog("flock succeeded (from %16s)\n", fl->client_name);
1122 	return (HW_GRANTED);
1123 }
1124 
1125 enum hwlock_status
1126 unlock_hwlock(const struct file_lock *fl)
1127 {
1128 	struct monfile *imf;
1129 
1130 	debuglog("Entering unlock_hwlock\n");
1131 	debuglog("Entering loop iteration\n");
1132 
1133 	/* Scan to see if filehandle already present */
1134 	LIST_FOREACH(imf, &monfilelist_head, monfilelist) {
1135 		if (bcmp(&fl->filehandle, &imf->filehandle,
1136 			sizeof(fl->filehandle)) == 0) {
1137 			/* imf is the correct filehandle */
1138 			break;
1139 		}
1140 	}
1141 
1142 	debuglog("Completed iteration.  Proceeding\n");
1143 
1144 	if (imf == NULL) {
1145 		/* No lock found */
1146 		debuglog("Exiting unlock_hwlock (HW_DENIED_NOLOCK)\n");
1147 		return (HW_DENIED_NOLOCK);
1148 	}
1149 
1150 	/* Lock found */
1151 	--imf->refcount;
1152 
1153 	if (imf->refcount < 0) {
1154 		debuglog("Negative hardware reference count\n");
1155 	}
1156 
1157 	if (imf->refcount <= 0) {
1158 		close(imf->fd);
1159 		LIST_REMOVE(imf, monfilelist);
1160 		free(imf);
1161 	}
1162 	debuglog("Exiting unlock_hwlock (HW_GRANTED)\n");
1163 	return (HW_GRANTED);
1164 }
1165 
1166 enum hwlock_status
1167 test_hwlock(fl, conflicting_fl)
1168 	const struct file_lock *fl __unused;
1169 	struct file_lock **conflicting_fl __unused;
1170 {
1171 
1172 	/*
1173 	 * XXX: lock tests on hardware are not required until
1174 	 * true partial file testing is done on the underlying file
1175 	 */
1176 	return (HW_RESERR);
1177 }
1178 
1179 
1180 
1181 /*
1182  * Below here are routines for manipulating blocked lock requests
1183  * They should only be called from the XXX_partialfilelock routines
1184  * if at all possible
1185  */
1186 
1187 int
1188 duplicate_block(struct file_lock *fl)
1189 {
1190 	struct file_lock *ifl;
1191 	int retval = 0;
1192 
1193 	debuglog("Entering duplicate_block");
1194 
1195 	/*
1196 	 * Is this lock request already on the blocking list?
1197 	 * Consider it a dupe if the file handles, offset, length,
1198 	 * exclusivity and client match.
1199 	 */
1200 	LIST_FOREACH(ifl, &blockedlocklist_head, nfslocklist) {
1201 		if (!bcmp(&fl->filehandle, &ifl->filehandle,
1202 			sizeof(fhandle_t)) &&
1203 		    fl->client.exclusive == ifl->client.exclusive &&
1204 		    fl->client.l_offset == ifl->client.l_offset &&
1205 		    fl->client.l_len == ifl->client.l_len &&
1206 		    same_filelock_identity(fl, ifl)) {
1207 			retval = 1;
1208 			break;
1209 		}
1210 	}
1211 
1212 	debuglog("Exiting duplicate_block: %s\n", retval ? "already blocked"
1213 	    : "not already blocked");
1214 	return retval;
1215 }
1216 
1217 void
1218 add_blockingfilelock(struct file_lock *fl)
1219 {
1220 	debuglog("Entering add_blockingfilelock\n");
1221 
1222 	/*
1223 	 * A blocking lock request _should_ never be duplicated as a client
1224 	 * that is already blocked shouldn't be able to request another
1225 	 * lock. Alas, there are some buggy clients that do request the same
1226 	 * lock repeatedly. Make sure only unique locks are on the blocked
1227 	 * lock list.
1228 	 */
1229 	if (duplicate_block(fl)) {
1230 		debuglog("Exiting add_blockingfilelock: already blocked\n");
1231 		return;
1232 	}
1233 
1234 	/*
1235 	 * Clear the blocking flag so that it can be reused without
1236 	 * adding it to the blocking queue a second time
1237 	 */
1238 
1239 	fl->blocking = 0;
1240 	LIST_INSERT_HEAD(&blockedlocklist_head, fl, nfslocklist);
1241 
1242 	debuglog("Exiting add_blockingfilelock: added blocked lock\n");
1243 }
1244 
1245 void
1246 remove_blockingfilelock(struct file_lock *fl)
1247 {
1248 
1249 	debuglog("Entering remove_blockingfilelock\n");
1250 
1251 	LIST_REMOVE(fl, nfslocklist);
1252 
1253 	debuglog("Exiting remove_blockingfilelock\n");
1254 }
1255 
1256 void
1257 clear_blockingfilelock(const char *hostname)
1258 {
1259 	struct file_lock *ifl,*nfl;
1260 
1261 	/*
1262 	 * Normally, LIST_FOREACH is called for, but since
1263 	 * the current element *is* the iterator, deleting it
1264 	 * would mess up the iteration.  Thus, a next element
1265 	 * must be used explicitly
1266 	 */
1267 
1268 	ifl = LIST_FIRST(&blockedlocklist_head);
1269 
1270 	while (ifl != NULL) {
1271 		nfl = LIST_NEXT(ifl, nfslocklist);
1272 
1273 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1274 			remove_blockingfilelock(ifl);
1275 			deallocate_file_lock(ifl);
1276 		}
1277 
1278 		ifl = nfl;
1279 	}
1280 }
1281 
1282 void
1283 retry_blockingfilelocklist(void)
1284 {
1285 	/* Retry all locks in the blocked list */
1286 	struct file_lock *ifl, *nfl; /* Iterator */
1287 	enum partialfilelock_status pflstatus;
1288 
1289 	debuglog("Entering retry_blockingfilelocklist\n");
1290 
1291 	LIST_FOREACH_SAFE(ifl, &blockedlocklist_head, nfslocklist, nfl) {
1292 		debuglog("Iterator choice %p\n",ifl);
1293 		debuglog("Next iterator choice %p\n",nfl);
1294 
1295 		/*
1296 		 * SUBTLE BUG: The file_lock must be removed from the
1297 		 * old list so that its list pointers get disconnected
1298 		 * before being allowed to participate in the new list
1299 		 * which will automatically add it in if necessary.
1300 		 */
1301 
1302 		LIST_REMOVE(ifl, nfslocklist);
1303 		pflstatus = lock_partialfilelock(ifl);
1304 
1305 		if (pflstatus == PFL_GRANTED || pflstatus == PFL_GRANTED_DUPLICATE) {
1306 			debuglog("Granted blocked lock\n");
1307 			/* lock granted and is now being used */
1308 			send_granted(ifl,0);
1309 		} else {
1310 			/* Reinsert lock back into blocked list */
1311 			debuglog("Replacing blocked lock\n");
1312 			LIST_INSERT_HEAD(&blockedlocklist_head, ifl, nfslocklist);
1313 		}
1314 	}
1315 
1316 	debuglog("Exiting retry_blockingfilelocklist\n");
1317 }
1318 
1319 /*
1320  * Below here are routines associated with manipulating all
1321  * aspects of the partial file locking system (list, hardware, etc.)
1322  */
1323 
1324 /*
1325  * Please note that lock monitoring must be done at this level which
1326  * keeps track of *individual* lock requests on lock and unlock
1327  *
1328  * XXX: Split unlocking is going to make the unlock code miserable
1329  */
1330 
1331 /*
1332  * lock_partialfilelock:
1333  *
1334  * Argument fl gets modified as its list housekeeping entries get modified
1335  * upon insertion into the NFS lock list
1336  *
1337  * This routine makes several assumptions:
1338  * 1) It (will) pass locks through to flock to lock the entire underlying file
1339  *     and then parcel out NFS locks if it gets control of the file.
1340  *         This matches the old rpc.lockd file semantics (except where it
1341  *         is now more correct).  It is the safe solution, but will cause
1342  *         overly restrictive blocking if someone is trying to use the
1343  *         underlying files without using NFS.  This appears to be an
1344  *         acceptable tradeoff since most people use standalone NFS servers.
1345  * XXX: The right solution is probably kevent combined with fcntl
1346  *
1347  *    2) Nothing modifies the lock lists between testing and granting
1348  *           I have no idea whether this is a useful assumption or not
1349  */
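/*
 * Sketch of the resulting control flow: lock_nfslock() is attempted first;
 * on success lock_hwlock() takes the whole-file flock().  If the hardware
 * lock fails, the trial NFS lock is backed out with delete_nfslock(), and a
 * blocking request denied at the NFS level is queued with
 * add_blockingfilelock() for a later retry via retry_blockingfilelocklist().
 */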
1350 
1351 enum partialfilelock_status
1352 lock_partialfilelock(struct file_lock *fl)
1353 {
1354 	enum partialfilelock_status retval;
1355 	enum nfslock_status lnlstatus;
1356 	enum hwlock_status hwstatus;
1357 
1358 	debuglog("Entering lock_partialfilelock\n");
1359 
1360 	retval = PFL_DENIED;
1361 
1362 	/*
1363 	 * Execute the NFS lock first, if possible, as it is significantly
1364 	 * easier and less expensive to undo than the filesystem lock
1365 	 */
1366 
1367 	lnlstatus = lock_nfslock(fl);
1368 
1369 	switch (lnlstatus) {
1370 	case NFS_GRANTED:
1371 	case NFS_GRANTED_DUPLICATE:
1372 		/*
1373 		 * At this point, the NFS lock is allocated and active.
1374 		 * Remember to clean it up if the hardware lock fails
1375 		 */
1376 		hwstatus = lock_hwlock(fl);
1377 
1378 		switch (hwstatus) {
1379 		case HW_GRANTED:
1380 		case HW_GRANTED_DUPLICATE:
1381 			debuglog("HW GRANTED\n");
1382 			/*
1383 			 * XXX: Fixme: Check hwstatus for duplicate when
1384 			 * true partial file locking and accounting is
1385 			 * done on the hardware.
1386 			 */
1387 			if (lnlstatus == NFS_GRANTED_DUPLICATE) {
1388 				retval = PFL_GRANTED_DUPLICATE;
1389 			} else {
1390 				retval = PFL_GRANTED;
1391 			}
1392 			monitor_lock_host(fl->client_name);
1393 			break;
1394 		case HW_RESERR:
1395 			debuglog("HW RESERR\n");
1396 			retval = PFL_HWRESERR;
1397 			break;
1398 		case HW_DENIED:
1399 			debuglog("HW DENIED\n");
1400 			retval = PFL_HWDENIED;
1401 			break;
1402 		default:
1403 			debuglog("Unmatched hwstatus %d\n",hwstatus);
1404 			break;
1405 		}
1406 
1407 		if (retval != PFL_GRANTED &&
1408 		    retval != PFL_GRANTED_DUPLICATE) {
1409 			/* Clean up the NFS lock */
1410 			debuglog("Deleting trial NFS lock\n");
1411 			delete_nfslock(fl);
1412 		}
1413 		break;
1414 	case NFS_DENIED:
1415 		retval = PFL_NFSDENIED;
1416 		break;
1417 	case NFS_RESERR:
1418 		retval = PFL_NFSRESERR;
1419 		break;
1420 	default:
1421 		debuglog("Unmatched lnlstatus %d\n", lnlstatus);
1422 		retval = PFL_NFSDENIED_NOLOCK;
1423 		break;
1424 	}
1425 
1426 	/*
1427 	 * By the time fl reaches here, it is completely free again on
1428 	 * failure.  The NFS lock done before attempting the
1429 	 * hardware lock has been backed out
1430 	 */
1431 
1432 	if (retval == PFL_NFSDENIED || retval == PFL_HWDENIED) {
1433 		/* One last chance to check the lock */
1434 		if (fl->blocking == 1) {
1435 			if (retval == PFL_NFSDENIED) {
1436 				/* Queue the lock */
1437 				debuglog("BLOCKING LOCK RECEIVED\n");
1438 				retval = PFL_NFSBLOCKED;
1439 				add_blockingfilelock(fl);
1440 				dump_filelock(fl);
1441 			} else {
1442 				/* retval is okay as PFL_HWDENIED */
1443 				debuglog("BLOCKING LOCK DENIED IN HARDWARE\n");
1444 				dump_filelock(fl);
1445 			}
1446 		} else {
1447 			/* Leave retval alone, it's already correct */
1448 			debuglog("Lock denied.  Non-blocking failure\n");
1449 			dump_filelock(fl);
1450 		}
1451 	}
1452 
1453 	debuglog("Exiting lock_partialfilelock\n");
1454 
1455 	return retval;
1456 }
1457 
1458 /*
1459  * unlock_partialfilelock:
1460  *
1461  * Given a file_lock, unlock all locks which match.
1462  *
1463  * Note that a given lock might have to unlock ITSELF!  See
1464  * clear_partialfilelock for example.
1465  */
1466 
1467 enum partialfilelock_status
1468 unlock_partialfilelock(const struct file_lock *fl)
1469 {
1470 	struct file_lock *lfl,*rfl,*releasedfl,*selffl;
1471 	enum partialfilelock_status retval;
1472 	enum nfslock_status unlstatus;
1473 	enum hwlock_status unlhwstatus, lhwstatus;
1474 
1475 	debuglog("Entering unlock_partialfilelock\n");
1476 
1477 	selffl = NULL;
1478 	lfl = NULL;
1479 	rfl = NULL;
1480 	releasedfl = NULL;
1481 	retval = PFL_DENIED;
1482 
1483 	/*
1484 	 * There are significant overlap and atomicity issues
1485 	 * with partially releasing a lock.  For example, releasing
1486 	 * part of an NFS shared lock does *not* always release the
1487 	 * corresponding part of the file since there is only one
1488 	 * rpc.lockd UID but multiple users could be requesting it
1489 	 * from NFS.  Also, an unlock request should never allow
1490 	 * another process to gain a lock on the remaining parts.
1491 	 * ie. Always apply the new locks before releasing the
1492 	 * old one
1493 	 */
1494 
1495 	/*
1496 	 * Loop is required since multiple little locks
1497 	 * can be allocated and then deallocated with one
1498 	 * big unlock.
1499 	 *
1500 	 * The loop is required to be here so that the nfs &
1501  * hw subsystems do not need to communicate with
1502  * one another
1503 	 */
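	/*
	 * Example: one unlock spanning the whole file may match several
	 * separate NFS locks; each pass releases exactly one of them (and
	 * re-inserts any split remainders) until unlock_nfslock() reports
	 * NFS_DENIED_NOLOCK and the loop terminates.
	 */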
1504 
1505 	do {
1506 		debuglog("Value of releasedfl: %p\n",releasedfl);
1507 		/* lfl&rfl are created *AND* placed into the NFS lock list if required */
1508 		unlstatus = unlock_nfslock(fl, &releasedfl, &lfl, &rfl);
1509 		debuglog("Value of releasedfl: %p\n",releasedfl);
1510 
1511 
1512 		/* XXX: This is grungy.  It should be refactored to be cleaner */
1513 		if (lfl != NULL) {
1514 			lhwstatus = lock_hwlock(lfl);
1515 			if (lhwstatus != HW_GRANTED &&
1516 			    lhwstatus != HW_GRANTED_DUPLICATE) {
1517 				debuglog("HW duplicate lock failure for left split\n");
1518 			}
1519 			monitor_lock_host(lfl->client_name);
1520 		}
1521 
1522 		if (rfl != NULL) {
1523 			lhwstatus = lock_hwlock(rfl);
1524 			if (lhwstatus != HW_GRANTED &&
1525 			    lhwstatus != HW_GRANTED_DUPLICATE) {
1526 				debuglog("HW duplicate lock failure for right split\n");
1527 			}
1528 			monitor_lock_host(rfl->client_name);
1529 		}
1530 
1531 		switch (unlstatus) {
1532 		case NFS_GRANTED:
1533 			/* Attempt to unlock on the hardware */
1534 			debuglog("NFS unlock granted.  Attempting hardware unlock\n");
1535 
1536 			/* This call *MUST NOT* unlock the two newly allocated locks */
1537 			unlhwstatus = unlock_hwlock(fl);
1538 			debuglog("HW unlock returned with code %d\n",unlhwstatus);
1539 
1540 			switch (unlhwstatus) {
1541 			case HW_GRANTED:
1542 				debuglog("HW unlock granted\n");
1543 				unmonitor_lock_host(releasedfl->client_name);
1544 				retval = PFL_GRANTED;
1545 				break;
1546 			case HW_DENIED_NOLOCK:
1547 				/* Huh?!?!  This shouldn't happen */
1548 				debuglog("HW unlock denied no lock\n");
1549 				retval = PFL_HWRESERR;
1550 				/* Break out of do-while */
1551 				unlstatus = NFS_RESERR;
1552 				break;
1553 			default:
1554 				debuglog("HW unlock failed\n");
1555 				retval = PFL_HWRESERR;
1556 				/* Break out of do-while */
1557 				unlstatus = NFS_RESERR;
1558 				break;
1559 			}
1560 
1561 			debuglog("Exiting with status retval: %d\n",retval);
1562 
1563 			retry_blockingfilelocklist();
1564 			break;
1565 		case NFS_DENIED_NOLOCK:
1566 			retval = PFL_GRANTED;
1567 			debuglog("All locks cleaned out\n");
1568 			break;
1569 		default:
1570 			retval = PFL_NFSRESERR;
1571 			debuglog("NFS unlock failure\n");
1572 			dump_filelock(fl);
1573 			break;
1574 		}
1575 
1576 		if (releasedfl != NULL) {
1577 			if (fl == releasedfl) {
1578 				/*
1579 				 * XXX: YECHHH!!! Attempt to unlock self succeeded
1580 				 * but we can't deallocate the space yet.  This is what
1581 				 * happens when you don't write malloc and free together
1582 				 */
1583 				debuglog("Attempt to unlock self\n");
1584 				selffl = releasedfl;
1585 			} else {
1586 				/*
1587 				 * XXX: this deallocation *still* needs to migrate closer
1588 				 * to the allocation code way up in get_lock or the allocation
1589 				 * code needs to migrate down (violation of "When you write
1590 				 * malloc you must write free")
1591 				 */
1592 
1593 				deallocate_file_lock(releasedfl);
1594 				releasedfl = NULL;
1595 			}
1596 		}
1597 
1598 	} while (unlstatus == NFS_GRANTED);
1599 
1600 	if (selffl != NULL) {
1601 		/*
1602 		 * This statement wipes out the incoming file lock (fl)
1603 		 * in spite of the fact that it is declared const
1604 		 */
1605 		debuglog("WARNING!  Destroying incoming lock pointer\n");
1606 		deallocate_file_lock(selffl);
1607 	}
1608 
1609 	debuglog("Exiting unlock_partialfilelock\n");
1610 
1611 	return retval;
1612 }
1613 
1614 /*
1615  * clear_partialfilelock
1616  *
1617  * Normally called in response to statd state number change.
1618  * Wipe out all locks held by a host.  As a bonus, the act of
1619  * doing so should automatically clear their statd entries and
1620  * unmonitor the host.
1621  */
1622 
1623 void
1624 clear_partialfilelock(const char *hostname)
1625 {
1626 	struct file_lock *ifl, *nfl;
1627 
1628 	/* Clear blocking file lock list */
1629 	clear_blockingfilelock(hostname);
1630 
1631 	/* do all required unlocks */
1632 	/* Note that unlock can smash the current pointer to a lock */
1633 
1634 	/*
1635 	 * Normally, LIST_FOREACH is called for, but since
1636 	 * the current element *is* the iterator, deleting it
1637 	 * would mess up the iteration.  Thus, a next element
1638 	 * must be used explicitly
1639 	 */
1640 
1641 	ifl = LIST_FIRST(&nfslocklist_head);
1642 
1643 	while (ifl != NULL) {
1644 		nfl = LIST_NEXT(ifl, nfslocklist);
1645 
1646 		if (strncmp(hostname, ifl->client_name, SM_MAXSTRLEN) == 0) {
1647 			/* Unlock destroys ifl out from underneath */
1648 			unlock_partialfilelock(ifl);
1649 			/* ifl is NO LONGER VALID AT THIS POINT */
1650 		}
1651 		ifl = nfl;
1652 	}
1653 }
1654 
1655 /*
1656  * test_partialfilelock:
1657  */
1658 enum partialfilelock_status
1659 test_partialfilelock(const struct file_lock *fl,
1660     struct file_lock **conflicting_fl)
1661 {
1662 	enum partialfilelock_status retval;
1663 	enum nfslock_status teststatus;
1664 
1665 	debuglog("Entering testpartialfilelock...\n");
1666 
1667 	retval = PFL_DENIED;
1668 
1669 	teststatus = test_nfslock(fl, conflicting_fl);
1670 	debuglog("test_partialfilelock: teststatus %d\n",teststatus);
1671 
1672 	if (teststatus == NFS_GRANTED || teststatus == NFS_GRANTED_DUPLICATE) {
1673 		/* XXX: Add the underlying filesystem locking code */
1674 		retval = (teststatus == NFS_GRANTED) ?
1675 		    PFL_GRANTED : PFL_GRANTED_DUPLICATE;
1676 		debuglog("Dumping locks...\n");
1677 		dump_filelock(fl);
1678 		dump_filelock(*conflicting_fl);
1679 		debuglog("Done dumping locks...\n");
1680 	} else {
1681 		retval = PFL_NFSDENIED;
1682 		debuglog("NFS test denied.\n");
1683 		dump_filelock(fl);
1684 		debuglog("Conflicting.\n");
1685 		dump_filelock(*conflicting_fl);
1686 	}
1687 
1688 	debuglog("Exiting testpartialfilelock...\n");
1689 
1690 	return retval;
1691 }
1692 
1693 /*
1694  * Below here are routines associated with translating the partial file locking
1695  * codes into useful codes to send back to the NFS RPC messaging system
1696  */
1697 
1698 /*
1699  * These routines translate the (relatively) useful return codes back onto
1700  * the few return codes which the nlm subsystem wishes to transmit
1701  */
1702 
1703 enum nlm_stats
1704 do_test(struct file_lock *fl, struct file_lock **conflicting_fl)
1705 {
1706 	enum partialfilelock_status pfsret;
1707 	enum nlm_stats retval;
1708 
1709 	debuglog("Entering do_test...\n");
1710 
1711 	pfsret = test_partialfilelock(fl,conflicting_fl);
1712 
1713 	switch (pfsret) {
1714 	case PFL_GRANTED:
1715 		debuglog("PFL test lock granted\n");
1716 		dump_filelock(fl);
1717 		dump_filelock(*conflicting_fl);
1718 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1719 		break;
1720 	case PFL_GRANTED_DUPLICATE:
1721 		debuglog("PFL test lock granted--duplicate id detected\n");
1722 		dump_filelock(fl);
1723 		dump_filelock(*conflicting_fl);
1724 		debuglog("Clearing conflicting_fl for call semantics\n");
1725 		*conflicting_fl = NULL;
1726 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1727 		break;
1728 	case PFL_NFSDENIED:
1729 	case PFL_HWDENIED:
1730 		debuglog("PFL test lock denied\n");
1731 		dump_filelock(fl);
1732 		dump_filelock(*conflicting_fl);
1733 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1734 		break;
1735 	case PFL_NFSRESERR:
1736 	case PFL_HWRESERR:
1737 		debuglog("PFL test lock resource fail\n");
1738 		dump_filelock(fl);
1739 		dump_filelock(*conflicting_fl);
1740 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1741 		break;
1742 	default:
1743 		debuglog("PFL test lock *FAILED*\n");
1744 		dump_filelock(fl);
1745 		dump_filelock(*conflicting_fl);
1746 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1747 		break;
1748 	}
1749 
1750 	debuglog("Exiting do_test...\n");
1751 
1752 	return retval;
1753 }
1754 
1755 /*
1756  * do_lock: Try to acquire a lock
1757  *
1758  * This routine makes a distinction between NLM versions.  I am pretty
1759  * convinced that this should be abstracted out and bounced up a level
1760  */
1761 
1762 enum nlm_stats
1763 do_lock(struct file_lock *fl)
1764 {
1765 	enum partialfilelock_status pfsret;
1766 	enum nlm_stats retval;
1767 
1768 	debuglog("Entering do_lock...\n");
1769 
1770 	pfsret = lock_partialfilelock(fl);
1771 
1772 	switch (pfsret) {
1773 	case PFL_GRANTED:
1774 		debuglog("PFL lock granted");
1775 		dump_filelock(fl);
1776 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1777 		break;
1778 	case PFL_GRANTED_DUPLICATE:
1779 		debuglog("PFL lock granted--duplicate id detected");
1780 		dump_filelock(fl);
1781 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1782 		break;
1783 	case PFL_NFSDENIED:
1784 	case PFL_HWDENIED:
1785 		debuglog("PFL_NFS lock denied");
1786 		dump_filelock(fl);
1787 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1788 		break;
1789 	case PFL_NFSBLOCKED:
1790 	case PFL_HWBLOCKED:
1791 		debuglog("PFL_NFS blocking lock denied.  Queued.\n");
1792 		dump_filelock(fl);
1793 		retval = (fl->flags & LOCK_V4) ? nlm4_blocked : nlm_blocked;
1794 		break;
1795 	case PFL_NFSRESERR:
1796 	case PFL_HWRESERR:
1797 		debuglog("PFL lock resource allocation fail\n");
1798 		dump_filelock(fl);
1799 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1800 		break;
1801 	default:
1802 		debuglog("PFL lock *FAILED*");
1803 		dump_filelock(fl);
1804 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1805 		break;
1806 	}
1807 
1808 	debuglog("Exiting do_lock...\n");
1809 
1810 	return retval;
1811 }
1812 
1813 enum nlm_stats
1814 do_unlock(struct file_lock *fl)
1815 {
1816 	enum partialfilelock_status pfsret;
1817 	enum nlm_stats retval;
1818 
1819 	debuglog("Entering do_unlock...\n");
1820 	pfsret = unlock_partialfilelock(fl);
1821 
1822 	switch (pfsret) {
1823 	case PFL_GRANTED:
1824 		debuglog("PFL unlock granted");
1825 		dump_filelock(fl);
1826 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1827 		break;
1828 	case PFL_NFSDENIED:
1829 	case PFL_HWDENIED:
1830 		debuglog("PFL_NFS unlock denied");
1831 		dump_filelock(fl);
1832 		retval = (fl->flags & LOCK_V4) ? nlm4_denied : nlm_denied;
1833 		break;
1834 	case PFL_NFSDENIED_NOLOCK:
1835 	case PFL_HWDENIED_NOLOCK:
1836 		debuglog("PFL_NFS no lock found\n");
1837 		retval = (fl->flags & LOCK_V4) ? nlm4_granted : nlm_granted;
1838 		break;
1839 	case PFL_NFSRESERR:
1840 	case PFL_HWRESERR:
1841 		debuglog("PFL unlock resource failure");
1842 		dump_filelock(fl);
1843 		retval = (fl->flags & LOCK_V4) ? nlm4_denied_nolocks : nlm_denied_nolocks;
1844 		break;
1845 	default:
1846 		debuglog("PFL unlock *FAILED*");
1847 		dump_filelock(fl);
1848 		retval = (fl->flags & LOCK_V4) ? nlm4_failed : nlm_denied;
1849 		break;
1850 	}
1851 
1852 	debuglog("Exiting do_unlock...\n");
1853 
1854 	return retval;
1855 }
1856 
1857 /*
1858  * do_clear
1859  *
1860  * This routine is a trivial pass-through, since clear_partialfilelock
1861  * returns no status code.  It is here for completeness in case someone *does*
1862  * need return codes later.  A decent compiler should optimize it away.
1863  */
1864 
1865 void
1866 do_clear(const char *hostname)
1867 {
1868 
1869 	clear_partialfilelock(hostname);
1870 }
1871 
1872 /*
1873  * The following routines are all called from the code which the
1874  * RPC layer invokes
1875  */
1876 
1877 /*
1878  * testlock(): inform the caller if the requested lock would be granted
1879  *
1880  * returns NULL if the lock would be granted
1881  * returns pointer to a conflicting nlm4_holder if not
1882  */
1883 
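/*
 * Illustrative caller (a sketch, not code from this file): an NLM_TEST
 * service routine could use testlock() roughly as follows.  The names
 * "arg", "res" and the nlm4_testrply_u reply union are assumptions about
 * the rpcgen-generated argument/result structures, not names defined here:
 *
 *	struct nlm4_holder *holder;
 *
 *	holder = testlock(&arg->alock, arg->exclusive, LOCK_V4);
 *	if (holder == NULL)
 *		res.stat.stat = nlm4_granted;
 *	else {
 *		res.stat.stat = nlm4_denied;
 *		res.stat.nlm4_testrply_u.holder = *holder;
 *	}
 */
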
1884 struct nlm4_holder *
1885 testlock(struct nlm4_lock *lock, bool_t exclusive, int flags __unused)
1886 {
1887 	struct file_lock test_fl, *conflicting_fl;
1888 
1889 	bzero(&test_fl, sizeof(test_fl));
1890 
1891 	bcopy(lock->fh.n_bytes, &(test_fl.filehandle), sizeof(fhandle_t));
1892 	copy_nlm4_lock_to_nlm4_holder(lock, exclusive, &test_fl.client);
1893 
1894 	siglock();
1895 	do_test(&test_fl, &conflicting_fl);
1896 
1897 	if (conflicting_fl == NULL) {
1898 		debuglog("No conflicting lock found\n");
1899 		sigunlock();
1900 		return NULL;
1901 	} else {
1902 		debuglog("Found conflicting lock\n");
1903 		dump_filelock(conflicting_fl);
1904 		sigunlock();
1905 		return (&conflicting_fl->client);
1906 	}
1907 }
1908 
1909 /*
1910  * getlock: try to acquire the lock.
1911  * If file is already locked and we can sleep, put the lock in the list with
1912  * status LKST_WAITING; it'll be processed later.
1913  * Otherwise try to lock.  If the client allows blocking, the request is
1914  * left on the blocking lock list and granted later (see send_granted()).
1915  */
1916 
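/*
 * Illustrative caller (a sketch, not code from this file): an NLM_LOCK
 * service routine could invoke getlock() roughly as follows; "arg" here
 * stands for the rpcgen-decoded lock arguments, an assumption about the
 * caller rather than a name defined in this file:
 *
 *	enum nlm_stats stat;
 *
 *	stat = getlock(arg, rqstp, LOCK_V4);
 *	if (stat == nlm4_blocked) {
 *		... the request has been queued; send_granted() will
 *		    notify the client when the lock is eventually granted ...
 *	}
 */
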
1917 enum nlm_stats
1918 getlock(nlm4_lockargs *lckarg, struct svc_req *rqstp, const int flags)
1919 {
1920 	struct file_lock *newfl;
1921 	enum nlm_stats retval;
1922 
1923 	debuglog("Entering getlock...\n");
1924 
1925 	if (grace_expired == 0 && lckarg->reclaim == 0)
1926 		return (flags & LOCK_V4) ?
1927 		    nlm4_denied_grace_period : nlm_denied_grace_period;
1928 
1929 	/* allocate new file_lock for this request */
1930 	newfl = allocate_file_lock(&lckarg->alock.oh, &lckarg->cookie,
1931 				   (struct sockaddr *)svc_getrpccaller(rqstp->rq_xprt)->buf, lckarg->alock.caller_name);
1932 	if (newfl == NULL) {
1933 		syslog(LOG_NOTICE, "lock allocate failed: %s", strerror(errno));
1934 		/* failed */
1935 		return (flags & LOCK_V4) ?
1936 		    nlm4_denied_nolocks : nlm_denied_nolocks;
1937 	}
1938 
1939 	if (lckarg->alock.fh.n_len != sizeof(fhandle_t)) {
1940 		debuglog("received fhandle size %d, local size %d",
1941 		    lckarg->alock.fh.n_len, (int)sizeof(fhandle_t));
1942 	}
1943 
1944 	fill_file_lock(newfl, (fhandle_t *)lckarg->alock.fh.n_bytes,
1945 	    lckarg->exclusive, lckarg->alock.svid, lckarg->alock.l_offset,
1946 	    lckarg->alock.l_len,
1947 	    lckarg->state, 0, flags, lckarg->block);
1948 
1949 	/*
1950 	 * newfl is now fully constructed and deallocate_file_lock
1951 	 * can now be used to delete it
1952 	 */
1953 
1954 	siglock();
1955 	debuglog("Pointer to new lock is %p\n",newfl);
1956 
1957 	retval = do_lock(newfl);
1958 
1959 	debuglog("Pointer to new lock is %p\n",newfl);
1960 	sigunlock();
1961 
1962 	switch (retval) {
1963 	case nlm4_granted:
1964 		/* case nlm_granted: is the same as nlm4_granted */
1965 		/* do_mon(lckarg->alock.caller_name); */
1966 		break;
1967 	case nlm4_blocked:
1968 		/* case nlm_blocked: is the same as nlm4_blocked */
1969 		/* do_mon(lckarg->alock.caller_name); */
1970 		break;
1971 	default:
1972 		deallocate_file_lock(newfl);
1973 		break;
1974 	}
1976 
1977 	debuglog("Exiting getlock...\n");
1978 
1979 	return retval;
1980 }
1981 
1982 
1983 /* unlock a filehandle */
1984 enum nlm_stats
1985 unlock(nlm4_lock *lock, const int flags __unused)
1986 {
1987 	struct file_lock fl;
1988 	enum nlm_stats err;
1989 
1990 	siglock();
1991 
1992 	debuglog("Entering unlock...\n");
1993 
1994 	bzero(&fl,sizeof(struct file_lock));
1995 	bcopy(lock->fh.n_bytes, &fl.filehandle, sizeof(fhandle_t));
1996 
1997 	copy_nlm4_lock_to_nlm4_holder(lock, 0, &fl.client);
1998 
1999 	err = do_unlock(&fl);
2000 
2001 	sigunlock();
2002 
2003 	debuglog("Exiting unlock...\n");
2004 
2005 	return err;
2006 }
2007 
2008 /*
2009  * XXX: The following monitor/unmonitor routines
2010  * have not been extensively tested (i.e. no regression
2011  * script exists for them as it does for the locking sections)
2012  */
2013 
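/*
 * Added commentary: each successful monitor_lock_host() call is expected
 * to be balanced by a later unmonitor_lock_host() for the same hostname.
 * The host entry holds a reference count; the entry is removed from
 * hostlst_head and statd is asked (via SM_UNMON) to stop watching the
 * host only when that count drops to zero.
 */
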
2014 /*
2015  * monitor_lock_host: monitor lock hosts locally with a ref count and
2016  * inform statd
2017  */
2018 void
2019 monitor_lock_host(const char *hostname)
2020 {
2021 	struct host *ihp, *nhp;
2022 	struct mon smon;
2023 	struct sm_stat_res sres;
2024 	int rpcret, statflag;
2025 	size_t n;
2026 
2027 	rpcret = 0;
2028 	statflag = 0;
2029 
2030 	LIST_FOREACH(ihp, &hostlst_head, hostlst) {
2031 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2032 			/* Host is already monitored, bump refcount */
2033 			++ihp->refcnt;
2034 			/* Host should only be in the monitor list once */
2035 			return;
2036 		}
2037 	}
2038 
2039 	/* Host is not yet monitored, add it */
2040 	n = strnlen(hostname, SM_MAXSTRLEN);
2041 	if (n == SM_MAXSTRLEN) {
2042 		return;
2043 	}
2044 	nhp = malloc(sizeof(*nhp) - sizeof(nhp->name) + n + 1);
2045 	if (nhp == NULL) {
2046 		debuglog("Unable to allocate entry for statd mon\n");
2047 		return;
2048 	}
2049 
2050 	/* Allocated new host entry, now fill the fields */
2051 	memcpy(nhp->name, hostname, n);
2052 	nhp->name[n] = 0;
2053 	nhp->refcnt = 1;
2054 	debuglog("Locally Monitoring host %16s\n",hostname);
2055 
2056 	debuglog("Attempting to tell statd\n");
2057 
2058 	bzero(&smon,sizeof(smon));
2059 
2060 	smon.mon_id.mon_name = nhp->name;
2061 	smon.mon_id.my_id.my_name = "localhost";
2062 	smon.mon_id.my_id.my_prog = NLM_PROG;
2063 	smon.mon_id.my_id.my_vers = NLM_SM;
2064 	smon.mon_id.my_id.my_proc = NLM_SM_NOTIFY;
2065 
2066 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_MON,
2067 	    (xdrproc_t)xdr_mon, &smon,
2068 	    (xdrproc_t)xdr_sm_stat_res, &sres);
2069 
2070 	if (rpcret == 0) {
2071 		if (sres.res_stat == stat_fail) {
2072 			debuglog("Statd call failed\n");
2073 			statflag = 0;
2074 		} else {
2075 			statflag = 1;
2076 		}
2077 	} else {
2078 		debuglog("Rpc call to statd failed with return value: %d\n",
2079 		    rpcret);
2080 		statflag = 0;
2081 	}
2082 
2083 	if (statflag == 1) {
2084 		LIST_INSERT_HEAD(&hostlst_head, nhp, hostlst);
2085 	} else {
2086 		free(nhp);
2087 	}
2088 
2089 }
2090 
2091 /*
2092  * unmonitor_lock_host: clear monitor ref counts and inform statd when gone
2093  */
2094 void
2095 unmonitor_lock_host(char *hostname)
2096 {
2097 	struct host *ihp;
2098 	struct mon_id smon_id;
2099 	struct sm_stat smstat;
2100 	int rpcret;
2101 
2102 	rpcret = 0;
2103 
2104 	for (ihp = LIST_FIRST(&hostlst_head); ihp != NULL;
2105 	     ihp = LIST_NEXT(ihp, hostlst)) {
2106 		if (strncmp(hostname, ihp->name, SM_MAXSTRLEN) == 0) {
2107 			/* Host is monitored, decrement refcount */
2108 			--ihp->refcnt;
2109 			/* Host should only be in the monitor list once */
2110 			break;
2111 		}
2112 	}
2113 
2114 	if (ihp == NULL) {
2115 		debuglog("Could not find host %16s in mon list\n", hostname);
2116 		return;
2117 	}
2118 
2119 	if (ihp->refcnt > 0)
2120 		return;
2121 
2122 	if (ihp->refcnt < 0) {
2123 		debuglog("Negative refcount!: %d\n",
2124 		    ihp->refcnt);
2125 	}
2126 
2127 	debuglog("Attempting to unmonitor host %16s\n", hostname);
2128 
2129 	bzero(&smon_id,sizeof(smon_id));
2130 
2131 	smon_id.mon_name = hostname;
2132 	smon_id.my_id.my_name = "localhost";
2133 	smon_id.my_id.my_prog = NLM_PROG;
2134 	smon_id.my_id.my_vers = NLM_SM;
2135 	smon_id.my_id.my_proc = NLM_SM_NOTIFY;
2136 
2137 	rpcret = callrpc("localhost", SM_PROG, SM_VERS, SM_UNMON,
2138 	    (xdrproc_t)xdr_mon_id, &smon_id,
2139 	    (xdrproc_t)xdr_sm_stat, &smstat);
2140 
2141 	if (rpcret != 0) {
2142 		debuglog("Rpc call to unmonitor statd failed with "
2143 		    "return value: %d\n", rpcret);
2144 	}
2145 
2146 	LIST_REMOVE(ihp, hostlst);
2147 	free(ihp);
2148 }
2149 
2150 /*
2151  * notify: Clear all locks from a host if statd complains
2152  *
2153  * XXX: This routine has not been thoroughly tested.  However, neither
2154  * had the old one been.  It used to compare the statd crash state counter
2155  * to the current lock state.  The upshot of this was that it basically
2156  * cleared all locks from the specified host 99% of the time (with the
2157  * other 1% being a bug).  Consequently, the assumption is that clearing
2158  * all locks from a host when notified by statd is acceptable.
2159  *
2160  * Please note that this routine skips the usual level of redirection
2161  * through a do_* type routine.  This introduces a possible source of
2162  * error; it might be better to write a do_notify routine and remove
2163  * this one.
2164  */
2165 
2166 void
2167 notify(const char *hostname, const int state)
2168 {
2169 	debuglog("notify from %s, new state %d", hostname, state);
2170 
2171 	siglock();
2172 	do_clear(hostname);
2173 	sigunlock();
2174 
2175 	debuglog("Leaving notify\n");
2176 }
2177 
2178 void
2179 send_granted(struct file_lock *fl, int opcode __unused)
2180 {
2183 	CLIENT *cli;
2184 	static char dummy;
2185 	struct timeval timeo;
2186 	int success;
2187 	static struct nlm_res retval;
2188 	static struct nlm4_res retval4;
2189 
2190 	debuglog("About to send granted on blocked lock\n");
2191 
2192 	cli = get_client(fl->addr,
2193 	    (fl->flags & LOCK_V4) ? NLM_VERS4 : NLM_VERS);
2194 	if (cli == NULL) {
2195 		syslog(LOG_NOTICE, "failed to get CLIENT for %s",
2196 		    fl->client_name);
2197 		/*
2198 		 * We failed to notify the remote host that the lock has been
2199 		 * granted.  The client will time out and retry, and the lock
2200 		 * will be granted then.
2201 		 */
2202 		return;
2203 	}
2204 	timeo.tv_sec = 0;
2205 	timeo.tv_usec = (fl->flags & LOCK_ASYNC) ? 0 : 500000; /* 0.5s */
2206 
2207 	if (fl->flags & LOCK_V4) {
2208 		static nlm4_testargs res;
2209 		res.cookie = fl->client_cookie;
2210 		res.exclusive = fl->client.exclusive;
2211 		res.alock.caller_name = fl->client_name;
2212 		res.alock.fh.n_len = sizeof(fhandle_t);
2213 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2214 		res.alock.oh = fl->client.oh;
2215 		res.alock.svid = fl->client.svid;
2216 		res.alock.l_offset = fl->client.l_offset;
2217 		res.alock.l_len = fl->client.l_len;
2218 		debuglog("sending v4 reply%s",
2219 			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2220 		if (fl->flags & LOCK_ASYNC) {
2221 			success = clnt_call(cli, NLM4_GRANTED_MSG,
2222 			    (xdrproc_t)xdr_nlm4_testargs, &res,
2223 			    (xdrproc_t)xdr_void, &dummy, timeo);
2224 		} else {
2225 			success = clnt_call(cli, NLM4_GRANTED,
2226 			    (xdrproc_t)xdr_nlm4_testargs, &res,
2227 			    (xdrproc_t)xdr_nlm4_res, &retval4, timeo);
2228 		}
2229 	} else {
2230 		static nlm_testargs res;
2231 
2232 		res.cookie = fl->client_cookie;
2233 		res.exclusive = fl->client.exclusive;
2234 		res.alock.caller_name = fl->client_name;
2235 		res.alock.fh.n_len = sizeof(fhandle_t);
2236 		res.alock.fh.n_bytes = (char*)&fl->filehandle;
2237 		res.alock.oh = fl->client.oh;
2238 		res.alock.svid = fl->client.svid;
2239 		res.alock.l_offset = fl->client.l_offset;
2240 		res.alock.l_len = fl->client.l_len;
2241 		debuglog("sending v1 reply%s",
2242 			 (fl->flags & LOCK_ASYNC) ? " (async)":"");
2243 		if (fl->flags & LOCK_ASYNC) {
2244 			success = clnt_call(cli, NLM_GRANTED_MSG,
2245 			    (xdrproc_t)xdr_nlm_testargs, &res,
2246 			    (xdrproc_t)xdr_void, &dummy, timeo);
2247 		} else {
2248 			success = clnt_call(cli, NLM_GRANTED,
2249 			    (xdrproc_t)xdr_nlm_testargs, &res,
2250 			    (xdrproc_t)xdr_nlm_res, &retval, timeo);
2251 		}
2252 	}
2253 	if (debug_level > 2)
2254 		debuglog("clnt_call returns %d(%s) for granted",
2255 			 success, clnt_sperrno(success));
2256 
2257 }
2258 
2259 /*
2260  * Routines below here have not been modified in the overhaul
2261  */
2262 
2263 /*
2264  * Are these two routines still required since lockd is not spawning off
2265  * children to service locks anymore?  Presumably they were originally
2266  * put in place to prevent one child from changing the lock list out
2267  * from under another one.
2268  */
2269 
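/*
 * Usage pattern, as seen in testlock(), getlock(), unlock() and notify()
 * above -- bracket any examination or modification of the lock lists:
 *
 *	siglock();
 *	... walk or modify the lock lists ...
 *	sigunlock();
 *
 * which blocks SIGCHLD for the duration, presumably so a SIGCHLD handler
 * cannot run while the lists are in an inconsistent state.
 */
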
2270 void
2271 siglock(void)
2272 {
2273 	sigset_t block;
2274
2275 	sigemptyset(&block);
2276 	sigaddset(&block, SIGCHLD);
2277
2278 	if (sigprocmask(SIG_BLOCK, &block, NULL) < 0) {
2279 		syslog(LOG_WARNING, "siglock failed: %s", strerror(errno));
2280 	}
2281 }
2282 
2283 void
2284 sigunlock(void)
2285 {
2286 	sigset_t block;
2287
2288 	sigemptyset(&block);
2289 	sigaddset(&block, SIGCHLD);
2290
2291 	if (sigprocmask(SIG_UNBLOCK, &block, NULL) < 0) {
2292 		syslog(LOG_WARNING, "sigunlock failed: %s", strerror(errno));
2293 	}
2294 }
2295