xref: /illumos-gate/usr/src/uts/common/syscall/fcntl.c (revision bdfc6d18da790deeec2e0eb09c625902defe2498)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /* ONC_PLUS EXTRACT START */
23 /*
24  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
29 /*	  All Rights Reserved  	*/
30 
31 /*
32  * Portions of this source code were derived from Berkeley 4.3 BSD
33  * under license from the Regents of the University of California.
34  */
35 
36 #pragma ident	"%Z%%M%	%I%	%E% SMI"
37 /* ONC_PLUS EXTRACT END */
38 
39 #include <sys/param.h>
40 #include <sys/isa_defs.h>
41 #include <sys/types.h>
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 /* ONC_PLUS EXTRACT START */
47 #include <sys/flock.h>
48 /* ONC_PLUS EXTRACT END */
49 #include <sys/vnode.h>
50 #include <sys/file.h>
51 #include <sys/mode.h>
52 #include <sys/proc.h>
53 #include <sys/filio.h>
54 #include <sys/share.h>
55 #include <sys/debug.h>
56 #include <sys/rctl.h>
57 #include <sys/nbmlock.h>
58 
59 /* ONC_PLUS EXTRACT START */
60 static int flock_check(vnode_t *, flock64_t *, offset_t, offset_t);
61 static int flock_get_start(vnode_t *, flock64_t *, offset_t, u_offset_t *);
62 static void fd_too_big(proc_t *);
63 
/*
 * File control: the fcntl(2) system call.
 *
 * fdes is the descriptor to operate on, cmd is the F_* operation, and
 * arg is either a small integer or a user-space pointer to a
 * command-dependent structure (struct flock, flock64, o_flock, fshare).
 *
 * Returns a command-dependent non-negative value on success; on failure
 * sets the error via set_errno() and returns -1 to the caller.
 */
int
fcntl(int fdes, int cmd, intptr_t arg)
{
	int iarg;
	int error = 0;
	int retval;
	proc_t *p;
	file_t *fp;
	vnode_t *vp;
	u_offset_t offset;
	u_offset_t start;
	struct vattr vattr;
	int in_crit;		/* nonzero iff we hold an nbmand crit region */
	int flag;
	struct flock sbf;
	struct flock64 bf;
	struct o_flock obf;
	struct flock64_32 bf64_32;
	struct fshare fsh;
	struct shrlock shr;
	struct shr_locowner shr_own;
	offset_t maxoffset;
	model_t datamodel;

	/*
	 * The lock-structure transformations below depend on these
	 * layout identities holding for the build environment.
	 */
#if defined(_ILP32) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock32));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_32));
#endif
#if defined(_LP64) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock64_64));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_64));
#endif

	/*
	 * First, for speed, deal with the subset of cases
	 * that do not require getf() / releasef().
	 */
	switch (cmd) {
	case F_GETFD:
		if ((error = f_getfd_error(fdes, &flag)) == 0)
			retval = flag;
		goto out;

	case F_SETFD:
		error = f_setfd_error(fdes, (int)arg);
		retval = 0;
		goto out;

	case F_GETFL:
		/* Only the open-mode and FASYNC bits are user-visible. */
		if ((error = f_getfl(fdes, &flag)) == 0)
			retval = (flag & (FMASK | FASYNC)) + FOPEN;
		goto out;

	case F_GETXFL:
		/* Extended form: expose all flag bits, not just FMASK. */
		if ((error = f_getfl(fdes, &flag)) == 0)
			retval = flag + FOPEN;
		goto out;
	}

	/*
	 * Second, for speed, deal with the subset of cases that
	 * require getf() / releasef() but do not require copyin.
	 */
	if ((fp = getf(fdes)) == NULL) {
		error = EBADF;
		goto out;
	}
	iarg = (int)arg;

	switch (cmd) {
/* ONC_PLUS EXTRACT END */

	case F_DUPFD:
		/*
		 * Duplicate fdes onto the lowest available descriptor
		 * that is >= iarg.
		 */
		p = curproc;
		if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EINVAL;
		} else if ((retval = ufalloc_file(iarg, fp)) == -1) {
			error = EMFILE;
		} else {
			/* New descriptor shares fp; take a reference. */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
		}
		goto done;

	case F_DUP2FD:
		p = curproc;
		if (fdes == iarg) {
			retval = iarg;
		} else if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EBADF;
		} else {
			/*
			 * We can't hold our getf(fdes) across the call to
			 * closeandsetf() because it creates a window for
			 * deadlock: if one thread is doing dup2(a, b) while
			 * another is doing dup2(b, a), each one will block
			 * waiting for the other to call releasef().  The
			 * solution is to increment the file reference count
			 * (which we have to do anyway), then releasef(fdes),
			 * then closeandsetf().  Incrementing f_count ensures
			 * that fp won't disappear after we call releasef().
			 */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
			releasef(fdes);
			(void) closeandsetf(iarg, fp);
			retval = iarg;
			goto out;
		}
		goto done;

	case F_SETFL:
		vp = fp->f_vnode;
		flag = fp->f_flag;
		/* FNONBLOCK takes precedence when both are requested. */
		if ((iarg & (FNONBLOCK|FNDELAY)) == (FNONBLOCK|FNDELAY))
			iarg &= ~FNDELAY;
		if ((error = VOP_SETFL(vp, flag, iarg, fp->f_cred)) == 0) {
			/*
			 * Replace the settable FMASK bits while preserving
			 * the immutable FREAD/FWRITE open mode.
			 */
			iarg &= FMASK;
			mutex_enter(&fp->f_tlock);
			fp->f_flag &= ~FMASK | (FREAD|FWRITE);
			fp->f_flag |= (iarg - FOPEN) & ~(FREAD|FWRITE);
			mutex_exit(&fp->f_tlock);
		}
		retval = 0;
		goto done;
	}

	/*
	 * Finally, deal with the expensive cases.
	 */
	retval = 0;
	in_crit = 0;
	maxoffset = MAXOFF_T;
	datamodel = DATAMODEL_NATIVE;
#if defined(_SYSCALL32_IMPL)
	/* 32-bit callers are limited to 32-bit file offsets. */
	if ((datamodel = get_udatamodel()) == DATAMODEL_ILP32)
		maxoffset = MAXOFF32_T;
#endif

	vp = fp->f_vnode;
	flag = fp->f_flag;
	offset = fp->f_offset;

	switch (cmd) {
/* ONC_PLUS EXTRACT START */
	/*
	 * The file system and vnode layers understand and implement
	 * locking with flock64 structures. So here once we pass through
	 * the test for compatibility as defined by LFS API, (for F_SETLK,
	 * F_SETLKW, F_GETLK, F_GETLKW, F_FREESP) we transform
	 * the flock structure to a flock64 structure and send it to the
	 * lower layers. Similarly in case of GETLK the returned flock64
	 * structure is transformed to a flock structure if everything fits
	 * in nicely, otherwise we return EOVERFLOW.
	 */

	case F_GETLK:
	case F_O_GETLK:
	case F_SETLK:
	case F_SETLKW:
	case F_SETLK_NBMAND:

		/*
		 * Copy in input fields only.
		 */

		if (cmd == F_O_GETLK) {
			/* SVR3 compatibility command; 32-bit callers only. */
			if (datamodel != DATAMODEL_ILP32) {
				error = EINVAL;
				break;
			}

			if (copyin((void *)arg, &obf, sizeof (obf))) {
				error = EFAULT;
				break;
			}
			bf.l_type = obf.l_type;
			bf.l_whence = obf.l_whence;
			bf.l_start = (off64_t)obf.l_start;
			bf.l_len = (off64_t)obf.l_len;
			bf.l_sysid = (int)obf.l_sysid;
			bf.l_pid = obf.l_pid;
		} else if (datamodel == DATAMODEL_NATIVE) {
			if (copyin((void *)arg, &sbf, sizeof (sbf))) {
				error = EFAULT;
				break;
			}
			/*
			 * XXX	In an LP64 kernel with an LP64 application
			 *	there's no need to do a structure copy here
			 *	struct flock == struct flock64. However,
			 *	we did it this way to avoid more conditional
			 *	compilation.
			 */
			bf.l_type = sbf.l_type;
			bf.l_whence = sbf.l_whence;
			bf.l_start = (off64_t)sbf.l_start;
			bf.l_len = (off64_t)sbf.l_len;
			bf.l_sysid = sbf.l_sysid;
			bf.l_pid = sbf.l_pid;
		}
#if defined(_SYSCALL32_IMPL)
		else {
			/* ILP32 flock from a 32-bit app on a 64-bit kernel */
			struct flock32 sbf32;
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			}
			bf.l_type = sbf32.l_type;
			bf.l_whence = sbf32.l_whence;
			bf.l_start = (off64_t)sbf32.l_start;
			bf.l_len = (off64_t)sbf32.l_len;
			bf.l_sysid = sbf32.l_sysid;
			bf.l_pid = sbf32.l_pid;
		}
#endif /* _SYSCALL32_IMPL */

		/*
		 * 64-bit support: check for overflow for 32-bit lock ops
		 */
		if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0)
			break;

		/*
		 * Not all of the filesystems understand F_O_GETLK, and
		 * there's no need for them to know.  Map it to F_GETLK.
		 */
		if ((error = VOP_FRLOCK(vp, (cmd == F_O_GETLK) ? F_GETLK : cmd,
		    &bf, flag, offset, NULL, fp->f_cred)) != 0)
			break;

		/*
		 * If command is GETLK and no lock is found, only
		 * the type field is changed.
		 */
		if ((cmd == F_O_GETLK || cmd == F_GETLK) &&
		    bf.l_type == F_UNLCK) {
			/* l_type always first entry, always a short */
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_O_GETLK) {
			/*
			 * Return an SVR3 flock structure to the user.
			 */
			obf.l_type = (int16_t)bf.l_type;
			obf.l_whence = (int16_t)bf.l_whence;
			obf.l_start = (int32_t)bf.l_start;
			obf.l_len = (int32_t)bf.l_len;
			if (bf.l_sysid > SHRT_MAX || bf.l_pid > SHRT_MAX) {
				/*
				 * One or both values for the above fields
				 * is too large to store in an SVR3 flock
				 * structure.
				 */
				error = EOVERFLOW;
				break;
			}
			obf.l_sysid = (int16_t)bf.l_sysid;
			obf.l_pid = (int16_t)bf.l_pid;
			if (copyout(&obf, (void *)arg, sizeof (obf)))
				error = EFAULT;
		} else if (cmd == F_GETLK) {
			/*
			 * Copy out SVR4 flock.
			 */
			int i;

			if (bf.l_start > maxoffset || bf.l_len > maxoffset) {
				error = EOVERFLOW;
				break;
			}

			if (datamodel == DATAMODEL_NATIVE) {
				/* Don't leak kernel stack via the padding. */
				for (i = 0; i < 4; i++)
					sbf.l_pad[i] = 0;
				/*
				 * XXX	In an LP64 kernel with an LP64
				 *	application there's no need to do a
				 *	structure copy here as currently
				 *	struct flock == struct flock64.
				 *	We did it this way to avoid more
				 *	conditional compilation.
				 */
				sbf.l_type = bf.l_type;
				sbf.l_whence = bf.l_whence;
				sbf.l_start = (off_t)bf.l_start;
				sbf.l_len = (off_t)bf.l_len;
				sbf.l_sysid = bf.l_sysid;
				sbf.l_pid = bf.l_pid;
				if (copyout(&sbf, (void *)arg, sizeof (sbf)))
					error = EFAULT;
			}
#if defined(_SYSCALL32_IMPL)
			else {
				struct flock32 sbf32;
				if (bf.l_start > MAXOFF32_T ||
				    bf.l_len > MAXOFF32_T) {
					error = EOVERFLOW;
					break;
				}
				for (i = 0; i < 4; i++)
					sbf32.l_pad[i] = 0;
				sbf32.l_type = (int16_t)bf.l_type;
				sbf32.l_whence = (int16_t)bf.l_whence;
				sbf32.l_start = (off32_t)bf.l_start;
				sbf32.l_len = (off32_t)bf.l_len;
				sbf32.l_sysid = (int32_t)bf.l_sysid;
				sbf32.l_pid = (pid32_t)bf.l_pid;
				if (copyout(&sbf32,
				    (void *)arg, sizeof (sbf32)))
					error = EFAULT;
			}
#endif
		}
		break;
/* ONC_PLUS EXTRACT END */

	case F_CHKFL:
		/*
		 * This is for internal use only, to allow the vnode layer
		 * to validate a flags setting before applying it.  User
		 * programs can't issue it.
		 */
		error = EINVAL;
		break;

	case F_ALLOCSP:
	case F_FREESP:
		/* Space manipulation requires write access to a regular file */
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}
		if (vp->v_type != VREG) {
			error = EINVAL;
			break;
		}

#if defined(_ILP32) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32) {
			struct flock32 sbf32;
			/*
			 * For compatibility we overlay an SVR3 flock on an SVR4
			 * flock.  This works because the input field offsets
			 * in "struct flock" were preserved.
			 */
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			} else {
				bf.l_type = sbf32.l_type;
				bf.l_whence = sbf32.l_whence;
				bf.l_start = (off64_t)sbf32.l_start;
				bf.l_len = (off64_t)sbf32.l_len;
				bf.l_sysid = sbf32.l_sysid;
				bf.l_pid = sbf32.l_pid;
			}
		}
#endif /* _ILP32 || _SYSCALL32_IMPL */

#if defined(_LP64)
		if (datamodel == DATAMODEL_LP64) {
			if (copyin((void *)arg, &bf, sizeof (bf))) {
				error = EFAULT;
				break;
			}
		}
#endif

		if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0)
			break;

		/*
		 * flock_check() turned an at-max-offset request into
		 * l_len == 0; a start beyond this file's offset limit
		 * can't be represented.
		 */
		if (vp->v_type == VREG && bf.l_len == 0 &&
		    bf.l_start > OFFSET_MAX(fp)) {
			error = EFBIG;
			break;
		}

		/*
		 * Make sure that there are no conflicting non-blocking
		 * mandatory locks in the region being manipulated. If
		 * there are such locks then return EACCES.
		 */
		if ((error = flock_get_start(vp, &bf, offset, &start)) != 0)
			break;

		if (nbl_need_check(vp)) {
			u_offset_t	begin;
			ssize_t		length;

			/* in_crit makes the common exit path drop the crit region */
			nbl_start_crit(vp, RW_READER);
			in_crit = 1;
			vattr.va_mask = AT_SIZE;
			if ((error = VOP_GETATTR(vp, &vattr, 0, CRED())) != 0)
				break;
			begin = start > vattr.va_size ? vattr.va_size : start;
			length = vattr.va_size > start ? vattr.va_size - start :
				start - vattr.va_size;
			if (nbl_conflict(vp, NBL_WRITE, begin, length, 0)) {
				error = EACCES;
				break;
			}
		}
		error = VOP_SPACE(vp, cmd, &bf, flag, offset, fp->f_cred, NULL);
		break;

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
/* ONC_PLUS EXTRACT START */
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_SETLK64_NBMAND:
		/*
		 * Large Files: Here we set cmd as *LK and send it to
		 * lower layers. *LK64 is only for the user land.
		 * Most of the comments described above for F_SETLK
		 * apply here too.
		 * Large File support is only needed for ILP32 apps!
		 */
		if (datamodel != DATAMODEL_ILP32) {
			error = EINVAL;
			break;
		}

		if (cmd == F_GETLK64)
			cmd = F_GETLK;
		else if (cmd == F_SETLK64)
			cmd = F_SETLK;
		else if (cmd == F_SETLKW64)
			cmd = F_SETLKW;
		else if (cmd == F_SETLK64_NBMAND)
			cmd = F_SETLK_NBMAND;

		/*
		 * Note that the size of flock64 is different in the ILP32
		 * and LP64 models, due to the l_pad field.
		 * We do not want to assume that the flock64 structure is
		 * laid out the same in ILP32 and LP64 environments, so
		 * we will copy in the ILP32 version of flock64 explicitly
		 * and copy it to the native flock64 structure.
		 */

		if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
			error = EFAULT;
			break;
		}
		bf.l_type = (short)bf64_32.l_type;
		bf.l_whence = (short)bf64_32.l_whence;
		bf.l_start = bf64_32.l_start;
		bf.l_len = bf64_32.l_len;
		bf.l_sysid = (int)bf64_32.l_sysid;
		bf.l_pid = (pid_t)bf64_32.l_pid;

		if ((error = flock_check(vp, &bf, offset, MAXOFFSET_T)) != 0)
			break;

		if ((error = VOP_FRLOCK(vp, cmd, &bf, flag, offset,
		    NULL, fp->f_cred)) != 0)
			break;

		/* GETLK with no conflicting lock: update l_type only. */
		if ((cmd == F_GETLK) && bf.l_type == F_UNLCK) {
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_GETLK) {
			int i;

			/*
			 * We do not want to assume that the flock64 structure
			 * is laid out the same in ILP32 and LP64
			 * environments, so we will copy out the ILP32 version
			 * of flock64 explicitly after copying the native
			 * flock64 structure to it.
			 */
			for (i = 0; i < 4; i++)
				bf64_32.l_pad[i] = 0;
			bf64_32.l_type = (int16_t)bf.l_type;
			bf64_32.l_whence = (int16_t)bf.l_whence;
			bf64_32.l_start = bf.l_start;
			bf64_32.l_len = bf.l_len;
			bf64_32.l_sysid = (int32_t)bf.l_sysid;
			bf64_32.l_pid = (pid32_t)bf.l_pid;
			if (copyout(&bf64_32, (void *)arg, sizeof (bf64_32)))
				error = EFAULT;
		}
		break;
/* ONC_PLUS EXTRACT END */

	case F_FREESP64:
		/* Large-file F_FREESP; only meaningful for ILP32 apps. */
		if (datamodel != DATAMODEL_ILP32) {
			error = EINVAL;
			break;
		}
		cmd = F_FREESP;
		if ((flag & FWRITE) == 0)
			error = EBADF;
		else if (vp->v_type != VREG)
			error = EINVAL;
		else if (copyin((void *)arg, &bf64_32, sizeof (bf64_32)))
			error = EFAULT;
		else {
			/*
			 * Note that the size of flock64 is different in
			 * the ILP32 and LP64 models, due to the l_pad field.
			 * We do not want to assume that the flock64 structure
			 * is laid out the same in ILP32 and LP64
			 * environments, so we will copy in the ILP32
			 * version of flock64 explicitly and copy it to
			 * the native flock64 structure.
			 */
			bf.l_type = (short)bf64_32.l_type;
			bf.l_whence = (short)bf64_32.l_whence;
			bf.l_start = bf64_32.l_start;
			bf.l_len = bf64_32.l_len;
			bf.l_sysid = (int)bf64_32.l_sysid;
			bf.l_pid = (pid_t)bf64_32.l_pid;

			if ((error = flock_check(vp, &bf, offset,
			    MAXOFFSET_T)) != 0)
				break;

			if (vp->v_type == VREG && bf.l_len == 0 &&
			    bf.l_start > OFFSET_MAX(fp)) {
				error = EFBIG;
				break;
			}
			/*
			 * Make sure that there are no conflicting non-blocking
			 * mandatory locks in the region being manipulated. If
			 * there are such locks then return EACCES.
			 */
			if ((error = flock_get_start(vp, &bf, offset,
			    &start)) != 0)
				break;
			if (nbl_need_check(vp)) {
				u_offset_t	begin;
				ssize_t		length;

				nbl_start_crit(vp, RW_READER);
				in_crit = 1;
				vattr.va_mask = AT_SIZE;
				if ((error = VOP_GETATTR(vp, &vattr, 0,
				    CRED())) != 0)
					break;
				begin = start > vattr.va_size ?
					vattr.va_size : start;
				length = vattr.va_size > start ?
						vattr.va_size - start :
						start - vattr.va_size;
				if (nbl_conflict(vp, NBL_WRITE, begin,
				    length, 0)) {
					error = EACCES;
					break;
				}
			}
			error = VOP_SPACE(vp, cmd, &bf, flag, offset,
			    fp->f_cred, NULL);
		}
		break;
#endif /*  !_LP64 || _SYSCALL32_IMPL */

/* ONC_PLUS EXTRACT START */
	case F_SHARE:
	case F_SHARE_NBMAND:
	case F_UNSHARE:

		/*
		 * Copy in input fields only.
		 */
		if (copyin((void *)arg, &fsh, sizeof (fsh))) {
			error = EFAULT;
			break;
		}

		/*
		 * Local share reservations always have this simple form
		 */
		shr.s_access = fsh.f_access;
		shr.s_deny = fsh.f_deny;
		shr.s_sysid = 0;
		shr.s_pid = ttoproc(curthread)->p_pid;
		shr_own.sl_pid = shr.s_pid;
		shr_own.sl_id = fsh.f_id;
		shr.s_own_len = sizeof (shr_own);
		shr.s_owner = (caddr_t)&shr_own;
		error = VOP_SHRLOCK(vp, cmd, &shr, flag, fp->f_cred);
/* ONC_PLUS EXTRACT END */
		break;

	default:
		error = EINVAL;
		break;
	}

	/* Leave any nbmand critical region entered above. */
	if (in_crit)
		nbl_end_crit(vp);

done:
	releasef(fdes);
out:
	if (error)
		return (set_errno(error));
	return (retval);
}
683 
684 int
685 dup(int fd)
686 {
687 	return (fcntl(fd, F_DUPFD, 0));
688 }
689 
690 /* ONC_PLUS EXTRACT START */
/*
 * Validate an flock64 request against a file-offset limit.
 *
 * Resolves the starting offset according to l_whence (relative to the
 * file start, the current offset, or the file size via VOP_GETATTR),
 * then checks that the start and the computed end of the locked range
 * fit within [0, max].  As a side effect, an F_UNLCK request with a
 * positive length that reaches exactly offset max is canonicalized to
 * l_len == 0 ("to end of file").
 *
 * Returns 0 on success; EINVAL for a bad whence, out-of-range start,
 * or inverted range; EOVERFLOW when the arithmetic would exceed max;
 * or the error from VOP_GETATTR.
 */
int
flock_check(vnode_t *vp, flock64_t *flp, offset_t offset, offset_t max)
{
	struct vattr	vattr;
	int	error;
	u_offset_t start, end;

	/*
	 * Determine the starting point of the request
	 */
	switch (flp->l_whence) {
	case 0:		/* SEEK_SET */
		start = (u_offset_t)flp->l_start;
		if (start > max)
			return (EINVAL);
		break;
	case 1:		/* SEEK_CUR */
		/* Guard the signed addition below against overflow. */
		if (flp->l_start > (max - offset))
			return (EOVERFLOW);
		start = (u_offset_t)(flp->l_start + offset);
		if (start > max)
			return (EINVAL);
		break;
	case 2:		/* SEEK_END */
		vattr.va_mask = AT_SIZE;
		if (error = VOP_GETATTR(vp, &vattr, 0, CRED()))
			return (error);
		if (flp->l_start > (max - (offset_t)vattr.va_size))
			return (EOVERFLOW);
		start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
		if (start > max)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Determine the range covered by the request.
	 */
	if (flp->l_len == 0)
		end = MAXEND;	/* zero length means "to end of file" */
	else if ((offset_t)flp->l_len > 0) {
		if (flp->l_len > (max - start + 1))
			return (EOVERFLOW);
		end = (u_offset_t)(start + (flp->l_len - 1));
		ASSERT(end <= max);
	} else {
		/*
		 * Negative length; why do we even allow this ?
		 * Because this allows easy specification of
		 * the last n bytes of the file.
		 */
		/* The range is [start + l_len + 1, start]. */
		end = start;
		start += (u_offset_t)flp->l_len;
		(start)++;
		/* Unsigned wraparound from a too-negative l_len lands here. */
		if (start > max)
			return (EINVAL);
		ASSERT(end <= max);
	}
	ASSERT(start <= max);
	/* Canonicalize an unlock that runs exactly to max as "to EOF". */
	if (flp->l_type == F_UNLCK && flp->l_len > 0 &&
	    end == (offset_t)max) {
		flp->l_len = 0;
	}
	if (start  > end)
		return (EINVAL);
	return (0);
}
760 
761 static int
762 flock_get_start(vnode_t *vp, flock64_t *flp, offset_t offset, u_offset_t *start)
763 {
764 	struct vattr	vattr;
765 	int	error;
766 
767 	/*
768 	 * Determine the starting point of the request. Assume that it is
769 	 * a valid starting point.
770 	 */
771 	switch (flp->l_whence) {
772 	case 0:		/* SEEK_SET */
773 		*start = (u_offset_t)flp->l_start;
774 		break;
775 	case 1:		/* SEEK_CUR */
776 		*start = (u_offset_t)(flp->l_start + offset);
777 		break;
778 	case 2:		/* SEEK_END */
779 		vattr.va_mask = AT_SIZE;
780 		if (error = VOP_GETATTR(vp, &vattr, 0, CRED()))
781 			return (error);
782 		*start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
783 		break;
784 	default:
785 		return (EINVAL);
786 	}
787 
788 	return (0);
789 }
790 
/*
 * Take rctl action when the requested file descriptor is too big.
 * Fires the legacy RLIMIT_NOFILE resource control for process p
 * (delivering any configured signal/log action); RCA_SAFE requests
 * only actions that are safe to take in this context.
 */
static void
fd_too_big(proc_t *p)
{
	/* p_lock must be held across rctl_action(). */
	mutex_enter(&p->p_lock);
	(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
	    p->p_rctls, p, RCA_SAFE);
	mutex_exit(&p->p_lock);
}
802 /* ONC_PLUS EXTRACT END */
803