xref: /titanic_41/usr/src/uts/common/syscall/fcntl.c (revision d89fccd8788afe1e920f842edd883fe192a1b8fe)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /* ONC_PLUS EXTRACT START */
23 /*
24  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
29 /*	  All Rights Reserved  	*/
30 
31 /*
32  * Portions of this source code were derived from Berkeley 4.3 BSD
33  * under license from the Regents of the University of California.
34  */
35 
36 #pragma ident	"%Z%%M%	%I%	%E% SMI"
37 /* ONC_PLUS EXTRACT END */
38 
39 #include <sys/param.h>
40 #include <sys/isa_defs.h>
41 #include <sys/types.h>
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 /* ONC_PLUS EXTRACT START */
47 #include <sys/flock.h>
48 /* ONC_PLUS EXTRACT END */
49 #include <sys/vnode.h>
50 #include <sys/file.h>
51 #include <sys/mode.h>
52 #include <sys/proc.h>
53 #include <sys/filio.h>
54 #include <sys/share.h>
55 #include <sys/debug.h>
56 #include <sys/rctl.h>
57 #include <sys/nbmlock.h>
58 
59 #include <sys/cmn_err.h>
60 
61 /* ONC_PLUS EXTRACT START */
62 static int flock_check(vnode_t *, flock64_t *, offset_t, offset_t);
63 static int flock_get_start(vnode_t *, flock64_t *, offset_t, u_offset_t *);
64 static void fd_too_big(proc_t *);
65 
/*
 * File control.
 *
 * fcntl(2) system call entry point.  On success the command-specific
 * result in retval is returned; on failure the error code is posted
 * via set_errno().  The function is organized in three tiers, fastest
 * first:
 *
 *  1. commands that require neither getf() nor copyin()
 *     (F_GETFD, F_SETFD, F_GETFL, F_GETXFL);
 *  2. commands that require getf()/releasef() but no copyin()
 *     (F_DUPFD, F_DUP2FD, F_SETFL);
 *  3. everything else (record locking, space allocation/freeing,
 *     share reservations), which may copy a flock/flock64 structure
 *     in and out in several data models (native, ILP32, LP64).
 */
int
fcntl(int fdes, int cmd, intptr_t arg)
{
	int iarg;
	int error = 0;
	int retval;
	proc_t *p;
	file_t *fp;
	vnode_t *vp;
	u_offset_t offset;
	u_offset_t start;
	struct vattr vattr;
	int in_crit;		/* inside an nbmand critical region? */
	int flag;
	struct flock sbf;	/* native flock for F_GETLK et al. */
	struct flock64 bf;	/* common 64-bit form used internally */
	struct o_flock obf;	/* SVR3 flock, F_O_GETLK only */
	struct flock64_32 bf64_32;	/* ILP32 layout of flock64 */
	struct fshare fsh;
	struct shrlock shr;
	struct shr_locowner shr_own;
	offset_t maxoffset;	/* largest offset the caller can express */
	model_t datamodel;

#if defined(_ILP32) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock32));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_32));
#endif
#if defined(_LP64) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock64_64));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_64));
#endif

	/*
	 * First, for speed, deal with the subset of cases
	 * that do not require getf() / releasef().
	 */
	switch (cmd) {
	case F_GETFD:
		if ((error = f_getfd_error(fdes, &flag)) == 0)
			retval = flag;
		goto out;

	case F_SETFD:
		error = f_setfd_error(fdes, (int)arg);
		retval = 0;
		goto out;

	case F_GETFL:
		if ((error = f_getfl(fdes, &flag)) == 0)
			retval = (flag & (FMASK | FASYNC)) + FOPEN;
		goto out;

	case F_GETXFL:
		if ((error = f_getfl(fdes, &flag)) == 0)
			retval = flag + FOPEN;
		goto out;
	}

	/*
	 * Second, for speed, deal with the subset of cases that
	 * require getf() / releasef() but do not require copyin.
	 */
	if ((fp = getf(fdes)) == NULL) {
		error = EBADF;
		goto out;
	}
	/* The dup and flag-setting cases below interpret arg as an int. */
	iarg = (int)arg;

	switch (cmd) {
/* ONC_PLUS EXTRACT END */

	case F_DUPFD:
		p = curproc;
		if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EINVAL;
		} else if ((retval = ufalloc_file(iarg, fp)) == -1) {
			error = EMFILE;
		} else {
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
		}
		goto done;

	case F_DUP2FD:
		p = curproc;
		if (fdes == iarg) {
			retval = iarg;
		} else if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EBADF;
		} else {
			/*
			 * We can't hold our getf(fdes) across the call to
			 * closeandsetf() because it creates a window for
			 * deadlock: if one thread is doing dup2(a, b) while
			 * another is doing dup2(b, a), each one will block
			 * waiting for the other to call releasef().  The
			 * solution is to increment the file reference count
			 * (which we have to do anyway), then releasef(fdes),
			 * then closeandsetf().  Incrementing f_count ensures
			 * that fp won't disappear after we call releasef().
			 */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
			releasef(fdes);
			(void) closeandsetf(iarg, fp);
			retval = iarg;
			goto out;
		}
		goto done;

	case F_SETFL:
		vp = fp->f_vnode;
		flag = fp->f_flag;
		/* If both were requested, FNONBLOCK takes precedence. */
		if ((iarg & (FNONBLOCK|FNDELAY)) == (FNONBLOCK|FNDELAY))
			iarg &= ~FNDELAY;
		if ((error = VOP_SETFL(vp, flag, iarg, fp->f_cred)) == 0) {
			iarg &= FMASK;
			mutex_enter(&fp->f_tlock);
			/* Replace the FMASK bits, but never FREAD/FWRITE. */
			fp->f_flag &= ~FMASK | (FREAD|FWRITE);
			fp->f_flag |= (iarg - FOPEN) & ~(FREAD|FWRITE);
			mutex_exit(&fp->f_tlock);
		}
		retval = 0;
		goto done;
	}

	/*
	 * Finally, deal with the expensive cases.
	 */
	retval = 0;
	in_crit = 0;
	maxoffset = MAXOFF_T;
	datamodel = DATAMODEL_NATIVE;
#if defined(_SYSCALL32_IMPL)
	/* 32-bit callers can only express offsets up to MAXOFF32_T. */
	if ((datamodel = get_udatamodel()) == DATAMODEL_ILP32)
		maxoffset = MAXOFF32_T;
#endif

	/* Snapshot the file state used by the lock/space/share cases. */
	vp = fp->f_vnode;
	flag = fp->f_flag;
	offset = fp->f_offset;

	switch (cmd) {
/* ONC_PLUS EXTRACT START */
	/*
	 * The file system and vnode layers understand and implement
	 * locking with flock64 structures. So here once we pass through
	 * the test for compatibility as defined by LFS API, (for F_SETLK,
	 * F_SETLKW, F_GETLK, F_GETLKW, F_FREESP) we transform
	 * the flock structure to a flock64 structure and send it to the
	 * lower layers. Similarly in case of GETLK the returned flock64
	 * structure is transformed to a flock structure if everything fits
	 * in nicely, otherwise we return EOVERFLOW.
	 */

	case F_GETLK:
	case F_O_GETLK:
	case F_SETLK:
	case F_SETLKW:
	case F_SETLK_NBMAND:

		/*
		 * Copy in input fields only.
		 */

		if (cmd == F_O_GETLK) {
			/* SVR3 compatibility: only meaningful for ILP32. */
			if (datamodel != DATAMODEL_ILP32) {
				error = EINVAL;
				break;
			}

			if (copyin((void *)arg, &obf, sizeof (obf))) {
				error = EFAULT;
				break;
			}
			bf.l_type = obf.l_type;
			bf.l_whence = obf.l_whence;
			bf.l_start = (off64_t)obf.l_start;
			bf.l_len = (off64_t)obf.l_len;
			bf.l_sysid = (int)obf.l_sysid;
			bf.l_pid = obf.l_pid;
		} else if (datamodel == DATAMODEL_NATIVE) {
			if (copyin((void *)arg, &sbf, sizeof (sbf))) {
				error = EFAULT;
				break;
			}
			/*
			 * XXX	In an LP64 kernel with an LP64 application
			 *	there's no need to do a structure copy here
			 *	struct flock == struct flock64. However,
			 *	we did it this way to avoid more conditional
			 *	compilation.
			 */
			bf.l_type = sbf.l_type;
			bf.l_whence = sbf.l_whence;
			bf.l_start = (off64_t)sbf.l_start;
			bf.l_len = (off64_t)sbf.l_len;
			bf.l_sysid = sbf.l_sysid;
			bf.l_pid = sbf.l_pid;
		}
#if defined(_SYSCALL32_IMPL)
		else {
			struct flock32 sbf32;
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			}
			bf.l_type = sbf32.l_type;
			bf.l_whence = sbf32.l_whence;
			bf.l_start = (off64_t)sbf32.l_start;
			bf.l_len = (off64_t)sbf32.l_len;
			bf.l_sysid = sbf32.l_sysid;
			bf.l_pid = sbf32.l_pid;
		}
#endif /* _SYSCALL32_IMPL */

		/*
		 * 64-bit support: check for overflow for 32-bit lock ops
		 */
		if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0)
			break;

		/*
		 * Not all of the filesystems understand F_O_GETLK, and
		 * there's no need for them to know.  Map it to F_GETLK.
		 */
		if ((error = VOP_FRLOCK(vp, (cmd == F_O_GETLK) ? F_GETLK : cmd,
		    &bf, flag, offset, NULL, fp->f_cred)) != 0)
			break;

		/*
		 * If command is GETLK and no lock is found, only
		 * the type field is changed.
		 */
		if ((cmd == F_O_GETLK || cmd == F_GETLK) &&
		    bf.l_type == F_UNLCK) {
			/* l_type always first entry, always a short */
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_O_GETLK) {
			/*
			 * Return an SVR3 flock structure to the user.
			 */
			obf.l_type = (int16_t)bf.l_type;
			obf.l_whence = (int16_t)bf.l_whence;
			obf.l_start = (int32_t)bf.l_start;
			obf.l_len = (int32_t)bf.l_len;
			if (bf.l_sysid > SHRT_MAX || bf.l_pid > SHRT_MAX) {
				/*
				 * One or both values for the above fields
				 * is too large to store in an SVR3 flock
				 * structure.
				 */
				error = EOVERFLOW;
				break;
			}
			obf.l_sysid = (int16_t)bf.l_sysid;
			obf.l_pid = (int16_t)bf.l_pid;
			if (copyout(&obf, (void *)arg, sizeof (obf)))
				error = EFAULT;
		} else if (cmd == F_GETLK) {
			/*
			 * Copy out SVR4 flock.
			 */
			int i;

			if (bf.l_start > maxoffset || bf.l_len > maxoffset) {
				error = EOVERFLOW;
				break;
			}

			if (datamodel == DATAMODEL_NATIVE) {
				for (i = 0; i < 4; i++)
					sbf.l_pad[i] = 0;
				/*
				 * XXX	In an LP64 kernel with an LP64
				 *	application there's no need to do a
				 *	structure copy here as currently
				 *	struct flock == struct flock64.
				 *	We did it this way to avoid more
				 *	conditional compilation.
				 */
				sbf.l_type = bf.l_type;
				sbf.l_whence = bf.l_whence;
				sbf.l_start = (off_t)bf.l_start;
				sbf.l_len = (off_t)bf.l_len;
				sbf.l_sysid = bf.l_sysid;
				sbf.l_pid = bf.l_pid;
				if (copyout(&sbf, (void *)arg, sizeof (sbf)))
					error = EFAULT;
			}
#if defined(_SYSCALL32_IMPL)
			else {
				struct flock32 sbf32;
				if (bf.l_start > MAXOFF32_T ||
				    bf.l_len > MAXOFF32_T) {
					error = EOVERFLOW;
					break;
				}
				for (i = 0; i < 4; i++)
					sbf32.l_pad[i] = 0;
				sbf32.l_type = (int16_t)bf.l_type;
				sbf32.l_whence = (int16_t)bf.l_whence;
				sbf32.l_start = (off32_t)bf.l_start;
				sbf32.l_len = (off32_t)bf.l_len;
				sbf32.l_sysid = (int32_t)bf.l_sysid;
				sbf32.l_pid = (pid32_t)bf.l_pid;
				if (copyout(&sbf32,
				    (void *)arg, sizeof (sbf32)))
					error = EFAULT;
			}
#endif
		}
		break;
/* ONC_PLUS EXTRACT END */

	case F_CHKFL:
		/*
		 * This is for internal use only, to allow the vnode layer
		 * to validate a flags setting before applying it.  User
		 * programs can't issue it.
		 */
		error = EINVAL;
		break;

	case F_ALLOCSP:
	case F_FREESP:
	case F_ALLOCSP64:
	case F_FREESP64:
		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}

		if (vp->v_type != VREG) {
			error = EINVAL;
			break;
		}

		/* The explicit 64-bit commands exist only for ILP32 apps. */
		if (datamodel != DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
			error = EINVAL;
			break;
		}

#if defined(_ILP32) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP || cmd == F_FREESP)) {
			struct flock32 sbf32;
			/*
			 * For compatibility we overlay an SVR3 flock on an SVR4
			 * flock.  This works because the input field offsets
			 * in "struct flock" were preserved.
			 */
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			} else {
				bf.l_type = sbf32.l_type;
				bf.l_whence = sbf32.l_whence;
				bf.l_start = (off64_t)sbf32.l_start;
				bf.l_len = (off64_t)sbf32.l_len;
				bf.l_sysid = sbf32.l_sysid;
				bf.l_pid = sbf32.l_pid;
			}
		}
#endif /* _ILP32 || _SYSCALL32_IMPL */

#if defined(_LP64)
		if (datamodel == DATAMODEL_LP64 &&
		    (cmd == F_ALLOCSP || cmd == F_FREESP)) {
			if (copyin((void *)arg, &bf, sizeof (bf))) {
				error = EFAULT;
				break;
			}
		}
#endif /* defined(_LP64) */

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
			if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
				error = EFAULT;
				break;
			} else {
				/*
				 * Note that the size of flock64 is different in
				 * the ILP32 and LP64 models, due to the l_pad
				 * field. We do not want to assume that the
				 * flock64 structure is laid out the same in
				 * ILP32 and LP64 environments, so we will
				 * copy in the ILP32 version of flock64
				 * explicitly and copy it to the native
				 * flock64 structure.
				 */
				bf.l_type = (short)bf64_32.l_type;
				bf.l_whence = (short)bf64_32.l_whence;
				bf.l_start = bf64_32.l_start;
				bf.l_len = bf64_32.l_len;
				bf.l_sysid = (int)bf64_32.l_sysid;
				bf.l_pid = (pid_t)bf64_32.l_pid;
			}
		}
#endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */

		if (cmd == F_ALLOCSP || cmd == F_FREESP)
			error = flock_check(vp, &bf, offset, maxoffset);
		else if (cmd == F_ALLOCSP64 || cmd == F_FREESP64)
			error = flock_check(vp, &bf, offset, MAXOFFSET_T);
		if (error)
			break;

		/*
		 * NOTE(review): l_len == 0 appears to mean "through end of
		 * file" here; the start must still be addressable under this
		 * file's offset limit — confirm against the VOP_SPACE
		 * contract.
		 */
		if (vp->v_type == VREG && bf.l_len == 0 &&
		    bf.l_start > OFFSET_MAX(fp)) {
			error = EFBIG;
			break;
		}

		/*
		 * Make sure that there are no conflicting non-blocking
		 * mandatory locks in the region being manipulated. If
		 * there are such locks then return EACCES.
		 */
		if ((error = flock_get_start(vp, &bf, offset, &start)) != 0)
			break;

		if (nbl_need_check(vp)) {
			u_offset_t	begin;
			ssize_t		length;

			nbl_start_crit(vp, RW_READER);
			in_crit = 1;
			vattr.va_mask = AT_SIZE;
			if ((error = VOP_GETATTR(vp, &vattr, 0, CRED())) != 0)
				break;
			begin = start > vattr.va_size ? vattr.va_size : start;
			length = vattr.va_size > start ? vattr.va_size - start :
				start - vattr.va_size;
			if (nbl_conflict(vp, NBL_WRITE, begin, length, 0)) {
				error = EACCES;
				break;
			}
		}

		/* The vnode layer only knows the non-64 command names. */
		if (cmd == F_ALLOCSP64)
			cmd = F_ALLOCSP;
		else if (cmd == F_FREESP64)
			cmd = F_FREESP;

		error = VOP_SPACE(vp, cmd, &bf, flag, offset, fp->f_cred, NULL);

		break;

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
/* ONC_PLUS EXTRACT START */
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_SETLK64_NBMAND:
		/*
		 * Large Files: Here we set cmd as *LK and send it to
		 * lower layers. *LK64 is only for the user land.
		 * Most of the comments described above for F_SETLK
		 * applies here too.
		 * Large File support is only needed for ILP32 apps!
		 */
		if (datamodel != DATAMODEL_ILP32) {
			error = EINVAL;
			break;
		}

		if (cmd == F_GETLK64)
			cmd = F_GETLK;
		else if (cmd == F_SETLK64)
			cmd = F_SETLK;
		else if (cmd == F_SETLKW64)
			cmd = F_SETLKW;
		else if (cmd == F_SETLK64_NBMAND)
			cmd = F_SETLK_NBMAND;

		/*
		 * Note that the size of flock64 is different in the ILP32
		 * and LP64 models, due to the sucking l_pad field.
		 * We do not want to assume that the flock64 structure is
		 * laid out in the same in ILP32 and LP64 environments, so
		 * we will copy in the ILP32 version of flock64 explicitly
		 * and copy it to the native flock64 structure.
		 */

		if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
			error = EFAULT;
			break;
		}

		bf.l_type = (short)bf64_32.l_type;
		bf.l_whence = (short)bf64_32.l_whence;
		bf.l_start = bf64_32.l_start;
		bf.l_len = bf64_32.l_len;
		bf.l_sysid = (int)bf64_32.l_sysid;
		bf.l_pid = (pid_t)bf64_32.l_pid;

		if ((error = flock_check(vp, &bf, offset, MAXOFFSET_T)) != 0)
			break;

		if ((error = VOP_FRLOCK(vp, cmd, &bf, flag, offset,
		    NULL, fp->f_cred)) != 0)
			break;

		/* As above: GETLK with no conflict updates l_type only. */
		if ((cmd == F_GETLK) && bf.l_type == F_UNLCK) {
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_GETLK) {
			int i;

			/*
			 * We do not want to assume that the flock64 structure
			 * is laid out in the same in ILP32 and LP64
			 * environments, so we will copy out the ILP32 version
			 * of flock64 explicitly after copying the native
			 * flock64 structure to it.
			 */
			for (i = 0; i < 4; i++)
				bf64_32.l_pad[i] = 0;
			bf64_32.l_type = (int16_t)bf.l_type;
			bf64_32.l_whence = (int16_t)bf.l_whence;
			bf64_32.l_start = bf.l_start;
			bf64_32.l_len = bf.l_len;
			bf64_32.l_sysid = (int32_t)bf.l_sysid;
			bf64_32.l_pid = (pid32_t)bf.l_pid;
			if (copyout(&bf64_32, (void *)arg, sizeof (bf64_32)))
				error = EFAULT;
		}
		break;
/* ONC_PLUS EXTRACT END */
#endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */

/* ONC_PLUS EXTRACT START */
	case F_SHARE:
	case F_SHARE_NBMAND:
	case F_UNSHARE:

		/*
		 * Copy in input fields only.
		 */
		if (copyin((void *)arg, &fsh, sizeof (fsh))) {
			error = EFAULT;
			break;
		}

		/*
		 * Local share reservations always have this simple form
		 */
		shr.s_access = fsh.f_access;
		shr.s_deny = fsh.f_deny;
		shr.s_sysid = 0;
		shr.s_pid = ttoproc(curthread)->p_pid;
		shr_own.sl_pid = shr.s_pid;
		shr_own.sl_id = fsh.f_id;
		shr.s_own_len = sizeof (shr_own);
		shr.s_owner = (caddr_t)&shr_own;
		error = VOP_SHRLOCK(vp, cmd, &shr, flag, fp->f_cred);
/* ONC_PLUS EXTRACT END */
		break;

	default:
		error = EINVAL;
		break;
	}

	/* Leave the nbmand critical region, if one was entered above. */
	if (in_crit)
		nbl_end_crit(vp);

done:
	/* Common exit for every path that still holds the file_t. */
	releasef(fdes);
out:
	if (error)
		return (set_errno(error));
	return (retval);
}
663 
664 int
665 dup(int fd)
666 {
667 	return (fcntl(fd, F_DUPFD, 0));
668 }
669 
670 /* ONC_PLUS EXTRACT START */
/*
 * Validate and normalize a flock64 request against the offset limit
 * "max" (MAXOFF_T, MAXOFF32_T or MAXOFFSET_T, chosen by the caller
 * according to data model and command).
 *
 * Resolves l_whence (0 = SEEK_SET, 1 = SEEK_CUR, 2 = SEEK_END) to an
 * absolute starting offset; SEEK_CUR is relative to "offset" (the
 * file offset passed in) and SEEK_END is relative to the vnode's
 * current size obtained via VOP_GETATTR.
 *
 * Returns 0 on success, EINVAL for an out-of-range or malformed
 * request, EOVERFLOW when the request cannot be represented within
 * "max", or the VOP_GETATTR error for SEEK_END.
 *
 * Side effect: an F_UNLCK request with positive l_len whose end lands
 * exactly on "max" has its l_len rewritten to 0 in place.
 */
int
flock_check(vnode_t *vp, flock64_t *flp, offset_t offset, offset_t max)
{
	struct vattr	vattr;
	int	error;
	u_offset_t start, end;

	/*
	 * Determine the starting point of the request
	 */
	switch (flp->l_whence) {
	case 0:		/* SEEK_SET */
		start = (u_offset_t)flp->l_start;
		if (start > max)
			return (EINVAL);
		break;
	case 1:		/* SEEK_CUR */
		/* Reject before adding so the sum cannot overflow max. */
		if (flp->l_start > (max - offset))
			return (EOVERFLOW);
		start = (u_offset_t)(flp->l_start + offset);
		if (start > max)
			return (EINVAL);
		break;
	case 2:		/* SEEK_END */
		vattr.va_mask = AT_SIZE;
		if (error = VOP_GETATTR(vp, &vattr, 0, CRED()))
			return (error);
		if (flp->l_start > (max - (offset_t)vattr.va_size))
			return (EOVERFLOW);
		start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
		if (start > max)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Determine the range covered by the request.
	 */
	if (flp->l_len == 0)
		end = MAXEND;	/* l_len == 0: extend to the maximum offset */
	else if ((offset_t)flp->l_len > 0) {
		if (flp->l_len > (max - start + 1))
			return (EOVERFLOW);
		end = (u_offset_t)(start + (flp->l_len - 1));
		ASSERT(end <= max);
	} else {
		/*
		 * Negative length; why do we even allow this ?
		 * Because this allows easy specification of
		 * the last n bytes of the file.
		 */
		end = start;
		start += (u_offset_t)flp->l_len;
		(start)++;
		if (start > max)
			return (EINVAL);
		ASSERT(end <= max);
	}
	ASSERT(start <= max);
	/* Unlock reaching exactly max is canonicalized to "to EOF". */
	if (flp->l_type == F_UNLCK && flp->l_len > 0 &&
	    end == (offset_t)max) {
		flp->l_len = 0;
	}
	if (start  > end)
		return (EINVAL);
	return (0);
}
740 
741 static int
742 flock_get_start(vnode_t *vp, flock64_t *flp, offset_t offset, u_offset_t *start)
743 {
744 	struct vattr	vattr;
745 	int	error;
746 
747 	/*
748 	 * Determine the starting point of the request. Assume that it is
749 	 * a valid starting point.
750 	 */
751 	switch (flp->l_whence) {
752 	case 0:		/* SEEK_SET */
753 		*start = (u_offset_t)flp->l_start;
754 		break;
755 	case 1:		/* SEEK_CUR */
756 		*start = (u_offset_t)(flp->l_start + offset);
757 		break;
758 	case 2:		/* SEEK_END */
759 		vattr.va_mask = AT_SIZE;
760 		if (error = VOP_GETATTR(vp, &vattr, 0, CRED()))
761 			return (error);
762 		*start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
763 		break;
764 	default:
765 		return (EINVAL);
766 	}
767 
768 	return (0);
769 }
770 
771 /*
772  * Take rctl action when the requested file descriptor is too big.
773  */
774 static void
775 fd_too_big(proc_t *p)
776 {
777 	mutex_enter(&p->p_lock);
778 	(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
779 	    p->p_rctls, p, RCA_SAFE);
780 	mutex_exit(&p->p_lock);
781 }
782 /* ONC_PLUS EXTRACT END */
783