xref: /titanic_50/usr/src/uts/common/syscall/fcntl.c (revision cb511613a15cb3949eadffd67b37d3c665b4ef22)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /* ONC_PLUS EXTRACT START */
23 /*
24  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
25  * Use is subject to license terms.
26  */
27 
28 /*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989 AT&T	*/
29 /*	  All Rights Reserved  	*/
30 
31 /*
32  * Portions of this source code were derived from Berkeley 4.3 BSD
33  * under license from the Regents of the University of California.
34  */
35 
36 #pragma ident	"%Z%%M%	%I%	%E% SMI"
37 /* ONC_PLUS EXTRACT END */
38 
39 #include <sys/param.h>
40 #include <sys/isa_defs.h>
41 #include <sys/types.h>
42 #include <sys/sysmacros.h>
43 #include <sys/systm.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 /* ONC_PLUS EXTRACT START */
47 #include <sys/flock.h>
48 /* ONC_PLUS EXTRACT END */
49 #include <sys/vnode.h>
50 #include <sys/file.h>
51 #include <sys/mode.h>
52 #include <sys/proc.h>
53 #include <sys/filio.h>
54 #include <sys/share.h>
55 #include <sys/debug.h>
56 #include <sys/rctl.h>
57 #include <sys/nbmlock.h>
58 
59 #include <sys/cmn_err.h>
60 
61 /* ONC_PLUS EXTRACT START */
62 static int flock_check(vnode_t *, flock64_t *, offset_t, offset_t);
63 static int flock_get_start(vnode_t *, flock64_t *, offset_t, u_offset_t *);
64 static void fd_too_big(proc_t *);
65 
66 /*
67  * File control.
68  */
/*
 * fcntl(2) system call entry point.
 *
 * fdes - file descriptor to operate on
 * cmd  - F_* command (see <sys/fcntl.h>)
 * arg  - command-specific integer or user-space pointer
 *
 * Returns the command's result value, or -1 with errno set (via
 * set_errno()) on failure.
 *
 * The implementation is organized in three tiers for speed:
 *   1. commands needing neither getf() nor copyin (F_GETFD, F_SETFD, ...)
 *   2. commands needing getf()/releasef() but no copyin (F_DUPFD, ...)
 *   3. the expensive commands that copy lock/share structures in and out.
 */
int
fcntl(int fdes, int cmd, intptr_t arg)
{
	int iarg;
	int error = 0;
	int retval;
	proc_t *p;
	file_t *fp;
	vnode_t *vp;
	u_offset_t offset;
	u_offset_t start;
	struct vattr vattr;
	int in_crit;		/* nonzero iff we hold an NBM crit region */
	int flag;
	struct flock sbf;	/* native struct flock scratch */
	struct flock64 bf;	/* canonical 64-bit lock passed to VOPs */
	struct o_flock obf;	/* SVR3 compatibility flock */
	struct flock64_32 bf64_32;	/* ILP32 layout of flock64 */
	struct fshare fsh;
	struct shrlock shr;
	struct shr_locowner shr_own;
	offset_t maxoffset;	/* largest offset for caller's data model */
	model_t datamodel;
	int fdres;

	/*
	 * Compile-time layout assumptions: the overlay copies below rely
	 * on these structures being the same size in each data model.
	 */
#if defined(_ILP32) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock32));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_32));
#endif
#if defined(_LP64) && !defined(lint) && defined(_SYSCALL32)
	ASSERT(sizeof (struct flock) == sizeof (struct flock64_64));
	ASSERT(sizeof (struct flock64) == sizeof (struct flock64_64));
#endif

	/*
	 * First, for speed, deal with the subset of cases
	 * that do not require getf() / releasef().
	 */
	switch (cmd) {
	case F_GETFD:
		if ((error = f_getfd_error(fdes, &flag)) == 0)
			retval = flag;
		goto out;

	case F_SETFD:
		error = f_setfd_error(fdes, (int)arg);
		retval = 0;
		goto out;

	case F_GETFL:
		if ((error = f_getfl(fdes, &flag)) == 0)
			retval = (flag & (FMASK | FASYNC)) + FOPEN;
		goto out;

	case F_GETXFL:
		if ((error = f_getfl(fdes, &flag)) == 0)
			retval = flag + FOPEN;
		goto out;

	case F_BADFD:
		if ((error = f_badfd(fdes, &fdres, (int)arg)) == 0)
			retval = fdres;
		goto out;
	}

	/*
	 * Second, for speed, deal with the subset of cases that
	 * require getf() / releasef() but do not require copyin.
	 */
	if ((fp = getf(fdes)) == NULL) {
		error = EBADF;
		goto out;
	}
	iarg = (int)arg;

	switch (cmd) {
/* ONC_PLUS EXTRACT END */

	case F_DUPFD:
		p = curproc;
		if ((uint_t)iarg >= p->p_fno_ctl) {
			/*
			 * A negative iarg wraps to a huge uint_t and lands
			 * here too, but only a genuinely too-large request
			 * triggers the rctl action.
			 */
			if (iarg >= 0)
				fd_too_big(p);
			error = EINVAL;
		} else if ((retval = ufalloc_file(iarg, fp)) == -1) {
			error = EMFILE;
		} else {
			/* New descriptor shares fp; bump its refcount. */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
		}
		goto done;

	case F_DUP2FD:
		p = curproc;
		if (fdes == iarg) {
			retval = iarg;
		} else if ((uint_t)iarg >= p->p_fno_ctl) {
			if (iarg >= 0)
				fd_too_big(p);
			error = EBADF;
		} else {
			/*
			 * We can't hold our getf(fdes) across the call to
			 * closeandsetf() because it creates a window for
			 * deadlock: if one thread is doing dup2(a, b) while
			 * another is doing dup2(b, a), each one will block
			 * waiting for the other to call releasef().  The
			 * solution is to increment the file reference count
			 * (which we have to do anyway), then releasef(fdes),
			 * then closeandsetf().  Incrementing f_count ensures
			 * that fp won't disappear after we call releasef().
			 * When closeandsetf() fails, we try avoid calling
			 * closef() because of all the side effects.
			 */
			mutex_enter(&fp->f_tlock);
			fp->f_count++;
			mutex_exit(&fp->f_tlock);
			releasef(fdes);
			if ((error = closeandsetf(iarg, fp)) == 0) {
				retval = iarg;
			} else {
				/*
				 * Undo the refcount bump; only call closef()
				 * if ours was the last reference.
				 */
				mutex_enter(&fp->f_tlock);
				if (fp->f_count > 1) {
					fp->f_count--;
					mutex_exit(&fp->f_tlock);
				} else {
					mutex_exit(&fp->f_tlock);
					(void) closef(fp);
				}
			}
			/* fdes was already releasef()'d above; skip done: */
			goto out;
		}
		goto done;

	case F_SETFL:
		vp = fp->f_vnode;
		flag = fp->f_flag;
		/* FNONBLOCK takes precedence when both are requested */
		if ((iarg & (FNONBLOCK|FNDELAY)) == (FNONBLOCK|FNDELAY))
			iarg &= ~FNDELAY;
		if ((error = VOP_SETFL(vp, flag, iarg, fp->f_cred, NULL)) ==
		    0) {
			/* FREAD/FWRITE are fixed at open; preserve them */
			iarg &= FMASK;
			mutex_enter(&fp->f_tlock);
			fp->f_flag &= ~FMASK | (FREAD|FWRITE);
			fp->f_flag |= (iarg - FOPEN) & ~(FREAD|FWRITE);
			mutex_exit(&fp->f_tlock);
		}
		retval = 0;
		goto done;
	}

	/*
	 * Finally, deal with the expensive cases.
	 */
	retval = 0;
	in_crit = 0;
	maxoffset = MAXOFF_T;
	datamodel = DATAMODEL_NATIVE;
#if defined(_SYSCALL32_IMPL)
	if ((datamodel = get_udatamodel()) == DATAMODEL_ILP32)
		maxoffset = MAXOFF32_T;
#endif

	vp = fp->f_vnode;
	flag = fp->f_flag;
	offset = fp->f_offset;

	switch (cmd) {
/* ONC_PLUS EXTRACT START */
	/*
	 * The file system and vnode layers understand and implement
	 * locking with flock64 structures. So here once we pass through
	 * the test for compatibility as defined by LFS API, (for F_SETLK,
	 * F_SETLKW, F_GETLK, F_GETLKW, F_FREESP) we transform
	 * the flock structure to a flock64 structure and send it to the
	 * lower layers. Similarly in case of GETLK the returned flock64
	 * structure is transformed to a flock structure if everything fits
	 * in nicely, otherwise we return EOVERFLOW.
	 */

	case F_GETLK:
	case F_O_GETLK:
	case F_SETLK:
	case F_SETLKW:
	case F_SETLK_NBMAND:

		/*
		 * Copy in input fields only.
		 */

		if (cmd == F_O_GETLK) {
			/* SVR3 compatibility command; 32-bit callers only */
			if (datamodel != DATAMODEL_ILP32) {
				error = EINVAL;
				break;
			}

			if (copyin((void *)arg, &obf, sizeof (obf))) {
				error = EFAULT;
				break;
			}
			bf.l_type = obf.l_type;
			bf.l_whence = obf.l_whence;
			bf.l_start = (off64_t)obf.l_start;
			bf.l_len = (off64_t)obf.l_len;
			bf.l_sysid = (int)obf.l_sysid;
			bf.l_pid = obf.l_pid;
		} else if (datamodel == DATAMODEL_NATIVE) {
			if (copyin((void *)arg, &sbf, sizeof (sbf))) {
				error = EFAULT;
				break;
			}
			/*
			 * XXX	In an LP64 kernel with an LP64 application
			 *	there's no need to do a structure copy here
			 *	struct flock == struct flock64. However,
			 *	we did it this way to avoid more conditional
			 *	compilation.
			 */
			bf.l_type = sbf.l_type;
			bf.l_whence = sbf.l_whence;
			bf.l_start = (off64_t)sbf.l_start;
			bf.l_len = (off64_t)sbf.l_len;
			bf.l_sysid = sbf.l_sysid;
			bf.l_pid = sbf.l_pid;
		}
#if defined(_SYSCALL32_IMPL)
		else {
			/* ILP32 caller on an LP64 kernel: widen flock32 */
			struct flock32 sbf32;
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			}
			bf.l_type = sbf32.l_type;
			bf.l_whence = sbf32.l_whence;
			bf.l_start = (off64_t)sbf32.l_start;
			bf.l_len = (off64_t)sbf32.l_len;
			bf.l_sysid = sbf32.l_sysid;
			bf.l_pid = sbf32.l_pid;
		}
#endif /* _SYSCALL32_IMPL */

		/*
		 * 64-bit support: check for overflow for 32-bit lock ops
		 */
		if ((error = flock_check(vp, &bf, offset, maxoffset)) != 0)
			break;

		/*
		 * Not all of the filesystems understand F_O_GETLK, and
		 * there's no need for them to know.  Map it to F_GETLK.
		 */
		if ((error = VOP_FRLOCK(vp, (cmd == F_O_GETLK) ? F_GETLK : cmd,
		    &bf, flag, offset, NULL, fp->f_cred, NULL)) != 0)
			break;

		/*
		 * If command is GETLK and no lock is found, only
		 * the type field is changed.
		 */
		if ((cmd == F_O_GETLK || cmd == F_GETLK) &&
		    bf.l_type == F_UNLCK) {
			/* l_type always first entry, always a short */
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_O_GETLK) {
			/*
			 * Return an SVR3 flock structure to the user.
			 */
			obf.l_type = (int16_t)bf.l_type;
			obf.l_whence = (int16_t)bf.l_whence;
			obf.l_start = (int32_t)bf.l_start;
			obf.l_len = (int32_t)bf.l_len;
			if (bf.l_sysid > SHRT_MAX || bf.l_pid > SHRT_MAX) {
				/*
				 * One or both values for the above fields
				 * is too large to store in an SVR3 flock
				 * structure.
				 */
				error = EOVERFLOW;
				break;
			}
			obf.l_sysid = (int16_t)bf.l_sysid;
			obf.l_pid = (int16_t)bf.l_pid;
			if (copyout(&obf, (void *)arg, sizeof (obf)))
				error = EFAULT;
		} else if (cmd == F_GETLK) {
			/*
			 * Copy out SVR4 flock.
			 */
			int i;

			if (bf.l_start > maxoffset || bf.l_len > maxoffset) {
				error = EOVERFLOW;
				break;
			}

			if (datamodel == DATAMODEL_NATIVE) {
				for (i = 0; i < 4; i++)
					sbf.l_pad[i] = 0;
				/*
				 * XXX	In an LP64 kernel with an LP64
				 *	application there's no need to do a
				 *	structure copy here as currently
				 *	struct flock == struct flock64.
				 *	We did it this way to avoid more
				 *	conditional compilation.
				 */
				sbf.l_type = bf.l_type;
				sbf.l_whence = bf.l_whence;
				sbf.l_start = (off_t)bf.l_start;
				sbf.l_len = (off_t)bf.l_len;
				sbf.l_sysid = bf.l_sysid;
				sbf.l_pid = bf.l_pid;
				if (copyout(&sbf, (void *)arg, sizeof (sbf)))
					error = EFAULT;
			}
#if defined(_SYSCALL32_IMPL)
			else {
				/* Narrow the result for an ILP32 caller. */
				struct flock32 sbf32;
				if (bf.l_start > MAXOFF32_T ||
				    bf.l_len > MAXOFF32_T) {
					error = EOVERFLOW;
					break;
				}
				for (i = 0; i < 4; i++)
					sbf32.l_pad[i] = 0;
				sbf32.l_type = (int16_t)bf.l_type;
				sbf32.l_whence = (int16_t)bf.l_whence;
				sbf32.l_start = (off32_t)bf.l_start;
				sbf32.l_len = (off32_t)bf.l_len;
				sbf32.l_sysid = (int32_t)bf.l_sysid;
				sbf32.l_pid = (pid32_t)bf.l_pid;
				if (copyout(&sbf32,
				    (void *)arg, sizeof (sbf32)))
					error = EFAULT;
			}
#endif
		}
		break;
/* ONC_PLUS EXTRACT END */

	case F_CHKFL:
		/*
		 * This is for internal use only, to allow the vnode layer
		 * to validate a flags setting before applying it.  User
		 * programs can't issue it.
		 */
		error = EINVAL;
		break;

	case F_ALLOCSP:
	case F_FREESP:
	case F_ALLOCSP64:
	case F_FREESP64:
		/*
		 * Test for not-a-regular-file (and returning EINVAL)
		 * before testing for open-for-writing (and returning EBADF).
		 * This is relied upon by posix_fallocate() in libc.
		 */
		if (vp->v_type != VREG) {
			error = EINVAL;
			break;
		}

		if ((flag & FWRITE) == 0) {
			error = EBADF;
			break;
		}

		/* The *64 variants are exclusively for ILP32 callers. */
		if (datamodel != DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
			error = EINVAL;
			break;
		}

#if defined(_ILP32) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP || cmd == F_FREESP)) {
			struct flock32 sbf32;
			/*
			 * For compatibility we overlay an SVR3 flock on an SVR4
			 * flock.  This works because the input field offsets
			 * in "struct flock" were preserved.
			 */
			if (copyin((void *)arg, &sbf32, sizeof (sbf32))) {
				error = EFAULT;
				break;
			} else {
				bf.l_type = sbf32.l_type;
				bf.l_whence = sbf32.l_whence;
				bf.l_start = (off64_t)sbf32.l_start;
				bf.l_len = (off64_t)sbf32.l_len;
				bf.l_sysid = sbf32.l_sysid;
				bf.l_pid = sbf32.l_pid;
			}
		}
#endif /* _ILP32 || _SYSCALL32_IMPL */

#if defined(_LP64)
		if (datamodel == DATAMODEL_LP64 &&
		    (cmd == F_ALLOCSP || cmd == F_FREESP)) {
			if (copyin((void *)arg, &bf, sizeof (bf))) {
				error = EFAULT;
				break;
			}
		}
#endif /* defined(_LP64) */

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
		if (datamodel == DATAMODEL_ILP32 &&
		    (cmd == F_ALLOCSP64 || cmd == F_FREESP64)) {
			if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
				error = EFAULT;
				break;
			} else {
				/*
				 * Note that the size of flock64 is different in
				 * the ILP32 and LP64 models, due to the l_pad
				 * field. We do not want to assume that the
				 * flock64 structure is laid out the same in
				 * ILP32 and LP64 environments, so we will
				 * copy in the ILP32 version of flock64
				 * explicitly and copy it to the native
				 * flock64 structure.
				 */
				bf.l_type = (short)bf64_32.l_type;
				bf.l_whence = (short)bf64_32.l_whence;
				bf.l_start = bf64_32.l_start;
				bf.l_len = bf64_32.l_len;
				bf.l_sysid = (int)bf64_32.l_sysid;
				bf.l_pid = (pid_t)bf64_32.l_pid;
			}
		}
#endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */

		/* Range-check against the command's addressable maximum. */
		if (cmd == F_ALLOCSP || cmd == F_FREESP)
			error = flock_check(vp, &bf, offset, maxoffset);
		else if (cmd == F_ALLOCSP64 || cmd == F_FREESP64)
			error = flock_check(vp, &bf, offset, MAXOFFSET_T);
		if (error)
			break;

		if (vp->v_type == VREG && bf.l_len == 0 &&
		    bf.l_start > OFFSET_MAX(fp)) {
			error = EFBIG;
			break;
		}

		/*
		 * Make sure that there are no conflicting non-blocking
		 * mandatory locks in the region being manipulated. If
		 * there are such locks then return EACCES.
		 */
		if ((error = flock_get_start(vp, &bf, offset, &start)) != 0)
			break;

		if (nbl_need_check(vp)) {
			u_offset_t	begin;
			ssize_t		length;

			/* in_crit makes the cleanup path end the region */
			nbl_start_crit(vp, RW_READER);
			in_crit = 1;
			vattr.va_mask = AT_SIZE;
			if ((error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
			    != 0)
				break;
			/* Region between current EOF and requested start. */
			begin = start > vattr.va_size ? vattr.va_size : start;
			length = vattr.va_size > start ? vattr.va_size - start :
			    start - vattr.va_size;
			if (nbl_conflict(vp, NBL_WRITE, begin, length, 0,
			    NULL)) {
				error = EACCES;
				break;
			}
		}

		/* Lower layers only know the non-64 command names. */
		if (cmd == F_ALLOCSP64)
			cmd = F_ALLOCSP;
		else if (cmd == F_FREESP64)
			cmd = F_FREESP;

		error = VOP_SPACE(vp, cmd, &bf, flag, offset, fp->f_cred, NULL);

		break;

#if !defined(_LP64) || defined(_SYSCALL32_IMPL)
/* ONC_PLUS EXTRACT START */
	case F_GETLK64:
	case F_SETLK64:
	case F_SETLKW64:
	case F_SETLK64_NBMAND:
		/*
		 * Large Files: Here we set cmd as *LK and send it to
		 * lower layers. *LK64 is only for the user land.
		 * Most of the comments described above for F_SETLK
		 * applies here too.
		 * Large File support is only needed for ILP32 apps!
		 */
		if (datamodel != DATAMODEL_ILP32) {
			error = EINVAL;
			break;
		}

		if (cmd == F_GETLK64)
			cmd = F_GETLK;
		else if (cmd == F_SETLK64)
			cmd = F_SETLK;
		else if (cmd == F_SETLKW64)
			cmd = F_SETLKW;
		else if (cmd == F_SETLK64_NBMAND)
			cmd = F_SETLK_NBMAND;

		/*
		 * Note that the size of flock64 is different in the ILP32
		 * and LP64 models, due to the l_pad field.
		 * We do not want to assume that the flock64 structure is
		 * laid out in the same in ILP32 and LP64 environments, so
		 * we will copy in the ILP32 version of flock64 explicitly
		 * and copy it to the native flock64 structure.
		 */

		if (copyin((void *)arg, &bf64_32, sizeof (bf64_32))) {
			error = EFAULT;
			break;
		}

		bf.l_type = (short)bf64_32.l_type;
		bf.l_whence = (short)bf64_32.l_whence;
		bf.l_start = bf64_32.l_start;
		bf.l_len = bf64_32.l_len;
		bf.l_sysid = (int)bf64_32.l_sysid;
		bf.l_pid = (pid_t)bf64_32.l_pid;

		if ((error = flock_check(vp, &bf, offset, MAXOFFSET_T)) != 0)
			break;

		if ((error = VOP_FRLOCK(vp, cmd, &bf, flag, offset,
		    NULL, fp->f_cred, NULL)) != 0)
			break;

		/* GETLK with no conflicting lock: only l_type changes. */
		if ((cmd == F_GETLK) && bf.l_type == F_UNLCK) {
			if (copyout(&bf.l_type, &((struct flock *)arg)->l_type,
			    sizeof (bf.l_type)))
				error = EFAULT;
			break;
		}

		if (cmd == F_GETLK) {
			int i;

			/*
			 * We do not want to assume that the flock64 structure
			 * is laid out in the same in ILP32 and LP64
			 * environments, so we will copy out the ILP32 version
			 * of flock64 explicitly after copying the native
			 * flock64 structure to it.
			 */
			for (i = 0; i < 4; i++)
				bf64_32.l_pad[i] = 0;
			bf64_32.l_type = (int16_t)bf.l_type;
			bf64_32.l_whence = (int16_t)bf.l_whence;
			bf64_32.l_start = bf.l_start;
			bf64_32.l_len = bf.l_len;
			bf64_32.l_sysid = (int32_t)bf.l_sysid;
			bf64_32.l_pid = (pid32_t)bf.l_pid;
			if (copyout(&bf64_32, (void *)arg, sizeof (bf64_32)))
				error = EFAULT;
		}
		break;
/* ONC_PLUS EXTRACT END */
#endif /* !defined(_LP64) || defined(_SYSCALL32_IMPL) */

/* ONC_PLUS EXTRACT START */
	case F_SHARE:
	case F_SHARE_NBMAND:
	case F_UNSHARE:

		/*
		 * Copy in input fields only.
		 */
		if (copyin((void *)arg, &fsh, sizeof (fsh))) {
			error = EFAULT;
			break;
		}

		/*
		 * Local share reservations always have this simple form
		 */
		shr.s_access = fsh.f_access;
		shr.s_deny = fsh.f_deny;
		shr.s_sysid = 0;
		shr.s_pid = ttoproc(curthread)->p_pid;
		shr_own.sl_pid = shr.s_pid;
		shr_own.sl_id = fsh.f_id;
		shr.s_own_len = sizeof (shr_own);
		shr.s_owner = (caddr_t)&shr_own;
		error = VOP_SHRLOCK(vp, cmd, &shr, flag, fp->f_cred, NULL);
/* ONC_PLUS EXTRACT END */
		break;

	default:
		error = EINVAL;
		break;
	}

	/* Leave the non-blocking-mandatory-lock critical region, if any. */
	if (in_crit)
		nbl_end_crit(vp);

done:
	releasef(fdes);
out:
	if (error)
		return (set_errno(error));
	return (retval);
}
689 
690 int
691 dup(int fd)
692 {
693 	return (fcntl(fd, F_DUPFD, 0));
694 }
695 
696 /* ONC_PLUS EXTRACT START */
/*
 * Validate an flock64 request against the addressable range [0, max].
 *
 * vp     - vnode the lock applies to (used only to fetch the size for
 *          SEEK_END-relative requests)
 * flp    - lock request; l_len may be rewritten to 0 (see below)
 * offset - current file offset, for SEEK_CUR-relative requests
 * max    - largest permissible offset (MAXOFF32_T, MAXOFF_T or MAXOFFSET_T
 *          depending on the caller's data model and command)
 *
 * Returns 0 if the resulting [start, end] range is representable and
 * ordered; EINVAL for a bad whence or out-of-range start; EOVERFLOW when
 * the arithmetic would exceed max; or the VOP_GETATTR error.
 */
int
flock_check(vnode_t *vp, flock64_t *flp, offset_t offset, offset_t max)
{
	struct vattr	vattr;
	int	error;
	u_offset_t start, end;

	/*
	 * Determine the starting point of the request
	 */
	switch (flp->l_whence) {
	case 0:		/* SEEK_SET */
		start = (u_offset_t)flp->l_start;
		if (start > max)
			return (EINVAL);
		break;
	case 1:		/* SEEK_CUR */
		/* Overflow check precedes the addition. */
		if (flp->l_start > (max - offset))
			return (EOVERFLOW);
		start = (u_offset_t)(flp->l_start + offset);
		if (start > max)
			return (EINVAL);
		break;
	case 2:		/* SEEK_END */
		vattr.va_mask = AT_SIZE;
		if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
			return (error);
		if (flp->l_start > (max - (offset_t)vattr.va_size))
			return (EOVERFLOW);
		start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
		if (start > max)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	/*
	 * Determine the range covered by the request.
	 */
	if (flp->l_len == 0)
		end = MAXEND;	/* zero length means "to end of file" */
	else if ((offset_t)flp->l_len > 0) {
		if (flp->l_len > (max - start + 1))
			return (EOVERFLOW);
		end = (u_offset_t)(start + (flp->l_len - 1));
		ASSERT(end <= max);
	} else {
		/*
		 * Negative length; why do we even allow this ?
		 * Because this allows easy specification of
		 * the last n bytes of the file.
		 */
		/* The region extends backwards: [start+l_len+1, start]. */
		end = start;
		start += (u_offset_t)flp->l_len;
		(start)++;
		if (start > max)
			return (EINVAL);
		ASSERT(end <= max);
	}
	ASSERT(start <= max);
	/*
	 * An unlock reaching exactly to max is canonicalized to l_len == 0
	 * ("to end of file") so lower layers release the whole tail.
	 */
	if (flp->l_type == F_UNLCK && flp->l_len > 0 &&
	    end == (offset_t)max) {
		flp->l_len = 0;
	}
	if (start  > end)
		return (EINVAL);
	return (0);
}
766 
767 static int
768 flock_get_start(vnode_t *vp, flock64_t *flp, offset_t offset, u_offset_t *start)
769 {
770 	struct vattr	vattr;
771 	int	error;
772 
773 	/*
774 	 * Determine the starting point of the request. Assume that it is
775 	 * a valid starting point.
776 	 */
777 	switch (flp->l_whence) {
778 	case 0:		/* SEEK_SET */
779 		*start = (u_offset_t)flp->l_start;
780 		break;
781 	case 1:		/* SEEK_CUR */
782 		*start = (u_offset_t)(flp->l_start + offset);
783 		break;
784 	case 2:		/* SEEK_END */
785 		vattr.va_mask = AT_SIZE;
786 		if (error = VOP_GETATTR(vp, &vattr, 0, CRED(), NULL))
787 			return (error);
788 		*start = (u_offset_t)(flp->l_start + (offset_t)vattr.va_size);
789 		break;
790 	default:
791 		return (EINVAL);
792 	}
793 
794 	return (0);
795 }
796 
797 /*
798  * Take rctl action when the requested file descriptor is too big.
799  */
static void
fd_too_big(proc_t *p)
{
	/*
	 * Fire the legacy RLIMIT_NOFILE resource control action for
	 * process p (e.g. to deliver any configured signal/logging).
	 * p_lock is taken across the call; presumably required by the
	 * rctl framework -- confirm against rctl_action()'s contract.
	 * The return value is deliberately ignored: the caller reports
	 * its own errno regardless of the rctl outcome.
	 */
	mutex_enter(&p->p_lock);
	(void) rctl_action(rctlproc_legacy[RLIMIT_NOFILE],
	    p->p_rctls, p, RCA_SAFE);
	mutex_exit(&p->p_lock);
}
808 /* ONC_PLUS EXTRACT END */
809