vm_mmap.c: diff of revision e4ca250d4bf1d5e2a40ad0f4a7ec960fa294f848 (old) against 190609dd48b42678141934248d836da0c67471b1 (new); lines present only in the new revision are prefixed with "+".
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.

--- 31 unchanged lines hidden (view full) ---

 * @(#)vm_mmap.c	8.4 (Berkeley) 1/12/94
 * $FreeBSD$
 */

/*
 * Mapped file (mmap) interface to VM
 */
48#include "opt_bleed.h"
48#include "opt_compat.h"
49#include "opt_rlimit.h"
50
51#include <sys/param.h>
52#include <sys/systm.h>
53#include <sys/kernel.h>
54#include <sys/lock.h>
55#include <sys/mutex.h>

--- 465 unchanged lines hidden (view full) ---
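The added opt_bleed.h is a config(8)-generated option header: building the kernel with the BLEED option defines the BLEED macro, and every vm_mtx critical section in this revision conditionally takes Giant when it is absent. A minimal sketch of what the generated header would contain, assuming BLEED is declared as an ordinary boolean option (the declaration site in sys/conf/options is an assumption here, not taken from this diff):

/*
 * Hypothetical contents of opt_bleed.h generated for a kernel
 * configuration containing "options BLEED"; without that line the
 * header is generated empty and the #ifndef BLEED blocks below
 * compile in the Giant acquisitions.
 */
#define	BLEED	1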


	/*
	 * XXX Gak!  If size is zero we are supposed to sync "all modified
	 * pages with the region containing addr".  Unfortunately, we don't
	 * really keep track of individual mmaps so we approximate by flushing
	 * the range of the map entry containing addr.  This can be incorrect
	 * if the region splits or is coalesced with a neighbor.
	 */
+#ifndef BLEED
+	mtx_lock(&Giant);
+#endif
	mtx_lock(&vm_mtx);
	if (size == 0) {
		vm_map_entry_t entry;

		vm_map_lock_read(map);
		rv = vm_map_lookup_entry(map, addr, &entry);
		vm_map_unlock_read(map);
		if (rv == FALSE) {
			mtx_unlock(&vm_mtx);
+#ifndef BLEED
+			mtx_unlock(&Giant);
+#endif
			return (EINVAL);
		}
		addr = entry->start;
		size = entry->end - entry->start;
	}

	/*
	 * Clean the pages and interpret the return value.
	 */
	rv = vm_map_clean(map, addr, addr + size, (flags & MS_ASYNC) == 0,
	    (flags & MS_INVALIDATE) != 0);

	mtx_unlock(&vm_mtx);
+#ifndef BLEED
+	mtx_unlock(&Giant);
+#endif
	switch (rv) {
	case KERN_SUCCESS:
		break;
	case KERN_INVALID_ADDRESS:
		return (EINVAL);	/* Sun returns ENOMEM? */
	case KERN_FAILURE:
		return (EIO);
	default:

--- 142 unchanged lines hidden (view full) ---
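As the XXX comment above notes, msync(2) with a size of zero is approximated by flushing the whole map entry containing addr. A minimal userland sketch of that case, assuming the len == 0 semantics this file implements (the file name and the 4096-byte length are illustrative only):

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <string.h>

int
main(void)
{
	char *p;
	int fd;

	if ((fd = open("data.bin", O_RDWR)) == -1)
		err(1, "open");
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	memset(p, 'x', 4096);			/* dirty the mapping */
	/* len == 0: sync the entire map entry containing p */
	if (msync(p, 0, MS_SYNC) == -1)
		err(1, "msync");		/* EINVAL if p maps nothing */
	return (0);
}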


	pageoff = (addr & PAGE_MASK);
	addr -= pageoff;
	size += pageoff;
	size = (vm_size_t) round_page(size);
	if (addr + size < addr)
		return (EINVAL);

+#ifndef BLEED
+	mtx_lock(&Giant);
+#endif
	mtx_lock(&vm_mtx);
	ret = vm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
	    inherit);
	mtx_unlock(&vm_mtx);
+#ifndef BLEED
+	mtx_unlock(&Giant);
+#endif

	switch (ret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	}
	return (EINVAL);

--- 37 unchanged lines hidden (view full) ---
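The switch above is the whole caller-visible contract: minherit(2) returns 0, EACCES on a protection failure, or EINVAL otherwise. A minimal, hypothetical userland use that keeps a region out of fork(2) children (the helper name, buffer, and length are illustrative, not from this file):

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>

/* Illustrative only: mark [buf, buf+len) as not inherited across fork(). */
static void
drop_from_children(void *buf, size_t len)
{
	if (minherit(buf, len, INHERIT_NONE) == -1)
		err(1, "minherit");	/* EACCES or EINVAL, per the switch */
}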


	/*
	 * Since this routine is only advisory, we default to conservative
	 * behavior.
	 */
	start = trunc_page((vm_offset_t) uap->addr);
	end = round_page((vm_offset_t) uap->addr + uap->len);

+#ifndef BLEED
+	mtx_lock(&Giant);
+#endif
	mtx_lock(&vm_mtx);
	ret = vm_map_madvise(&p->p_vmspace->vm_map, start, end, uap->behav);
	mtx_unlock(&vm_mtx);
+#ifndef BLEED
+	mtx_unlock(&Giant);
+#endif
	return (ret ? EINVAL : 0);
}

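Because the call is only advisory, a caller gets EINVAL for a bad range or behavior and 0 otherwise, with no promise of any effect. A minimal, hypothetical sketch of the userland side (the helper name is illustrative):

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>

/* Illustrative only: hint that a mapped region will be needed soon. */
static void
prefetch_hint(void *base, size_t len)
{
	/* The kernel truncates/rounds base and len to page boundaries. */
	if (madvise(base, len, MADV_WILLNEED) == -1)
		warn("madvise");	/* EINVAL for a bad range or behav */
}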
#ifndef _SYS_SYSPROTO_H_
struct mincore_args {
	const void *addr;
	size_t len;
	char *vec;

--- 30 unchanged lines hidden (view full) ---

		return (EINVAL);

	/*
	 * Address of byte vector
	 */
	vec = uap->vec;

	map = &p->p_vmspace->vm_map;
+#ifndef BLEED
+	mtx_lock(&Giant);
+#endif
	mtx_lock(&vm_mtx);
	pmap = vmspace_pmap(p->p_vmspace);

	vm_map_lock_read(map);
RestartScan:
	timestamp = map->timestamp;

	if (!vm_map_lookup_entry(map, addr, &entry))

--- 78 unchanged lines hidden (view full) ---


		/*
		 * If we have skipped map entries, we need to make sure that
		 * the byte vector is zeroed for those skipped entries.
		 */
		while ((lastvecindex + 1) < vecindex) {
			error = subyte(vec + lastvecindex, 0);
			if (error) {
+#ifndef BLEED
+				mtx_unlock(&Giant);
+#endif
				return (EFAULT);
			}
			++lastvecindex;
		}

		/*
		 * Pass the page information to the user
		 */
		error = subyte(vec + vecindex, mincoreinfo);
		if (error) {
+#ifndef BLEED
+			mtx_unlock(&Giant);
+#endif
			return (EFAULT);
		}

		/*
		 * If the map has changed, due to the subyte, the previous
		 * output may be invalid.
		 */
		mtx_lock(&vm_mtx);

--- 15 unchanged lines hidden (view full) ---


	/*
	 * Zero the last entries in the byte vector.
	 */
	vecindex = OFF_TO_IDX(end - first_addr);
	while ((lastvecindex + 1) < vecindex) {
		error = subyte(vec + lastvecindex, 0);
		if (error) {
+#ifndef BLEED
+			mtx_unlock(&Giant);
+#endif
			return (EFAULT);
		}
		++lastvecindex;
	}

	/*
	 * If the map has changed, due to the subyte, the previous
	 * output may be invalid.
	 */
	mtx_lock(&vm_mtx);
	vm_map_lock_read(map);
	if (timestamp != map->timestamp)
		goto RestartScan;
	vm_map_unlock_read(map);
	mtx_unlock(&vm_mtx);
+#ifndef BLEED
+	mtx_unlock(&Giant);
+#endif

	return (0);
}
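The timestamp/RestartScan dance above exists because the locks are dropped around each subyte store into the user's vector, so a concurrent fault can change the map mid-scan; whenever the timestamp moves, the whole scan is redone. From userland none of this is visible. A minimal sketch of a mincore(2) caller (the eight-page size is illustrative; MINCORE_INCORE is the FreeBSD residency bit):

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t i, pagesz = (size_t)getpagesize();
	char *base, vec[8];

	base = mmap(NULL, 8 * pagesz, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (base == MAP_FAILED)
		err(1, "mmap");
	base[0] = 1;			/* fault in the first page only */
	if (mincore(base, 8 * pagesz, vec) == -1)
		err(1, "mincore");
	for (i = 0; i < 8; i++)
		printf("page %zu: %sresident\n", i,
		    (vec[i] & MINCORE_INCORE) ? "" : "not ");
	return (0);
}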

#ifndef _SYS_SYSPROTO_H_
struct mlock_args {
	const void *addr;
	size_t len;

--- 28 unchanged lines hidden (view full) ---

	    p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur)
		return (ENOMEM);
#else
	error = suser(p);
	if (error)
		return (error);
#endif

+#ifndef BLEED
+	mtx_lock(&Giant);
+#endif
	mtx_lock(&vm_mtx);
	error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
	    addr + size, FALSE);
	mtx_unlock(&vm_mtx);
+#ifndef BLEED
+	mtx_unlock(&Giant);
+#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}
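When pmap_wired_count is available the kernel enforces RLIMIT_MEMLOCK here (returning ENOMEM), otherwise it falls back to a superuser check. A minimal, hypothetical caller that checks the limit before wiring (wire_buffer is not a real API, just a sketch):

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <err.h>
#include <stdint.h>

/* Illustrative only: wire [buf, buf+len) so it cannot be paged out. */
static void
wire_buffer(void *buf, size_t len)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == -1)
		err(1, "getrlimit");
	if (len > rl.rlim_cur)
		errx(1, "len %ju exceeds RLIMIT_MEMLOCK %ju",
		    (uintmax_t)len, (uintmax_t)rl.rlim_cur);
	if (mlock(buf, len) == -1)
		err(1, "mlock");	/* ENOMEM past the limit or on failure */
}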

#ifndef _SYS_SYSPROTO_H_
struct mlockall_args {
	int how;
};
#endif

--- 48 unchanged lines hidden (view full) ---

		return (EINVAL);

#ifndef pmap_wired_count
	error = suser(p);
	if (error)
		return (error);
#endif

+#ifndef BLEED
+	mtx_lock(&Giant);
+#endif
	mtx_lock(&vm_mtx);
	error = vm_map_user_pageable(&p->p_vmspace->vm_map, addr,
	    addr + size, TRUE);
	mtx_unlock(&vm_mtx);
+#ifndef BLEED
+	mtx_unlock(&Giant);
+#endif
	return (error == KERN_SUCCESS ? 0 : ENOMEM);
}

/*
 * Internal version of mmap.
 * Currently used by mmap, exec, and sys5 shared memory.
 * Handle is either a vnode pointer or NULL for MAP_ANON.
 */

--- 158 unchanged lines hidden ---
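Per the comment above, vm_mmap() takes either a vnode pointer or a NULL handle for anonymous memory. The NULL-handle path is exactly what an ordinary anonymous mapping exercises from userland; a minimal sketch (the helper name is illustrative):

#include <sys/types.h>
#include <sys/mman.h>
#include <err.h>

/* Illustrative only: an anonymous mapping reaches vm_mmap() with no vnode. */
static void *
anon_region(size_t len)
{
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	return (p);
}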