sysv_shm.c (e911eafcbae5366e66f9560ca18bf60c5f8157a4) -> sysv_shm.c (aa8de40ae504c80301d07b7a4cfa74359792cc72)
1 /* $Id: sysv_shm.c,v 1.18 1996/02/23 18:49:18 peter Exp $ */
1 /* $Id: sysv_shm.c,v 1.19 1996/05/02 14:20:26 phk Exp $ */
2 /* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
3
4 /*
5 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:

--- 126 unchanged lines hidden ---

136 static void
137 shm_deallocate_segment(shmseg)
138 struct shmid_ds *shmseg;
139 {
140 struct shm_handle *shm_handle;
141 size_t size;
142
143 shm_handle = shmseg->shm_internal;
144 size = (shmseg->shm_segsz + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
144 size = round_page(shmseg->shm_segsz);
145 (void) vm_map_remove(sysvshm_map, shm_handle->kva, shm_handle->kva + size);
146 free((caddr_t)shm_handle, M_SHM);
147 shmseg->shm_internal = NULL;
148 shm_committed -= btoc(size);
149 shm_nused--;
150 shmseg->shm_perm.mode = SHMSEG_FREE;
151 }
152
153 static int
154 shm_delete_mapping(p, shmmap_s)
155 struct proc *p;
156 struct shmmap_state *shmmap_s;
157 {
158 struct shmid_ds *shmseg;
159 int segnum, result;
160 size_t size;
161
162 segnum = IPCID_TO_IX(shmmap_s->shmid);
163 shmseg = &shmsegs[segnum];
164 size = (shmseg->shm_segsz + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
164 size = round_page(shmseg->shm_segsz);
165 result = vm_map_remove(&p->p_vmspace->vm_map, shmmap_s->va, shmmap_s->va + size);
166 if (result != KERN_SUCCESS)
167 return EINVAL;
168 shmmap_s->shmid = -1;
169 shmseg->shm_dtime = time.tv_sec;
170 if ((--shmseg->shm_nattch <= 0) &&
171 (shmseg->shm_perm.mode & SHMSEG_REMOVED)) {
172 shm_deallocate_segment(shmseg);

--- 68 unchanged lines hidden ---

241 return error;
242 for (i = 0; i < shminfo.shmseg; i++) {
243 if (shmmap_s->shmid == -1)
244 break;
245 shmmap_s++;
246 }
247 if (i >= shminfo.shmseg)
248 return EMFILE;
249 size = (shmseg->shm_segsz + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
249 size = round_page(shmseg->shm_segsz);
250 prot = VM_PROT_READ;
251 if ((uap->shmflg & SHM_RDONLY) == 0)
252 prot |= VM_PROT_WRITE;
253 flags = MAP_ANON | MAP_SHARED;
254 if (uap->shmaddr) {
255 flags |= MAP_FIXED;
256 if (uap->shmflg & SHM_RND)
257 attach_va = (vm_offset_t)uap->shmaddr & ~(SHMLBA-1);

--- 200 unchanged lines hidden ---

458 struct ucred *cred = p->p_ucred;
459 struct shmid_ds *shmseg;
460 struct shm_handle *shm_handle;
461
462 if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
463 return EINVAL;
464 if (shm_nused >= shminfo.shmmni) /* any shmids left? */
465 return ENOSPC;
466 size = (shmseg->shm_segsz + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
466 size = round_page(shmseg->shm_segsz);
467 if (shm_committed + btoc(size) > shminfo.shmall)
468 return ENOMEM;
469 if (shm_last_free < 0) {
470 for (i = 0; i < shminfo.shmmni; i++)
471 if (shmsegs[i].shm_perm.mode & SHMSEG_FREE)
472 break;
473 if (i == shminfo.shmmni)
474 panic("shmseg free count inconsistent");

--- 142 unchanged lines hidden ---
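
Every hunk above makes the same substitution: the open-coded page round-up (shm_segsz + PAGE_SIZE-1) & ~(PAGE_SIZE-1) is replaced by round_page(shm_segsz). The two forms compute the same value whenever PAGE_SIZE is a power of two. The sketch below is illustrative only; the PAGE_SIZE value and the round_page() definition are stand-ins, not the kernel's machine-dependent ones, and exist just to show the equivalence.

/*
 * Minimal user-space sketch: an open-coded page round-up compared with a
 * round_page()-style macro.  PAGE_SIZE and round_page() are hypothetical
 * stand-ins here, assumed (like the real ones) to involve a power-of-two
 * page size.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL	/* hypothetical page size, power of two */
#define round_page(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int
main(void)
{
	size_t sizes[] = { 0, 1, PAGE_SIZE - 1, PAGE_SIZE, PAGE_SIZE + 1 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* The expression removed by this diff... */
		size_t open_coded = (sizes[i] + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

		/* ...matches the macro that replaced it. */
		assert(open_coded == round_page(sizes[i]));
		printf("%zu rounds up to %zu\n", sizes[i],
		    (size_t)round_page(sizes[i]));
	}
	return (0);
}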