/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "util.h"

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel*)ipc_lock(&shm_ids,id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel*)ipc_get(&shm_ids,id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

size_t	shm_ctlmax = SHMMAX;
size_t	shm_ctlall = SHMALL;
int	shm_ctlmni = SHMMNI;

static int shm_tot; /* total number of shared memory pages */

void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids,&s->shm_perm,id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}


static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
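 * Any SHM_LOCK accounting on the segment is undone and the reference
 * on the backing file is dropped as well.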
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
						shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids.mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);
}

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		if (!(vma->vm_flags & VM_WRITE))
			vma->vm_flags &= ~VM_MAYWRITE;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}

	return ret;
}

static struct file_operations shm_file_operations = {
	.mmap	= shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};
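/*
 * newseg - allocate a new, unattached shared memory segment
 *
 * @key: the key for the new segment
 * @shmflg: creation flags and permission mode
 * @size: requested segment size in bytes
 *
 * Called with shm_ids.mutex held.  Returns the new shm id on success,
 * or a negative error code.
 */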
static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow an unaccounted (SHM_NORESERVE) segment when
		 * OVERCOMMIT_NEVER is in effect, even if it is asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if(id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	mutex_lock(&shm_ids.mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		BUG_ON(shp==NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids.mutex);

	return err;
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	{
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
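		/* the legacy struct shminfo only has room for ints, so clamp shmmax */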
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if(!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}

asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err= shm_ids.max_id;
		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info,0,sizeof(shm_info));
		mutex_lock(&shm_ids.mutex);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		mutex_unlock(&shm_ids.mutex);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
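	/* SHM_STAT takes an index rather than a shm id and hands the id back. */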
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		} else if(cmd==SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err=-EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch	= shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
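		 * The actual freeing happens in shm_destroy() once
		 * shm_nattch drops back to zero.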
		 */
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if(err)
			goto out_unlock_up;

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err=-EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch){
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		mutex_unlock(&shm_ids.mutex);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err=-EINVAL;
		if(shp==NULL)
			goto out_up;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock_up;
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err=-EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids.mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
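	 * so the IPC permissions are checked against shm_perm explicitly below.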
	 */
	shp = shm_lock(shmid);
	if(shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp,shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	mutex_lock(&shm_ids.mutex);
	shp = shm_lock(shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
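		 * (vm_pgoff is counted in pages, hence the PAGE_SIZE scaling
		 * in the test below.)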
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif