/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

#include "util.h"

static struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

static struct ipc_ids shm_ids;

#define shm_lock(id)	((struct shmid_kernel *)ipc_lock(&shm_ids, id))
#define shm_unlock(shp)	ipc_unlock(&(shp)->shm_perm)
#define shm_get(id)	((struct shmid_kernel *)ipc_get(&shm_ids, id))
#define shm_buildid(id, seq) \
	ipc_buildid(&shm_ids, id, seq)

static int newseg (key_t key, int shmflg, size_t size);
static void shm_open (struct vm_area_struct *shmd);
static void shm_close (struct vm_area_struct *shmd);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

size_t shm_ctlmax = SHMMAX;
size_t shm_ctlall = SHMALL;
int shm_ctlmni = SHMMNI;

static int shm_tot;	/* total number of shared memory pages */

void __init shm_init (void)
{
	ipc_init_ids(&shm_ids, 1);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				&shm_ids,
				sysvipc_shm_proc_show);
}

static inline int shm_checkid(struct shmid_kernel *s, int id)
{
	if (ipc_checkid(&shm_ids,&s->shm_perm,id))
		return -EIDRM;
	return 0;
}

static inline struct shmid_kernel *shm_rmid(int id)
{
	return (struct shmid_kernel *)ipc_rmid(&shm_ids,id);
}

static inline int shm_addid(struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni);
}


static inline void shm_inc (int id) {
	struct shmid_kernel *shp;

	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_atim = get_seconds();
	shp->shm_lprid = current->tgid;
	shp->shm_nattch++;
	shm_unlock(shp);
}

/* This is called by fork, once for every shm attach. */
static void shm_open (struct vm_area_struct *shmd)
{
	shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.mutex locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy (struct shmid_kernel *shp)
{
	shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid (shp->id);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size,
				shp->mlock_user);
	fput (shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor shmd.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close (struct vm_area_struct *shmd)
{
	struct file * file = shmd->vm_file;
	int id = file->f_dentry->d_inode->i_ino;
	struct shmid_kernel *shp;

	mutex_lock(&shm_ids.mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(id);
	BUG_ON(!shp);
	shp->shm_lprid = current->tgid;
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);
}

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	int ret;

	ret = shmem_mmap(file, vma);
	if (ret == 0) {
		vma->vm_ops = &shm_vm_ops;
		if (!(vma->vm_flags & VM_WRITE))
			vma->vm_flags &= ~VM_MAYWRITE;
		shm_inc(file->f_dentry->d_inode->i_ino);
	}

	return ret;
}

static struct file_operations shm_file_operations = {
	.mmap = shm_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = shmem_get_unmapped_area,
#endif
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
	.get_policy = shmem_get_policy,
#endif
};

static int newseg (key_t key, int shmflg, size_t size)
{
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;

	if (size < SHMMIN || size > shm_ctlmax)
		return -EINVAL;

	if (shm_tot + numpages >= shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_zero_setup takes care of mlock user accounting */
		file = hugetlb_zero_setup(size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		sprintf (name, "SYSV%08x", key);
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	error = -ENOSPC;
	id = shm_addid(shp);
	if(id == -1)
		goto no_id;

	shp->shm_cprid = current->tgid;
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->id = shm_buildid(id,shp->shm_perm.seq);
	shp->shm_file = file;
	file->f_dentry->d_inode->i_ino = shp->id;

	/* Hugetlb ops would have already been assigned. */
	if (!(shmflg & SHM_HUGETLB))
		file->f_op = &shm_file_operations;

	shm_tot += numpages;
	shm_unlock(shp);
	return shp->id;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
{
	struct shmid_kernel *shp;
	int err, id = 0;

	mutex_lock(&shm_ids.mutex);
	if (key == IPC_PRIVATE) {
		err = newseg(key, shmflg, size);
	} else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
		if (!(shmflg & IPC_CREAT))
			err = -ENOENT;
		else
			err = newseg(key, shmflg, size);
	} else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) {
		err = -EEXIST;
	} else {
		shp = shm_lock(id);
		BUG_ON(shp==NULL);
		if (shp->shm_segsz < size)
			err = -EINVAL;
		else if (ipcperms(&shp->shm_perm, shmflg))
			err = -EACCES;
		else {
			int shmid = shm_buildid(id, shp->shm_perm.seq);
			err = security_shm_associate(shp, shmflg);
			if (!err)
				err = shmid;
		}
		shm_unlock(shp);
	}
	mutex_unlock(&shm_ids.mutex);

	return err;
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz = in->shm_segsz;
		out.shm_atime = in->shm_atime;
		out.shm_dtime = in->shm_dtime;
		out.shm_ctime = in->shm_ctime;
		out.shm_cpid = in->shm_cpid;
		out.shm_lpid = in->shm_lpid;
		out.shm_nattch = in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid = tbuf.shm_perm.uid;
		out->gid = tbuf.shm_perm.gid;
		out->mode = tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.shm_perm.uid;
		out->gid = tbuf_old.shm_perm.gid;
		out->mode = tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if(in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin = in->shmmin;
		out.shmmni = in->shmmni;
		out.shmseg = in->shmseg;
		out.shmall = in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static void shm_get_stat(unsigned long *rss, unsigned long *swp)
{
	int i;

	*rss = 0;
	*swp = 0;

	for (i = 0; i <= shm_ids.max_id; i++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = shm_get(i);
		if(!shp)
			continue;

		inode = shp->shm_file->f_dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}
	}
}

asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo,0,sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = shm_ctlmni;
		shminfo.shmmax = shm_ctlmax;
		shminfo.shmall = shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if(copy_shminfo_to_user (buf, &shminfo, version))
			return -EFAULT;
		/* reading an integer is always atomic */
		err= shm_ids.max_id;
		if(err<0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info,0,sizeof(shm_info));
		mutex_lock(&shm_ids.mutex);
		shm_info.used_ids = shm_ids.in_use;
		shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = shm_ids.max_id;
		mutex_unlock(&shm_ids.mutex);
		if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;
		memset(&tbuf, 0, sizeof(tbuf));
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		} else if(cmd==SHM_STAT) {
			err = -EINVAL;
			if (shmid > shm_ids.max_id)
				goto out_unlock;
			result = shm_buildid(shmid, shp->shm_perm.seq);
		} else {
			err = shm_checkid(shp,shmid);
			if(err)
				goto out_unlock;
			result = 0;
		}
		err=-EACCES;
		if (ipcperms (&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz = shp->shm_segsz;
		tbuf.shm_atime = shp->shm_atim;
		tbuf.shm_dtime = shp->shm_dtim;
		tbuf.shm_ctime = shp->shm_ctim;
		tbuf.shm_cpid = shp->shm_cprid;
		tbuf.shm_lpid = shp->shm_lprid;
		if (!is_file_hugepages(shp->shm_file))
			tbuf.shm_nattch = shp->shm_nattch;
		else
			tbuf.shm_nattch = file_count(shp->shm_file) - 1;
		shm_unlock(shp);
		if(copy_shmid_to_user (buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock(shmid);
		if(shp==NULL) {
			err = -EINVAL;
			goto out;
		}
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if(cmd==SHM_LOCK) {
			struct user_struct * user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err = -EINVAL;
		if (shp == NULL)
			goto out_up;
		err = shm_checkid(shp, shmid);
		if(err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err=-EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		if (shp->shm_nattch){
			shp->shm_perm.mode |= SHM_DEST;
			/* Do not find it any more */
			shp->shm_perm.key = IPC_PRIVATE;
			shm_unlock(shp);
		} else
			shm_destroy (shp);
		mutex_unlock(&shm_ids.mutex);
		goto out;
	}

	case IPC_SET:
	{
		if (copy_shmid_from_user (&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		mutex_lock(&shm_ids.mutex);
		shp = shm_lock(shmid);
		err=-EINVAL;
		if(shp==NULL)
			goto out_up;
		if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
					setbuf.mode, &(shp->shm_perm))))
			goto out_unlock_up;
		err = shm_checkid(shp,shmid);
		if(err)
			goto out_unlock_up;
		err=-EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	mutex_unlock(&shm_ids.mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	unsigned long o_flags;
	int acc_mode;
	void *user_addr;

	if (shmid < 0) {
		err = -EINVAL;
		goto out;
	} else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	/* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					return -EINVAL;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			return -EINVAL;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		o_flags = O_RDONLY;
		acc_mode = S_IRUGO;
	} else {
		prot = PROT_READ | PROT_WRITE;
		o_flags = O_RDWR;
		acc_mode = S_IRUGO | S_IWUGO;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	shp = shm_lock(shmid);
	if(shp == NULL) {
		err = -EINVAL;
		goto out;
	}
	err = shm_checkid(shp,shmid);
	if (err) {
		shm_unlock(shp);
		goto out;
	}
	if (ipcperms(&shp->shm_perm, acc_mode)) {
		shm_unlock(shp);
		err = -EACCES;
		goto out;
	}

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err) {
		shm_unlock(shp);
		return err;
	}

	file = shp->shm_file;
	size = i_size_read(file->f_dentry->d_inode);
	shp->shm_nattch++;
	shm_unlock(shp);

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		user_addr = ERR_PTR(-EINVAL);
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0);

invalid:
	up_write(&current->mm->mmap_sem);

	mutex_lock(&shm_ids.mutex);
	shp = shm_lock(shmid);
	BUG_ON(!shp);
	shp->shm_nattch--;
	if(shp->shm_nattch == 0 &&
	   shp->shm_perm.mode & SHM_DEST)
		shm_destroy (shp);
	else
		shm_unlock(shp);
	mutex_unlock(&shm_ids.mutex);

	*raddr = (unsigned long) user_addr;
	err = 0;
	if (IS_ERR(user_addr))
		err = PTR_ERR(user_addr);
out:
	return err;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif
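
/*
 * Illustrative userspace sketch: the syscalls implemented above --
 * sys_shmget(), sys_shmat(), sys_shmdt() and sys_shmctl() -- are normally
 * reached through the shmget(2), shmat(2), shmdt(2) and shmctl(2) wrappers.
 * A minimal create/attach/detach/remove lifecycle might look like this
 * (error handling reduced to perror() for brevity):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		int id;
 *		char *p;
 *
 *		id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);  // newseg()
 *		if (id < 0) {
 *			perror("shmget");
 *			return 1;
 *		}
 *		p = shmat(id, NULL, 0);                             // do_shmat()
 *		if (p == (void *)-1) {
 *			perror("shmat");
 *			return 1;
 *		}
 *		strcpy(p, "hello");
 *		shmdt(p);                                           // sys_shmdt()
 *		shmctl(id, IPC_RMID, NULL);  // destroyed once shm_nattch drops to 0
 *		return 0;
 *	}
 */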