/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)
#define shm_buildid(id, seq)	ipc_buildid(id, seq)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&ns->ids[IPC_SHM_IDS]);
}
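
/*
 * The three limits initialized above are the ones exposed as sysctls
 * (kernel.shmmax, kernel.shmall, kernel.shmmni; see the header comment).
 * A minimal userspace sketch for inspecting one of them, illustrative
 * only and not part of this file:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long shmmax;
 *		FILE *f = fopen("/proc/sys/kernel/shmmax", "r");
 *
 *		if (f && fscanf(f, "%lu", &shmmax) == 1)
 *			printf("shmmax = %lu bytes\n", shmmax);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */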
/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
 * is held to protect access to the idr tree.
 */
static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check_down(
						struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}

static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}
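
/*
 * Because shm_open() runs once per attach at fork time, a child
 * inherits the parent's attachments and shm_nattch is bumped for each
 * of them. Illustrative userspace sketch (not part of this file):
 *
 *	#include <sys/shm.h>
 *	#include <unistd.h>
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);	// nattch == 1
 *	if (fork() == 0) {
 *		// child: mapping inherited, nattch == 2
 *		_exit(0);
 *	}
 */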
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock_down(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;
	else
		pol = current->mempolicy;
	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int (*fsync) (struct file *, struct dentry *, int datasync);
	struct shm_file_data *sfd = shm_file_data(file);
	int ret = -EINVAL;

	fsync = sfd->file->f_op->fsync;
	if (fsync)
		ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
	return ret;
}
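
/*
 * The file operations above are a thin interposer: each shm attach gets
 * its own struct file whose operations forward to the underlying shmem
 * (or hugetlbfs) file stored in sfd->file, while shm_mmap() swaps
 * vma->vm_ops for shm_vm_ops so that open/close can keep nattch and the
 * atime/dtime stamps up to date. A minimal sketch of the same
 * interposition idea (hypothetical names, illustrative only):
 *
 *	struct wrapper_data {
 *		const struct vm_operations_struct *inner_ops;
 *	};
 *
 *	static int wrapper_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		struct wrapper_data *w = vma->vm_file->private_data;
 *
 *		// bookkeeping hook would go here
 *		return w->inner_ops->fault(vma, vmf);
 *	}
 */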
static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

int is_file_shm_hugepages(struct file *file)
{
	int ret = 0;

	if (file->f_op == &shm_file_operations) {
		struct shm_file_data *sfd;
		sfd = shm_file_data(file);
		ret = is_file_hugepages(sfd->file);
	}
	return ret;
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup takes care of mlock user accounting */
		file = hugetlb_file_setup(name, size);
		shp->mlock_user = current->user;
	} else {
		int acctflag = VM_ACCOUNT;
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
		    sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = 0;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = shm_addid(ns, shp);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}
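
/*
 * Note that newseg() charges whole pages against the shm_ctlall limit:
 * numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT, so even a 1-byte
 * request consumes one page of the ns->shm_tot budget.
 * Illustrative arithmetic, assuming 4 KiB pages:
 *
 *	size = 1	-> numpages = 1   (4096 bytes charged)
 *	size = 4096	-> numpages = 1
 *	size = 4097	-> numpages = 2   (8192 bytes charged)
 */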
/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

struct shm_setbuf {
	uid_t	uid;
	gid_t	gid;
	mode_t	mode;
};

static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	    {
		struct shmid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->uid	= tbuf.shm_perm.uid;
		out->gid	= tbuf.shm_perm.gid;
		out->mode	= tbuf.shm_perm.mode;

		return 0;
	    }
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid	= tbuf_old.shm_perm.uid;
		out->gid	= tbuf_old.shm_perm.gid;
		out->mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
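
/*
 * Userspace reaches sys_shmget() through the shmget(2) wrapper.
 * Minimal illustrative sketch (not part of this file):
 *
 *	#include <sys/ipc.h>
 *	#include <sys/shm.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *
 *		if (id < 0) {
 *			perror("shmget");
 *			return 1;
 *		}
 *		printf("shmid = %d\n", id);
 *		return 0;
 *	}
 */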
/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct shmid_kernel *shp;
		struct inode *inode;

		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			*rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
		} else {
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
		}

		total++;
	}
}

asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	struct shm_setbuf setbuf;
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
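	/*
	 * IPC_INFO reports the namespace limits; the syscall's return
	 * value is the highest index in use (clamped to 0 above).
	 * Illustrative userspace sketch (not part of this file):
	 *
	 *	#include <sys/shm.h>
	 *
	 *	struct shminfo info;
	 *	int maxidx = shmctl(0, IPC_INFO, (struct shmid_ds *)&info);
	 *	// info.shmmax, info.shmall, info.shmmni hold the limits
	 */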
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (!buf) {
			err = -EFAULT;
			goto out;
		}

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock;

		if (!capable(CAP_IPC_LOCK)) {
			err = -EPERM;
			if (current->euid != shp->shm_perm.uid &&
			    current->euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK &&
			    !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current->user;
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
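	/*
	 * SHM_LOCK pins the segment's pages in memory; as the checks
	 * above show, the caller needs CAP_IPC_LOCK, or must own the
	 * segment and have a nonzero RLIMIT_MEMLOCK. Illustrative
	 * userspace sketch (not part of this file):
	 *
	 *	#include <sys/shm.h>
	 *
	 *	if (shmctl(id, SHM_LOCK, NULL) == 0) {
	 *		// segment is now locked; undo with SHM_UNLOCK
	 *		shmctl(id, SHM_UNLOCK, NULL);
	 *	}
	 */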
	case IPC_RMID:
	{
		/*
		 * We cannot simply remove the file. The SVID states
		 * that the block remains until the last person
		 * detaches from it, then is deleted. A shmat() on
		 * an RMID segment is legal in older Linux and if
		 * we change it apps break...
		 *
		 * Instead we set a destroyed flag, and then blow
		 * the name away when the usage hits zero.
		 */
		down_write(&shm_ids(ns).rw_mutex);
		shp = shm_lock_check_down(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_up;
		}

		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;

		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			err = -EPERM;
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		do_shm_rmid(ns, &shp->shm_perm);
		up_write(&shm_ids(ns).rw_mutex);
		goto out;
	}

	case IPC_SET:
	{
		if (!buf) {
			err = -EFAULT;
			goto out;
		}

		if (copy_shmid_from_user(&setbuf, buf, version)) {
			err = -EFAULT;
			goto out;
		}
		down_write(&shm_ids(ns).rw_mutex);
		shp = shm_lock_check_down(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_up;
		}
		err = audit_ipc_obj(&(shp->shm_perm));
		if (err)
			goto out_unlock_up;
		err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
		if (err)
			goto out_unlock_up;
		err = -EPERM;
		if (current->euid != shp->shm_perm.uid &&
		    current->euid != shp->shm_perm.cuid &&
		    !capable(CAP_SYS_ADMIN)) {
			goto out_unlock_up;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock_up;

		shp->shm_perm.uid = setbuf.uid;
		shp->shm_perm.gid = setbuf.gid;
		shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
			| (setbuf.mode & S_IRWXUGO);
		shp->shm_ctim = get_seconds();
		break;
	}

	default:
		err = -EINVAL;
		goto out;
	}

	err = 0;
out_unlock_up:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	goto out;
out_unlock:
	shm_unlock(shp);
out:
	return err;
}
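
/*
 * Typical shmctl(2) lifecycle from userspace: stat the segment,
 * optionally adjust permissions, and finally mark it for removal.
 * Illustrative sketch (not part of this file):
 *
 *	#include <sys/shm.h>
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0) {
 *		ds.shm_perm.mode = (ds.shm_perm.mode & ~0777) | 0640;
 *		shmctl(id, IPC_SET, &ds);
 *	}
 *	shmctl(id, IPC_RMID, NULL);	// destroyed once nattch drops to 0
 */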
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	mode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path.dentry = dget(shp->shm_file->f_path.dentry);
	path.mnt = shp->shm_file->f_path.mnt;
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	err = -ENOMEM;

	file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock_down(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	dput(path.dentry);
	goto out_nattch;
}

asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
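
/*
 * Userspace attach/detach sketch for the entry points above and for
 * sys_shmdt() below (illustrative only, not part of this file):
 *
 *	#include <sys/shm.h>
 *	#include <string.h>
 *
 *	void *p = shmat(id, NULL, 0);		// kernel picks the address
 *	if (p != (void *)-1) {
 *		memcpy(p, "hello", 6);		// segment is mapped MAP_SHARED
 *		shmdt(p);			// detach; SHM_DEST segments
 *	}					// are freed at nattch == 0
 */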
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *next;
	unsigned long addr = (unsigned long)shmaddr;
	loff_t size = 0;
	int retval = -EINVAL;

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;
	char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

	if (sizeof(size_t) <= sizeof(int))
		format = SMALL_STRING;
	else
		format = BIG_STRING;
	return seq_printf(s, format,
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif